Fix Rust UB problems (#6393)

* Fix miri problems by assuming alignment is 1 in rust

* Removed is_aligned fn from rust verifier.

* Add back is_aligned, but make it w.r.t. buffer[0]

* touch unused variable

* touch unused variable

* +nightly

* Move Rust miri testing into its own docker

* fix bash

* missing one endian conversion

* fix endianness2

* format stuff

Co-authored-by: Casper Neo <cneo@google.com>
Casper 2021-01-11 15:24:52 -05:00, committed by GitHub
parent 39e115fdb4
commit 408cf58024
17 changed files with 540 additions and 197 deletions

View File

@ -1,6 +1,6 @@
[package]
name = "flatbuffers"
version = "0.8.0"
version = "0.8.1"
edition = "2018"
authors = ["Robert Winslow <hello@rwinslow.com>", "FlatBuffers Maintainers"]
license = "Apache-2.0"

View File

@ -150,11 +150,13 @@ pub fn byte_swap_f64(x: f64) -> f64 {
/// endian conversion, if necessary.
#[inline]
pub fn emplace_scalar<T: EndianScalar>(s: &mut [u8], x: T) {
let sz = size_of::<T>();
let mut_ptr = (&mut s[..sz]).as_mut_ptr() as *mut T;
let val = x.to_little_endian();
let x_le = x.to_little_endian();
unsafe {
*mut_ptr = val;
core::ptr::copy_nonoverlapping(
&x_le as *const T as *const u8,
s.as_mut_ptr() as *mut u8,
size_of::<T>()
);
}
}
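The removed line *mut_ptr = val; was the UB Miri flagged: a &[u8] is only guaranteed 1-byte alignment, so casting its pointer to *mut T and writing through it is a misaligned write whenever T needs stronger alignment. core::ptr::copy_nonoverlapping copies bytes and imposes no alignment requirement. A minimal safe-Rust sketch of the same idea for a concrete type (hypothetical helper, not part of this diff):

fn emplace_u64(s: &mut [u8], x: u64) {
    // Endian conversion first, then an alignment-oblivious byte copy.
    // Panics if s is shorter than 8 bytes, like the slice indexing above.
    let x_le = x.to_le_bytes();
    s[..8].copy_from_slice(&x_le);
}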
@ -162,18 +164,22 @@ pub fn emplace_scalar<T: EndianScalar>(s: &mut [u8], x: T) {
/// Performs endian conversion, if necessary.
#[inline]
pub fn read_scalar_at<T: EndianScalar>(s: &[u8], loc: usize) -> T {
let buf = &s[loc..loc + size_of::<T>()];
read_scalar(buf)
read_scalar(&s[loc..])
}
/// Read an EndianScalar from the provided byte slice. Performs endian
/// conversion, if necessary.
#[inline]
pub fn read_scalar<T: EndianScalar>(s: &[u8]) -> T {
let sz = size_of::<T>();
let p = (&s[..sz]).as_ptr() as *const T;
let x = unsafe { *p };
let mut mem = core::mem::MaybeUninit::<T>::uninit();
// Since [u8] has alignment 1, copy the bytes into a properly aligned T
// rather than dereferencing a potentially misaligned *const T.
let x = unsafe {
core::ptr::copy_nonoverlapping(
s.as_ptr(),
mem.as_mut_ptr() as *mut u8,
size_of::<T>()
);
mem.assume_init()
};
x.from_little_endian()
}
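read_scalar gets the mirror treatment: instead of dereferencing a possibly misaligned *const T, the bytes are copied into a MaybeUninit<T>, which is properly aligned by construction. A safe-Rust sketch for a concrete type (hypothetical helper, not part of this diff):

fn read_u32(s: &[u8]) -> u32 {
    // Copy into an aligned local, then do the endian conversion.
    let mut bytes = [0u8; 4];
    bytes.copy_from_slice(&s[..4]);
    u32::from_le_bytes(bytes)
}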

View File

@ -232,12 +232,16 @@ impl<'opts, 'buf> Verifier<'opts, 'buf> {
self.num_tables = 0;
}
/// Check that there really is a T in there.
/// Checks that `pos` is aligned to T's alignment. This does not mean
/// `buffer[pos]` is aligned w.r.t. memory, since `buffer: &[u8]` only has
/// alignment 1.
///
/// ### WARNING
/// This does not work for FlatBuffers structs, as they have alignment 1
/// according to `core::mem::align_of`, but are meant to have a higher
/// alignment within a FlatBuffer, w.r.t. `buffer[0]`. TODO(caspern).
#[inline]
fn is_aligned<T>(&self, pos: usize) -> Result<()> {
// Safe because we're not dereferencing.
let p = unsafe { self.buffer.as_ptr().add(pos) };
if (p as usize) % std::mem::align_of::<T>() == 0 {
if pos % std::mem::align_of::<T>() == 0 {
Ok(())
} else {
Err(InvalidFlatbuffer::Unaligned {
@ -259,6 +263,7 @@ impl<'opts, 'buf> Verifier<'opts, 'buf> {
}
Ok(())
}
/// Check that there really is a T in there.
#[inline]
pub fn in_buffer<T>(&mut self, pos: usize) -> Result<()> {
self.is_aligned::<T>(pos)?;
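With this change, alignment is a property of the offset into the buffer rather than of the absolute address, which varies from run to run (and under Miri). A sketch of the new check as a free function (hypothetical, for illustration):

fn offset_is_aligned<T>(pos: usize) -> bool {
    pos % std::mem::align_of::<T>() == 0
}

fn main() {
    assert!(offset_is_aligned::<u32>(8));  // 8 % 4 == 0: verifies
    assert!(!offset_is_aligned::<u32>(6)); // 6 % 4 == 2: InvalidFlatbuffer::Unaligned
}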

View File

@ -200,13 +200,9 @@ impl<'a> flatbuffers::Verifiable for Equipment {
impl flatbuffers::SimpleToVerifyInSlice for Equipment {}
// struct Vec3, aligned to 4
#[repr(C, align(4))]
#[repr(transparent)]
#[derive(Clone, Copy, PartialEq)]
pub struct Vec3 {
x_: f32,
y_: f32,
z_: f32,
} // pub struct Vec3
pub struct Vec3(pub [u8; 12]);
impl std::fmt::Debug for Vec3 {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
f.debug_struct("Vec3")
@ -266,23 +262,87 @@ impl<'a> flatbuffers::Verifiable for Vec3 {
}
impl Vec3 {
#[allow(clippy::too_many_arguments)]
pub fn new(_x: f32, _y: f32, _z: f32) -> Self {
Vec3 {
x_: _x.to_little_endian(),
y_: _y.to_little_endian(),
z_: _z.to_little_endian(),
pub fn new(
x: f32,
y: f32,
z: f32,
) -> Self {
let mut s = Self([0; 12]);
s.set_x(x);
s.set_y(y);
s.set_z(z);
s
}
pub fn x(&self) -> f32 {
let mut mem = core::mem::MaybeUninit::<f32>::uninit();
unsafe {
core::ptr::copy_nonoverlapping(
self.0[0..].as_ptr(),
mem.as_mut_ptr() as *mut u8,
core::mem::size_of::<f32>(),
);
mem.assume_init()
}.from_little_endian()
}
pub fn set_x(&mut self, x: f32) {
let x_le = x.to_little_endian();
unsafe {
core::ptr::copy_nonoverlapping(
&x_le as *const f32 as *const u8,
self.0[0..].as_mut_ptr(),
core::mem::size_of::<f32>(),
);
}
}
pub fn x(&self) -> f32 {
self.x_.from_little_endian()
}
pub fn y(&self) -> f32 {
self.y_.from_little_endian()
let mut mem = core::mem::MaybeUninit::<f32>::uninit();
unsafe {
core::ptr::copy_nonoverlapping(
self.0[4..].as_ptr(),
mem.as_mut_ptr() as *mut u8,
core::mem::size_of::<f32>(),
);
mem.assume_init()
}.from_little_endian()
}
pub fn set_y(&mut self, x: f32) {
let x_le = x.to_little_endian();
unsafe {
core::ptr::copy_nonoverlapping(
&x_le as *const f32 as *const u8,
self.0[4..].as_mut_ptr(),
core::mem::size_of::<f32>(),
);
}
}
pub fn z(&self) -> f32 {
self.z_.from_little_endian()
let mut mem = core::mem::MaybeUninit::<f32>::uninit();
unsafe {
core::ptr::copy_nonoverlapping(
self.0[8..].as_ptr(),
mem.as_mut_ptr() as *mut u8,
core::mem::size_of::<f32>(),
);
mem.assume_init()
}.from_little_endian()
}
pub fn set_z(&mut self, x: f32) {
let x_le = x.to_little_endian();
unsafe {
core::ptr::copy_nonoverlapping(
&x_le as *const f32 as *const u8,
self.0[8..].as_mut_ptr(),
core::mem::size_of::<f32>(),
);
}
}
}
pub enum MonsterOffset {}
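The regenerated struct is now an opaque byte array with getters and setters instead of public typed fields. A usage sketch (hypothetical values):

let mut v = Vec3::new(1.0, 2.0, 3.0);
assert_eq!(v.x(), 1.0); // read back through the accessor
v.set_y(5.0);           // setters write little-endian bytes in place
assert_eq!(v.y(), 5.0);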

View File

@ -1729,12 +1729,16 @@ class RustGenerator : public BaseGenerator {
const StructDef &struct_def,
std::function<void(const FieldDef &field)> cb
) {
size_t offset_to_field = 0;
for (auto it = struct_def.fields.vec.begin();
it != struct_def.fields.vec.end(); ++it) {
const auto &field = **it;
code_.SetValue("FIELD_TYPE", GetTypeGet(field.value.type));
code_.SetValue("FIELD_NAME", Name(field));
code_.SetValue("FIELD_OFFSET", NumToString(offset_to_field));
code_.SetValue("REF", IsStruct(field.value.type) ? "&" : "");
cb(field);
offset_to_field += SizeOf(field.value.type.base_type) + field.padding;
}
}
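FIELD_OFFSET is a running byte offset: each field advances it by the field's scalar size plus any trailing padding. For MyGame.Example.Test (a: i16, b: i8, one trailing padding byte) this yields offset 0 for a, offset 2 for b, and a total size of 4, which matches the pub struct Test(pub [u8; 4]); generated below.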
// Generate an accessor struct with constructor for a flatbuffers struct.
@ -1745,26 +1749,18 @@ class RustGenerator : public BaseGenerator {
GenComment(struct_def.doc_comment);
code_.SetValue("ALIGN", NumToString(struct_def.minalign));
code_.SetValue("STRUCT_NAME", Name(struct_def));
code_.SetValue("STRUCT_SIZE", NumToString(struct_def.bytesize));
code_ += "// struct {{STRUCT_NAME}}, aligned to {{ALIGN}}";
code_ += "#[repr(C, align({{ALIGN}}))]";
// We represent FlatBuffers structs as Rust u8 arrays, since the data may
// have the wrong endianness and is only guaranteed alignment 1.
//
// PartialEq is useful to derive because we can correctly compare structs
// for equality by just comparing their underlying byte data. This doesn't
// hold for PartialOrd/Ord.
code_ += "// struct {{STRUCT_NAME}}, aligned to {{ALIGN}}";
code_ += "#[repr(transparent)]";
code_ += "#[derive(Clone, Copy, PartialEq)]";
code_ += "pub struct {{STRUCT_NAME}} {";
int padding_id = 0;
ForAllStructFields(struct_def, [&](const FieldDef &field) {
code_ += " {{FIELD_NAME}}_: {{FIELD_TYPE}},";
if (field.padding) {
std::string padding;
GenPadding(field, &padding, &padding_id, PaddingDefinition);
code_ += padding;
}
});
code_ += "} // pub struct {{STRUCT_NAME}}";
code_ += "pub struct {{STRUCT_NAME}}(pub [u8; {{STRUCT_SIZE}}]);";
// Debug for structs.
code_ += "impl std::fmt::Debug for {{STRUCT_NAME}} {";
@ -1841,37 +1837,21 @@ class RustGenerator : public BaseGenerator {
// Generate a constructor that takes all fields as arguments.
code_ += "impl {{STRUCT_NAME}} {";
// TODO(cneo): Stop generating args on one line. Make it simpler.
bool first_arg = true;
code_ += " #[allow(clippy::too_many_arguments)]";
code_ += " pub fn new(\\";
ForAllStructFields(struct_def, [&](const FieldDef &field) {
if (first_arg) first_arg = false; else code_ += ", \\";
code_.SetValue("REF", IsStruct(field.value.type) ? "&" : "");
code_ += "_{{FIELD_NAME}}: {{REF}}{{FIELD_TYPE}}\\";
code_ += " pub fn new(";
ForAllStructFields(struct_def, [&](const FieldDef &unused) {
(void)unused;
code_ += " {{FIELD_NAME}}: {{REF}}{{FIELD_TYPE}},";
});
code_ += ") -> Self {";
code_ += " {{STRUCT_NAME}} {";
ForAllStructFields(struct_def, [&](const FieldDef &field) {
const bool is_struct = IsStruct(field.value.type);
code_.SetValue("DEREF", is_struct ? "*" : "");
code_.SetValue("TO_LE", is_struct ? "" : ".to_little_endian()");
code_ += " {{FIELD_NAME}}_: {{DEREF}}_{{FIELD_NAME}}{{TO_LE}},";
code_ += " ) -> Self {";
code_ += " let mut s = Self([0; {{STRUCT_SIZE}}]);";
ForAllStructFields(struct_def, [&](const FieldDef &unused) {
(void)unused;
code_ += " s.set_{{FIELD_NAME}}({{REF}}{{FIELD_NAME}});";
});
code_ += "";
// TODO(cneo): Does this padding even work? Why after all the fields?
padding_id = 0;
ForAllStructFields(struct_def, [&](const FieldDef &field) {
if (field.padding) {
std::string padding;
GenPadding(field, &padding, &padding_id, PaddingInitializer);
code_ += " " + padding;
}
});
code_ += " }";
code_ += " s";
code_ += " }";
code_ += "";
if (parser_.opts.generate_name_strings) {
GenFullyQualifiedNameGetter(struct_def, struct_def.name);
@ -1879,14 +1859,49 @@ class RustGenerator : public BaseGenerator {
// Generate accessor methods for the struct.
ForAllStructFields(struct_def, [&](const FieldDef &field) {
const bool is_struct = IsStruct(field.value.type);
code_.SetValue("REF", is_struct ? "&" : "");
code_.SetValue("FROM_LE", is_struct ? "" : ".from_little_endian()");
this->GenComment(field.doc_comment, " ");
code_ += " pub fn {{FIELD_NAME}}(&self) -> {{REF}}{{FIELD_TYPE}} {";
code_ += " {{REF}}self.{{FIELD_NAME}}_{{FROM_LE}}";
code_ += " }";
// Getter.
if (IsStruct(field.value.type)) {
code_ += " pub fn {{FIELD_NAME}}(&self) -> &{{FIELD_TYPE}} {";
code_ +=
" unsafe {"
" &*(self.0[{{FIELD_OFFSET}}..].as_ptr() as *const"
" {{FIELD_TYPE}}) }";
} else {
code_ += " pub fn {{FIELD_NAME}}(&self) -> {{FIELD_TYPE}} {";
code_ +=
" let mut mem = core::mem::MaybeUninit::"
"<{{FIELD_TYPE}}>::uninit();";
code_ += " unsafe {";
code_ += " core::ptr::copy_nonoverlapping(";
code_ += " self.0[{{FIELD_OFFSET}}..].as_ptr(),";
code_ += " mem.as_mut_ptr() as *mut u8,";
code_ += " core::mem::size_of::<{{FIELD_TYPE}}>(),";
code_ += " );";
code_ += " mem.assume_init()";
code_ += " }.from_little_endian()";
}
code_ += " }\n";
// Setter.
if (IsStruct(field.value.type)) {
code_.SetValue("FIELD_SIZE",
NumToString(field.value.type.struct_def->bytesize));
code_ += " pub fn set_{{FIELD_NAME}}(&mut self, x: &{{FIELD_TYPE}}) {";
code_ +=
" self.0[{{FIELD_OFFSET}}..{{FIELD_OFFSET}}+{{FIELD_SIZE}}]"
".copy_from_slice(&x.0)";
} else {
code_ += " pub fn set_{{FIELD_NAME}}(&mut self, x: {{FIELD_TYPE}}) {";
code_ += " let x_le = x.to_little_endian();";
code_ += " unsafe {";
code_ += " core::ptr::copy_nonoverlapping(";
code_ += " &x_le as *const {{FIELD_TYPE}} as *const u8,";
code_ += " self.0[{{FIELD_OFFSET}}..].as_mut_ptr(),";
code_ += " core::mem::size_of::<{{FIELD_TYPE}}>(),";
code_ += " );";
code_ += " }";
}
code_ += " }\n";
// Generate a comparison function for this field if it is a key.
if (field.key) { GenKeyFieldMethods(field); }

View File

@ -50,3 +50,9 @@ else
fi
cargo bench $TARGET_FLAG
# RUST_NIGHTLY environment variable is set in the Dockerfile.
if [[ $RUST_NIGHTLY == 1 ]]; then
rustup +nightly component add miri
cargo +nightly miri test -- -Zmiri-disable-isolation
fi
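-Zmiri-disable-isolation lifts Miri's host isolation so the interpreted tests can reach the real clock and filesystem, which the tests that load the checked-in monster data need; without the flag, Miri rejects those operations.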

View File

@ -0,0 +1,8 @@
FROM rustlang/rust:nightly-stretch-slim as base
WORKDIR /code
ADD . .
RUN cp flatc_debian_stretch flatc
WORKDIR /code/tests
RUN rustc --version
ENV RUST_NIGHTLY=1
RUN ./RustTest.sh

View File

@ -110,11 +110,9 @@ impl<'a> flatbuffers::Verifiable for FromInclude {
impl flatbuffers::SimpleToVerifyInSlice for FromInclude {}
// struct Unused, aligned to 4
#[repr(C, align(4))]
#[repr(transparent)]
#[derive(Clone, Copy, PartialEq)]
pub struct Unused {
a_: i32,
} // pub struct Unused
pub struct Unused(pub [u8; 4]);
impl std::fmt::Debug for Unused {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
f.debug_struct("Unused")
@ -172,15 +170,37 @@ impl<'a> flatbuffers::Verifiable for Unused {
}
impl Unused {
#[allow(clippy::too_many_arguments)]
pub fn new(_a: i32) -> Self {
Unused {
a_: _a.to_little_endian(),
pub fn new(
a: i32,
) -> Self {
let mut s = Self([0; 4]);
s.set_a(a);
s
}
pub fn a(&self) -> i32 {
let mut mem = core::mem::MaybeUninit::<i32>::uninit();
unsafe {
core::ptr::copy_nonoverlapping(
self.0[0..].as_ptr(),
mem.as_mut_ptr() as *mut u8,
core::mem::size_of::<i32>(),
);
mem.assume_init()
}.from_little_endian()
}
pub fn set_a(&mut self, x: i32) {
let x_le = x.to_little_endian();
unsafe {
core::ptr::copy_nonoverlapping(
&x_le as *const i32 as *const u8,
self.0[0..].as_mut_ptr(),
core::mem::size_of::<i32>(),
);
}
}
pub fn a(&self) -> i32 {
self.a_.from_little_endian()
}
}
pub enum TableBOffset {}

View File

@ -639,13 +639,9 @@ impl<'a> flatbuffers::Verifiable for AnyAmbiguousAliases {
impl flatbuffers::SimpleToVerifyInSlice for AnyAmbiguousAliases {}
// struct Test, aligned to 2
#[repr(C, align(2))]
#[repr(transparent)]
#[derive(Clone, Copy, PartialEq)]
pub struct Test {
a_: i16,
b_: i8,
padding0__: u8,
} // pub struct Test
pub struct Test(pub [u8; 4]);
impl std::fmt::Debug for Test {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
f.debug_struct("Test")
@ -704,40 +700,72 @@ impl<'a> flatbuffers::Verifiable for Test {
}
impl Test {
#[allow(clippy::too_many_arguments)]
pub fn new(_a: i16, _b: i8) -> Self {
Test {
a_: _a.to_little_endian(),
b_: _b.to_little_endian(),
padding0__: 0,
}
pub fn new(
a: i16,
b: i8,
) -> Self {
let mut s = Self([0; 4]);
s.set_a(a);
s.set_b(b);
s
}
pub const fn get_fully_qualified_name() -> &'static str {
"MyGame.Example.Test"
}
pub fn a(&self) -> i16 {
self.a_.from_little_endian()
let mut mem = core::mem::MaybeUninit::<i16>::uninit();
unsafe {
core::ptr::copy_nonoverlapping(
self.0[0..].as_ptr(),
mem.as_mut_ptr() as *mut u8,
core::mem::size_of::<i16>(),
);
mem.assume_init()
}.from_little_endian()
}
pub fn set_a(&mut self, x: i16) {
let x_le = x.to_little_endian();
unsafe {
core::ptr::copy_nonoverlapping(
&x_le as *const i16 as *const u8,
self.0[0..].as_mut_ptr(),
core::mem::size_of::<i16>(),
);
}
}
pub fn b(&self) -> i8 {
self.b_.from_little_endian()
let mut mem = core::mem::MaybeUninit::<i8>::uninit();
unsafe {
core::ptr::copy_nonoverlapping(
self.0[2..].as_ptr(),
mem.as_mut_ptr() as *mut u8,
core::mem::size_of::<i8>(),
);
mem.assume_init()
}.from_little_endian()
}
pub fn set_b(&mut self, x: i8) {
let x_le = x.to_little_endian();
unsafe {
core::ptr::copy_nonoverlapping(
&x_le as *const i8 as *const u8,
self.0[2..].as_mut_ptr(),
core::mem::size_of::<i8>(),
);
}
}
}
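A layout sketch for the regenerated Test (hypothetical values): a occupies bytes 0..2 little-endian, b occupies byte 2, and byte 3 is padding that new leaves zeroed.

let t = Test::new(0x0102, 3);
assert_eq!(t.a(), 0x0102);
assert_eq!(t.b(), 3);
assert_eq!(t.0[3], 0); // trailing padding byte is never written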
// struct Vec3, aligned to 8
#[repr(C, align(8))]
#[repr(transparent)]
#[derive(Clone, Copy, PartialEq)]
pub struct Vec3 {
x_: f32,
y_: f32,
z_: f32,
padding0__: u32,
test1_: f64,
test2_: Color,
padding1__: u8,
test3_: Test,
padding2__: u16,
} // pub struct Vec3
pub struct Vec3(pub [u8; 32]);
impl std::fmt::Debug for Vec3 {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
f.debug_struct("Vec3")
@ -800,51 +828,157 @@ impl<'a> flatbuffers::Verifiable for Vec3 {
}
impl Vec3 {
#[allow(clippy::too_many_arguments)]
pub fn new(_x: f32, _y: f32, _z: f32, _test1: f64, _test2: Color, _test3: &Test) -> Self {
Vec3 {
x_: _x.to_little_endian(),
y_: _y.to_little_endian(),
z_: _z.to_little_endian(),
test1_: _test1.to_little_endian(),
test2_: _test2.to_little_endian(),
test3_: *_test3,
padding0__: 0,
padding1__: 0,
padding2__: 0,
}
pub fn new(
x: f32,
y: f32,
z: f32,
test1: f64,
test2: Color,
test3: &Test,
) -> Self {
let mut s = Self([0; 32]);
s.set_x(x);
s.set_y(y);
s.set_z(z);
s.set_test1(test1);
s.set_test2(test2);
s.set_test3(&test3);
s
}
pub const fn get_fully_qualified_name() -> &'static str {
"MyGame.Example.Vec3"
}
pub fn x(&self) -> f32 {
self.x_.from_little_endian()
let mut mem = core::mem::MaybeUninit::<f32>::uninit();
unsafe {
core::ptr::copy_nonoverlapping(
self.0[0..].as_ptr(),
mem.as_mut_ptr() as *mut u8,
core::mem::size_of::<f32>(),
);
mem.assume_init()
}.from_little_endian()
}
pub fn set_x(&mut self, x: f32) {
let x_le = x.to_little_endian();
unsafe {
core::ptr::copy_nonoverlapping(
&x_le as *const f32 as *const u8,
self.0[0..].as_mut_ptr(),
core::mem::size_of::<f32>(),
);
}
}
pub fn y(&self) -> f32 {
self.y_.from_little_endian()
let mut mem = core::mem::MaybeUninit::<f32>::uninit();
unsafe {
core::ptr::copy_nonoverlapping(
self.0[4..].as_ptr(),
mem.as_mut_ptr() as *mut u8,
core::mem::size_of::<f32>(),
);
mem.assume_init()
}.from_little_endian()
}
pub fn set_y(&mut self, x: f32) {
let x_le = x.to_little_endian();
unsafe {
core::ptr::copy_nonoverlapping(
&x_le as *const f32 as *const u8,
self.0[4..].as_mut_ptr(),
core::mem::size_of::<f32>(),
);
}
}
pub fn z(&self) -> f32 {
self.z_.from_little_endian()
let mut mem = core::mem::MaybeUninit::<f32>::uninit();
unsafe {
core::ptr::copy_nonoverlapping(
self.0[8..].as_ptr(),
mem.as_mut_ptr() as *mut u8,
core::mem::size_of::<f32>(),
);
mem.assume_init()
}.from_little_endian()
}
pub fn set_z(&mut self, x: f32) {
let x_le = x.to_little_endian();
unsafe {
core::ptr::copy_nonoverlapping(
&x_le as *const f32 as *const u8,
self.0[8..].as_mut_ptr(),
core::mem::size_of::<f32>(),
);
}
}
pub fn test1(&self) -> f64 {
self.test1_.from_little_endian()
let mut mem = core::mem::MaybeUninit::<f64>::uninit();
unsafe {
core::ptr::copy_nonoverlapping(
self.0[16..].as_ptr(),
mem.as_mut_ptr() as *mut u8,
core::mem::size_of::<f64>(),
);
mem.assume_init()
}.from_little_endian()
}
pub fn set_test1(&mut self, x: f64) {
let x_le = x.to_little_endian();
unsafe {
core::ptr::copy_nonoverlapping(
&x_le as *const f64 as *const u8,
self.0[16..].as_mut_ptr(),
core::mem::size_of::<f64>(),
);
}
}
pub fn test2(&self) -> Color {
self.test2_.from_little_endian()
let mut mem = core::mem::MaybeUninit::<Color>::uninit();
unsafe {
core::ptr::copy_nonoverlapping(
self.0[24..].as_ptr(),
mem.as_mut_ptr() as *mut u8,
core::mem::size_of::<Color>(),
);
mem.assume_init()
}.from_little_endian()
}
pub fn set_test2(&mut self, x: Color) {
let x_le = x.to_little_endian();
unsafe {
core::ptr::copy_nonoverlapping(
&x_le as *const Color as *const u8,
self.0[24..].as_mut_ptr(),
core::mem::size_of::<Color>(),
);
}
}
pub fn test3(&self) -> &Test {
&self.test3_
unsafe { &*(self.0[26..].as_ptr() as *const Test) }
}
pub fn set_test3(&mut self, x: &Test) {
self.0[26..26+4].copy_from_slice(&x.0)
}
}
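test3() hands out a reference by pointer cast rather than copying. That cast is only sound because Test is now a #[repr(transparent)] wrapper over [u8; 4]: its alignment is 1, so any in-bounds byte offset is validly aligned for it. The invariants it relies on, as a sketch:

assert_eq!(std::mem::align_of::<Test>(), 1); // any offset is validly aligned
assert_eq!(std::mem::size_of::<Test>(), 4);  // exactly the bytes at 26..30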
// struct Ability, aligned to 4
#[repr(C, align(4))]
#[repr(transparent)]
#[derive(Clone, Copy, PartialEq)]
pub struct Ability {
id_: u32,
distance_: u32,
} // pub struct Ability
pub struct Ability(pub [u8; 8]);
impl std::fmt::Debug for Ability {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
f.debug_struct("Ability")
@ -903,20 +1037,43 @@ impl<'a> flatbuffers::Verifiable for Ability {
}
impl Ability {
#[allow(clippy::too_many_arguments)]
pub fn new(_id: u32, _distance: u32) -> Self {
Ability {
id_: _id.to_little_endian(),
distance_: _distance.to_little_endian(),
}
pub fn new(
id: u32,
distance: u32,
) -> Self {
let mut s = Self([0; 8]);
s.set_id(id);
s.set_distance(distance);
s
}
pub const fn get_fully_qualified_name() -> &'static str {
"MyGame.Example.Ability"
}
pub fn id(&self) -> u32 {
self.id_.from_little_endian()
let mut mem = core::mem::MaybeUninit::<u32>::uninit();
unsafe {
core::ptr::copy_nonoverlapping(
self.0[0..].as_ptr(),
mem.as_mut_ptr() as *mut u8,
core::mem::size_of::<u32>(),
);
mem.assume_init()
}.from_little_endian()
}
pub fn set_id(&mut self, x: u32) {
let x_le = x.to_little_endian();
unsafe {
core::ptr::copy_nonoverlapping(
&x_le as *const u32 as *const u8,
self.0[0..].as_mut_ptr(),
core::mem::size_of::<u32>(),
);
}
}
#[inline]
pub fn key_compare_less_than(&self, o: &Ability) -> bool {
self.id() < o.id()
@ -928,8 +1085,28 @@ impl Ability {
key.cmp(&val)
}
pub fn distance(&self) -> u32 {
self.distance_.from_little_endian()
let mut mem = core::mem::MaybeUninit::<u32>::uninit();
unsafe {
core::ptr::copy_nonoverlapping(
self.0[4..].as_ptr(),
mem.as_mut_ptr() as *mut u8,
core::mem::size_of::<u32>(),
);
mem.assume_init()
}.from_little_endian()
}
pub fn set_distance(&mut self, x: u32) {
let x_le = x.to_little_endian();
unsafe {
core::ptr::copy_nonoverlapping(
&x_le as *const u32 as *const u8,
self.0[4..].as_mut_ptr(),
core::mem::size_of::<u32>(),
);
}
}
}
pub enum TestSimpleTableWithEnumOffset {}

View File

@ -115,12 +115,9 @@ impl<'a> flatbuffers::Verifiable for EnumInNestedNS {
impl flatbuffers::SimpleToVerifyInSlice for EnumInNestedNS {}
// struct StructInNestedNS, aligned to 4
#[repr(C, align(4))]
#[repr(transparent)]
#[derive(Clone, Copy, PartialEq)]
pub struct StructInNestedNS {
a_: i32,
b_: i32,
} // pub struct StructInNestedNS
pub struct StructInNestedNS(pub [u8; 8]);
impl std::fmt::Debug for StructInNestedNS {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
f.debug_struct("StructInNestedNS")
@ -179,23 +176,66 @@ impl<'a> flatbuffers::Verifiable for StructInNestedNS {
}
impl StructInNestedNS {
#[allow(clippy::too_many_arguments)]
pub fn new(_a: i32, _b: i32) -> Self {
StructInNestedNS {
a_: _a.to_little_endian(),
b_: _b.to_little_endian(),
}
pub fn new(
a: i32,
b: i32,
) -> Self {
let mut s = Self([0; 8]);
s.set_a(a);
s.set_b(b);
s
}
pub const fn get_fully_qualified_name() -> &'static str {
"NamespaceA.NamespaceB.StructInNestedNS"
}
pub fn a(&self) -> i32 {
self.a_.from_little_endian()
let mut mem = core::mem::MaybeUninit::<i32>::uninit();
unsafe {
core::ptr::copy_nonoverlapping(
self.0[0..].as_ptr(),
mem.as_mut_ptr() as *mut u8,
core::mem::size_of::<i32>(),
);
mem.assume_init()
}.from_little_endian()
}
pub fn set_a(&mut self, x: i32) {
let x_le = x.to_little_endian();
unsafe {
core::ptr::copy_nonoverlapping(
&x_le as *const i32 as *const u8,
self.0[0..].as_mut_ptr(),
core::mem::size_of::<i32>(),
);
}
}
pub fn b(&self) -> i32 {
self.b_.from_little_endian()
let mut mem = core::mem::MaybeUninit::<i32>::uninit();
unsafe {
core::ptr::copy_nonoverlapping(
self.0[4..].as_ptr(),
mem.as_mut_ptr() as *mut u8,
core::mem::size_of::<i32>(),
);
mem.assume_init()
}.from_little_endian()
}
pub fn set_b(&mut self, x: i32) {
let x_le = x.to_little_endian();
unsafe {
core::ptr::copy_nonoverlapping(
&x_le as *const i32 as *const u8,
self.0[4..].as_mut_ptr(),
core::mem::size_of::<i32>(),
);
}
}
}
pub enum TableInNestedNSOffset {}

View File

@ -98,6 +98,7 @@ fn create_serialized_example_with_generated_code(builder: &mut flatbuffers::Flat
my_game::example::finish_monster_buffer(builder, mon);
}
#[cfg(not(miri))] // slow.
fn main() {
// test the allocation tracking:
{

View File

@ -103,6 +103,7 @@ fn validate_monster(flexbuffer: &[u8]) {
// This is in a separate binary from the tests because taking over the global
// allocator is not hermetic and not thread-safe.
#[cfg(not(miri))] // slow.
fn main() {
let start_up = current_allocs();
@ -133,6 +134,7 @@ fn main() {
}
#[test]
#[cfg(not(miri))] // slow.
fn no_extra_allocations() {
main()
}
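Miri interprets the program rather than running it natively and is orders of magnitude slower, so the fuzzing- and quickcheck-heavy tests and binaries are compiled out under Miri with #[cfg(not(miri))]; the same gating recurs throughout the remaining test files.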

View File

@ -108,6 +108,7 @@ fn store_vec_uint_16() {
);
}
#[cfg(not(miri))] // slow.
quickcheck! {
fn qc_f32(x: f32) -> bool {
let fxb = singleton(x);

View File

@ -15,5 +15,6 @@
mod binary_format;
mod interop;
mod other_api;
#[cfg(not(miri))] // slow.
mod qc_serious;
mod rwyw;

View File

@ -13,9 +13,11 @@
// limitations under the License.
use flexbuffers::*;
#[cfg(not(miri))] // slow.
use quickcheck::QuickCheck;
#[test]
#[cfg(not(miri))] // slow.
fn qc_reader_no_crash() {
fn no_crash(xs: Vec<u8>) -> bool {
let r = Reader::get_root(&xs);

View File

@ -14,6 +14,7 @@
// Read what you wrote.
use flexbuffers::*;
#[cfg(not(miri))] // slow.
use quickcheck;
use serde::{Deserialize, Serialize};
@ -33,6 +34,7 @@ impl quickcheck::Arbitrary for NonNullString {
}
}
#[cfg(not(miri))] // slow.
quickcheck! {
fn qc_vec_bool(xs: Vec<bool>) -> bool {
let mut builder = Builder::default();
@ -363,6 +365,8 @@ struct Foo {
c: Vec<u32>,
d: String,
}
#[cfg(not(miri))] // slow.
quickcheck! {
fn serde_foo(a: i8,
b: f64,

View File

@ -16,6 +16,7 @@
*/
#[macro_use]
#[cfg(not(miri))] // slow.
extern crate quickcheck;
extern crate flatbuffers;
extern crate flexbuffers;
@ -23,6 +24,7 @@ extern crate rand;
extern crate serde;
#[macro_use]
extern crate serde_derive;
#[cfg(not(miri))] // slow.
#[macro_use]
extern crate quickcheck_derive;
@ -246,6 +248,7 @@ fn builder_collapses_into_vec() {
}
#[test]
#[cfg(not(miri))] // slow.
fn verifier_one_byte_errors_do_not_crash() {
let mut b = flatbuffers::FlatBufferBuilder::new();
create_serialized_example_with_library_code(&mut b);
@ -272,6 +275,7 @@ fn verifier_one_byte_errors_do_not_crash() {
}
}
#[test]
#[cfg(not(miri))] // slow.
fn verifier_too_many_tables() {
use my_game::example::*;
let b = &mut flatbuffers::FlatBufferBuilder::new();
@ -296,6 +300,7 @@ fn verifier_too_many_tables() {
assert!(flatbuffers::root_with_opts::<Monster>(&opts, data).is_ok());
}
#[test]
#[cfg(not(miri))] // slow.
fn verifier_apparent_size_too_large() {
use my_game::example::*;
let b = &mut flatbuffers::FlatBufferBuilder::new();
@ -850,11 +855,6 @@ mod generated_code_alignment_and_padding {
assert_eq!(1, ::std::mem::size_of::<my_game::example::Color>());
}
#[test]
fn enum_color_is_aligned_to_1() {
assert_eq!(1, ::std::mem::align_of::<my_game::example::Color>());
}
#[test]
fn union_any_is_1_byte() {
assert_eq!(1, ::std::mem::size_of::<my_game::example::Any>());
@ -864,27 +864,15 @@ mod generated_code_alignment_and_padding {
fn union_any_is_aligned_to_1() {
assert_eq!(1, ::std::mem::align_of::<my_game::example::Any>());
}
#[test]
fn struct_test_is_4_bytes() {
assert_eq!(4, ::std::mem::size_of::<my_game::example::Test>());
}
#[test]
fn struct_test_is_aligned_to_2() {
assert_eq!(2, ::std::mem::align_of::<my_game::example::Test>());
}
#[test]
fn struct_vec3_is_32_bytes() {
assert_eq!(32, ::std::mem::size_of::<my_game::example::Vec3>());
}
#[test]
fn struct_vec3_is_aligned_to_8() {
assert_eq!(8, ::std::mem::align_of::<my_game::example::Vec3>());
}
#[test]
fn struct_vec3_is_written_with_correct_alignment_in_table() {
let b = &mut flatbuffers::FlatBufferBuilder::new();
@ -906,8 +894,8 @@ mod generated_code_alignment_and_padding {
let vec3_ptr = vec3 as *const my_game::example::Vec3 as usize;
assert!(vec3_ptr > start_ptr);
let aln = ::std::mem::align_of::<my_game::example::Vec3>();
assert_eq!((vec3_ptr - start_ptr) % aln, 0);
// Vec3 is aligned to 8 wrt the flatbuffer.
assert_eq!((vec3_ptr - start_ptr) % 8, 0);
}
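These test deletions follow from the representation change: core::mem::align_of on a generated struct now returns 1, so the old align_of-based assertions no longer describe buffer layout. The tests instead hardcode the schema-level alignment (8 for Vec3), measured relative to the start of the flatbuffer.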
#[test]
@ -915,11 +903,6 @@ mod generated_code_alignment_and_padding {
assert_eq!(8, ::std::mem::size_of::<my_game::example::Ability>());
}
#[test]
fn struct_ability_is_aligned_to_4() {
assert_eq!(4, ::std::mem::align_of::<my_game::example::Ability>());
}
#[test]
fn struct_ability_is_written_with_correct_alignment_in_table_vector() {
let b = &mut flatbuffers::FlatBufferBuilder::new();
@ -948,14 +931,15 @@ mod generated_code_alignment_and_padding {
for a in abilities.iter().rev() {
let a_ptr = a as *const my_game::example::Ability as usize;
assert!(a_ptr > start_ptr);
let aln = ::std::mem::align_of::<my_game::example::Ability>();
assert_eq!((a_ptr - start_ptr) % aln, 0);
// Each 8-byte Ability stays 8-byte aligned wrt the flatbuffer.
assert_eq!((a_ptr - start_ptr) % 8, 0);
}
}
}
#[cfg(test)]
mod roundtrip_byteswap {
#[cfg(not(miri))] // slow.
extern crate quickcheck;
extern crate flatbuffers;
@ -1003,6 +987,7 @@ mod roundtrip_byteswap {
// fn fuzz_f64() { quickcheck::QuickCheck::new().max_tests(N).quickcheck(prop_f64 as fn(f64)); }
}
#[cfg(not(miri))] // slow.
#[cfg(test)]
mod roundtrip_vectors {
@ -1082,6 +1067,7 @@ mod roundtrip_vectors {
#[cfg(test)]
mod create_vector_direct {
#[cfg(not(miri))] // slow.
extern crate quickcheck;
extern crate flatbuffers;
@ -1129,6 +1115,7 @@ mod roundtrip_vectors {
#[cfg(test)]
mod string_manual_build {
#[cfg(not(miri))] // slow.
extern crate quickcheck;
extern crate flatbuffers;
@ -1166,6 +1153,7 @@ mod roundtrip_vectors {
#[cfg(test)]
mod string_helper_build {
#[cfg(not(miri))] // slow.
extern crate quickcheck;
extern crate flatbuffers;
@ -1196,9 +1184,11 @@ mod roundtrip_vectors {
#[cfg(test)]
mod ubyte {
#[cfg(not(miri))] // slow.
extern crate quickcheck;
extern crate flatbuffers;
#[cfg(not(miri))] // slow.
#[test]
fn fuzz_manual_build() {
fn prop(vec: Vec<u8>) {
@ -1254,11 +1244,13 @@ mod roundtrip_table {
use std::collections::HashMap;
extern crate flatbuffers;
#[cfg(not(miri))] // slow.
extern crate quickcheck;
use super::LCG;
#[test]
#[cfg(not(miri))] // slow.
fn table_of_mixed_scalars_fuzz() {
// Values we're testing against: chosen to ensure no bits get chopped
// off anywhere, and also be different from each other.
@ -1366,6 +1358,7 @@ mod roundtrip_table {
}
#[test]
#[cfg(not(miri))] // slow.
fn table_of_byte_strings_fuzz() {
fn prop(vec: Vec<Vec<u8>>) {
use flatbuffers::field_index_to_field_offset as fi2fo;
@ -1402,6 +1395,7 @@ mod roundtrip_table {
}
#[test]
#[cfg(not(miri))] // slow.
fn fuzz_table_of_strings() {
fn prop(vec: Vec<String>) {
use flatbuffers::field_index_to_field_offset as fi2fo;
@ -1433,8 +1427,10 @@ mod roundtrip_table {
quickcheck::QuickCheck::new().max_tests(n).quickcheck(prop as fn(Vec<String>));
}
#[cfg(not(miri))] // slow.
mod table_of_vectors_of_scalars {
extern crate flatbuffers;
#[cfg(not(miri))] // slow.
extern crate quickcheck;
const N: u64 = 20;
@ -1515,9 +1511,11 @@ mod roundtrip_table {
}
}
#[cfg(not(miri))] // slow.
#[cfg(test)]
mod roundtrip_scalars {
extern crate flatbuffers;
#[cfg(not(miri))] // slow.
extern crate quickcheck;
const N: u64 = 1000;
@ -1558,8 +1556,10 @@ mod roundtrip_scalars {
}
#[cfg(test)]
#[cfg(not(miri))] // slow.
mod roundtrip_push_follow_scalars {
extern crate flatbuffers;
#[cfg(not(miri))] // slow.
extern crate quickcheck;
use flatbuffers::Push;
@ -1685,6 +1685,7 @@ mod write_and_read_examples {
}
#[test]
#[cfg(not(miri))] // slow.
fn generated_code_creates_correct_example_repeatedly_with_reset() {
let b = &mut flatbuffers::FlatBufferBuilder::new();
for _ in 0..100 {
@ -1706,6 +1707,7 @@ mod write_and_read_examples {
}
#[test]
#[cfg(not(miri))] // slow.
fn library_code_creates_correct_example_repeatedly_with_reset() {
let b = &mut flatbuffers::FlatBufferBuilder::new();
for _ in 0..100 {
@ -2031,14 +2033,6 @@ mod follow_impls {
assert_eq!(off.self_follow(&vec[..], 4).safe_slice(), &[1, 2, 3][..]);
}
#[cfg(target_endian = "little")]
#[test]
fn to_slice_of_u16() {
let vec: Vec<u8> = vec![255, 255, 255, 255, 2, 0, 0, 0, 1, 2, 3, 4];
let off: flatbuffers::FollowStart<&[u16]> = flatbuffers::FollowStart::new();
assert_eq!(off.self_follow(&vec[..], 4), &vec![513, 1027][..]);
}
#[test]
fn to_vector_of_u16() {
let vec: Vec<u8> = vec![255, 255, 255, 255, 2, 0, 0, 0, 1, 2, 3, 4];
@ -2371,6 +2365,7 @@ mod vtable_deduplication {
]);
}
#[cfg(not(miri))] // slow.
#[test]
fn many_identical_tables_use_few_vtables() {
let mut b = flatbuffers::FlatBufferBuilder::new();