Write storage type into PARTMAP

This commit adds a storage_type segment to the PARTMAP disk file. This
contains information about the storage type of the table.
Is it volatile? Is it persistent? A full 8 bits were allocated to leave room
for future improvements.
next
Sayan Nandan 3 years ago
parent 1d403c0d1a
commit 66b9ac27af

@ -58,8 +58,8 @@
use crate::coredb::array::Array; use crate::coredb::array::Array;
use crate::coredb::htable::Coremap; use crate::coredb::htable::Coremap;
use crate::coredb::table::Table;
use crate::coredb::SnapshotStatus; use crate::coredb::SnapshotStatus;
use crate::kvengine::KVEngine;
use core::mem::MaybeUninit; use core::mem::MaybeUninit;
use std::sync::atomic::AtomicBool; use std::sync::atomic::AtomicBool;
use std::sync::Arc; use std::sync::Arc;
@ -213,12 +213,12 @@ impl Keyspace {
// add the default table // add the default table
ht.true_if_insert( ht.true_if_insert(
unsafe_objectid_from_slice!("default"), unsafe_objectid_from_slice!("default"),
Arc::new(Table::KV(KVEngine::default())), Arc::new(Table::new_default_kve()),
); );
// add the system table // add the system table
ht.true_if_insert( ht.true_if_insert(
unsafe_objectid_from_slice!("_system"), unsafe_objectid_from_slice!("_system"),
Arc::new(Table::KV(KVEngine::default())), Arc::new(Table::new_default_kve()),
); );
ht ht
}, },
@ -242,7 +242,7 @@ impl Keyspace {
pub fn create_table(&self, table_identifier: ObjectID, table_type: TableType) -> bool { pub fn create_table(&self, table_identifier: ObjectID, table_type: TableType) -> bool {
self.tables.true_if_insert(table_identifier, { self.tables.true_if_insert(table_identifier, {
match table_type { match table_type {
TableType::KeyValue => Arc::new(Table::KV(KVEngine::default())), TableType::KeyValue => Arc::new(Table::new_default_kve()),
} }
}) })
} }
@ -310,24 +310,3 @@ fn test_keyspace_try_delete_protected_table() {
DdlError::ProtectedObject DdlError::ProtectedObject
); );
} }
// same 8 byte ptrs; any chance of optimizations?
#[derive(Debug)]
/// The underlying table type. This is the place for the other data models (soon!)
pub enum Table {
/// a key/value store
KV(KVEngine),
}
impl Table {
/// Get the key/value store if the table is a key/value store
pub const fn get_kvstore(&self) -> Option<&KVEngine> {
#[allow(irrefutable_let_patterns)]
if let Self::KV(kvs) = self {
Some(kvs)
} else {
None
}
}
}

@ -45,6 +45,7 @@ pub mod iarray;
pub mod lazy; pub mod lazy;
pub mod lock; pub mod lock;
pub mod memstore; pub mod memstore;
pub mod table;
/// This is a thread-safe database handle, which on cloning simply /// This is a thread-safe database handle, which on cloning simply
/// gives another atomic reference to the `shared` which is a `Shared` object /// gives another atomic reference to the `shared` which is a `Shared` object

@ -0,0 +1,116 @@
/*
* Created on Sat Jul 17 2021
*
* This file is a part of Skytable
* Skytable (formerly known as TerrabaseDB or Skybase) is a free and open-source
* NoSQL database written by Sayan Nandan ("the Author") with the
* vision to provide flexibility in data modelling without compromising
* on performance, queryability or scalability.
*
* Copyright (c) 2021, Sayan Nandan <ohsayan@outlook.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*
*/
use crate::coredb::htable::Coremap;
use crate::coredb::Data;
use crate::kvengine::KVEngine;
#[derive(Debug)]
/// The data model backing a table. Each variant wraps the engine that owns
/// the table's in-memory data; only a key/value model exists for now
pub enum DataModel {
    /// a key/value store (see [`KVEngine`])
    KV(KVEngine),
}
// same 8 byte ptrs; any chance of optimizations?
#[derive(Debug)]
/// The underlying table type. This is the place for the other data models (soon!)
pub struct Table {
    /// the data model (currently always a key/value store) holding this
    /// table's data
    model_store: DataModel,
    /// whether this table is volatile (true) or persistent (false); this is
    /// what gets serialized as the storage-type byte in the PARTMAP file
    volatile: bool,
}
impl Table {
    /// Get the key/value store if the table is a key/value store,
    /// returning `None` for any other (future) data model
    pub const fn get_kvstore(&self) -> Option<&KVEngine> {
        #[allow(irrefutable_let_patterns)]
        if let DataModel::KV(kvs) = &self.model_store {
            // `kvs` is already `&KVEngine`; no extra borrow needed
            Some(kvs)
        } else {
            None
        }
    }
    /// Returns the storage-type byte written to the PARTMAP file:
    /// 1 if the table is volatile, 0 if it is persistent
    pub const fn storage_type(&self) -> u8 {
        self.volatile as u8
    }
    /// Create a key/value table from a model code, a volatility flag and
    /// pre-existing data.
    ///
    /// The model code encodes the key/value encodings as:
    /// ```text
    /// 0 => (k: bin, v: bin)
    /// 1 => (k: bin, v: str)
    /// 2 => (k: str, v: str)
    /// 3 => (k: str, v: bin)
    /// ```
    /// Returns `None` if the model code is unknown (i.e. greater than 3)
    pub fn kve_from_model_code_and_data(
        modelcode: u8,
        volatile: bool,
        data: Coremap<Data, Data>,
    ) -> Option<Self> {
        let data = match modelcode {
            0 => KVEngine::init_with_data(false, false, data),
            1 => KVEngine::init_with_data(false, true, data),
            2 => KVEngine::init_with_data(true, true, data),
            3 => KVEngine::init_with_data(true, false, data),
            _ => return None,
        };
        Some(Self {
            model_store: DataModel::KV(data),
            volatile,
        })
    }
    /// Create an empty, persistent key/value table from a model code
    /// (see [`Self::kve_from_model_code_and_data`] for the code mapping)
    pub fn kve_from_model_code(modelcode: u8) -> Option<Self> {
        Self::kve_from_model_code_and_data(modelcode, false, Coremap::new())
    }
    /// Create the default table: a persistent key/value table with binary
    /// keys and binary values (model code 0)
    pub fn new_default_kve() -> Self {
        // model code 0 is always accepted above, so this cannot fail; a safe
        // expect (instead of `unreachable_unchecked`) avoids UB if the code
        // mapping ever changes
        Self::kve_from_model_code(0).expect("model code 0 must be valid")
    }
    /// Returns the model code for this table — the inverse of the mapping
    /// used by [`Self::kve_from_model_code_and_data`]:
    /// ```text
    /// bin,bin => 0
    /// bin,str => 1
    /// str,str => 2
    /// str,bin => 3
    /// ```
    pub fn get_model_code(&self) -> u8 {
        match &self.model_store {
            DataModel::KV(kvs) => {
                // the encoding flags are true when the component is
                // str-encoded (checked), false when it is plain binary
                let (k_enc, v_enc) = kvs.get_encoding();
                if k_enc {
                    if v_enc {
                        // both k + v are str
                        2
                    } else {
                        // only k is str
                        3
                    }
                } else if v_enc {
                    // k is bin, v is str
                    1
                } else {
                    // both are bin
                    0
                }
            }
        }
    }
    /// Returns a reference to the underlying data model
    pub fn get_model_ref(&self) -> &DataModel {
        &self.model_store
    }
}

@ -101,12 +101,21 @@ impl Default for KVEngine {
impl KVEngine { impl KVEngine {
/// Create a new in-memory KVEngine with the specified encoding schemes /// Create a new in-memory KVEngine with the specified encoding schemes
pub fn init(encoded_k: bool, encoded_v: bool) -> Self { pub fn init(encoded_k: bool, encoded_v: bool) -> Self {
Self::init_with_data(encoded_k, encoded_v, Coremap::new())
}
pub fn init_with_data(encoded_k: bool, encoded_v: bool, table: Coremap<Data, Data>) -> Self {
Self { Self {
table: Coremap::new(), table,
encoded_k: AtomicBool::new(encoded_k), encoded_k: AtomicBool::new(encoded_k),
encoded_v: AtomicBool::new(encoded_v), encoded_v: AtomicBool::new(encoded_v),
} }
} }
pub fn get_encoding(&self) -> (bool, bool) {
(
self.encoded_k.load(ORD_RELAXED),
self.encoded_v.load(ORD_RELAXED),
)
}
pub fn __get_inner_ref(&self) -> &Coremap<Data, Data> { pub fn __get_inner_ref(&self) -> &Coremap<Data, Data> {
&self.table &self.table
} }

@ -32,7 +32,7 @@
use crate::coredb::memstore::Keyspace; use crate::coredb::memstore::Keyspace;
use crate::coredb::memstore::Memstore; use crate::coredb::memstore::Memstore;
use crate::coredb::memstore::ObjectID; use crate::coredb::memstore::ObjectID;
use crate::coredb::memstore::Table; use crate::coredb::table::{DataModel, Table};
use crate::storage::interface::DIR_KSROOT; use crate::storage::interface::DIR_KSROOT;
use std::fs::{self, File}; use std::fs::{self, File};
use std::io::Result as IoResult; use std::io::Result as IoResult;
@ -50,10 +50,13 @@ macro_rules! tbl_path {
pub fn flush_table(tableid: &ObjectID, ksid: &ObjectID, table: &Table) -> IoResult<()> { pub fn flush_table(tableid: &ObjectID, ksid: &ObjectID, table: &Table) -> IoResult<()> {
let path = tbl_path!(tableid, ksid); let path = tbl_path!(tableid, ksid);
let mut file = File::create(&path)?; let mut file = File::create(&path)?;
match table { let modelcode = table.get_model_code();
Table::KV(kve) => { match table.get_model_ref() {
super::interface::serialize_map_into_slow_buffer(&mut file, kve.__get_inner_ref())? DataModel::KV(kve) => super::interface::serialize_map_into_slow_buffer(
} &mut file,
kve.__get_inner_ref(),
modelcode,
)?,
} }
file.sync_all()?; file.sync_all()?;
fs::rename(&path, &path[..path.len() - 1]) fs::rename(&path, &path[..path.len() - 1])
@ -71,7 +74,7 @@ pub fn flush_keyspace(ksid: &ObjectID, keyspace: &Keyspace) -> IoResult<()> {
pub fn flush_partmap(ksid: &ObjectID, keyspace: &Keyspace) -> IoResult<()> { pub fn flush_partmap(ksid: &ObjectID, keyspace: &Keyspace) -> IoResult<()> {
let path = unsafe { concat_str!(DIR_KSROOT, "/", ksid.as_str(), "/", "PARTMAP_") }; let path = unsafe { concat_str!(DIR_KSROOT, "/", ksid.as_str(), "/", "PARTMAP_") };
let mut file = File::create(&path)?; let mut file = File::create(&path)?;
super::interface::serialize_set_into_slow_buffer(&mut file, &keyspace.tables)?; super::interface::serialize_partmap_into_slow_buffer(&mut file, keyspace)?;
file.sync_all()?; file.sync_all()?;
fs::rename(&path, &path[..path.len() - 1])?; fs::rename(&path, &path[..path.len() - 1])?;
Ok(()) Ok(())

@ -28,8 +28,8 @@
use crate::coredb::htable::Coremap; use crate::coredb::htable::Coremap;
use crate::coredb::htable::Data; use crate::coredb::htable::Data;
use crate::coredb::memstore::Keyspace;
use crate::coredb::memstore::Memstore; use crate::coredb::memstore::Memstore;
use core::hash::Hash;
use std::io::Result as IoResult; use std::io::Result as IoResult;
use std::io::{BufWriter, Write}; use std::io::{BufWriter, Write};
@ -82,22 +82,17 @@ pub fn create_tree(memroot: Memstore) -> IoResult<()> {
pub fn serialize_map_into_slow_buffer<T: Write>( pub fn serialize_map_into_slow_buffer<T: Write>(
buffer: &mut T, buffer: &mut T,
map: &Coremap<Data, Data>, map: &Coremap<Data, Data>,
model_code: u8,
) -> IoResult<()> { ) -> IoResult<()> {
let mut buffer = BufWriter::new(buffer); let mut buffer = BufWriter::new(buffer);
super::raw_serialize_map(map, &mut buffer)?; super::se::raw_serialize_map(map, &mut buffer, model_code)?;
buffer.flush()?; buffer.flush()?;
Ok(()) Ok(())
} }
pub fn serialize_set_into_slow_buffer<T: Write, K, V>( pub fn serialize_partmap_into_slow_buffer<T: Write>(buffer: &mut T, ks: &Keyspace) -> IoResult<()> {
buffer: &mut T,
set: &Coremap<K, V>,
) -> IoResult<()>
where
K: Eq + Hash + AsRef<[u8]>,
{
let mut buffer = BufWriter::new(buffer); let mut buffer = BufWriter::new(buffer);
super::raw_serialize_set(set, &mut buffer)?; super::se::raw_serialize_partmap(&mut buffer, ks)?;
buffer.flush()?; buffer.flush()?;
Ok(()) Ok(())
} }

@ -184,20 +184,31 @@ unsafe fn raw_byte_repr<'a, T: 'a>(len: &'a T) -> &'a [u8] {
} }
} }
mod se {
use super::*;
use crate::coredb::memstore::Keyspace;
/// Serialize a map into a _writable_ thing /// Serialize a map into a _writable_ thing
pub fn serialize_map(map: &Coremap<Data, Data>) -> Result<Vec<u8>, std::io::Error> { pub fn serialize_map(
map: &Coremap<Data, Data>,
model_code: u8,
) -> Result<Vec<u8>, std::io::Error> {
/* /*
[LEN:8B][KLEN:8B|VLEN:8B][K][V][KLEN:8B][VLEN:8B]... [1B: Model Mark][LEN:8B][KLEN:8B|VLEN:8B][K][V][KLEN:8B][VLEN:8B]...
*/ */
// write the len header first // write the len header first
let mut w = Vec::with_capacity(128); let mut w = Vec::with_capacity(128);
self::raw_serialize_map(map, &mut w)?; self::raw_serialize_map(map, &mut w, model_code)?;
Ok(w) Ok(w)
} }
/// Serialize a map and write it to a provided buffer /// Serialize a map and write it to a provided buffer
pub fn raw_serialize_map<W: Write>(map: &Coremap<Data, Data>, w: &mut W) -> std::io::Result<()> { pub fn raw_serialize_map<W: Write>(
map: &Coremap<Data, Data>,
w: &mut W,
model_code: u8,
) -> std::io::Result<()> {
unsafe { unsafe {
w.write_all(raw_byte_repr(&model_code))?;
w.write_all(raw_byte_repr(&to_64bit_little_endian!(map.len())))?; w.write_all(raw_byte_repr(&to_64bit_little_endian!(map.len())))?;
// now the keys and values // now the keys and values
for kv in map.iter() { for kv in map.iter() {
@ -229,6 +240,31 @@ where
Ok(()) Ok(())
} }
/// Generate a partition map for the given keyspace
/// ```text
/// [8B: EXTENT]([8B: LEN][?B: PARTITION ID][1B: Storage type])*
/// ```
pub fn raw_serialize_partmap<W: Write>(w: &mut W, keyspace: &Keyspace) -> std::io::Result<()> {
unsafe {
// extent
w.write_all(raw_byte_repr(&to_64bit_little_endian!(keyspace
.tables
.len())))?;
for table in keyspace.tables.iter() {
// partition ID
w.write_all(raw_byte_repr(&to_64bit_little_endian!(table.key().len())))?;
// now storage type
w.write_all(raw_byte_repr(&table.storage_type()))?;
}
}
Ok(())
}
}
mod de {
use super::*;
use std::collections::HashMap;
pub trait DeserializeFrom { pub trait DeserializeFrom {
fn is_expected_len(clen: usize) -> bool; fn is_expected_len(clen: usize) -> bool;
fn from_slice(slice: &[u8]) -> Self; fn from_slice(slice: &[u8]) -> Self;
@ -296,12 +332,66 @@ where
} }
} }
/// Deserialize a file that contains a serialized map /// Deserializes a map-like set which has an 1B _bytemark_ for every entry
pub fn deserialize_map(data: Vec<u8>) -> Option<Coremap<Data, Data>> { pub fn deserialize_set_ctype_bytemark<T>(data: &[u8]) -> Option<HashMap<T, u8>>
where
T: DeserializeFrom + Eq + Hash,
{
// First read the length header // First read the length header
if data.len() < 8 { if data.len() < 8 {
// so the file doesn't even have the length header? noice, just return // so the file doesn't even have the length header? noice, just return
None None
} else {
unsafe {
// so we have 8B. Just unsafe access and transmute it
let len = transmute_len(data.as_ptr());
let mut set = HashMap::with_capacity(len);
// this is what we have left: [KLEN:8B]*
// move 8 bytes ahead since we're done with len
let mut ptr = data.as_ptr().add(8);
let end_ptr = data.as_ptr().add(data.len());
for _ in 0..len {
if (ptr.add(8)) >= end_ptr {
// not enough space and even if there is a len
// there is no value. This is even true for ZSTs
return None;
}
let lenkey = transmute_len(ptr);
ptr = ptr.add(8);
if (ptr.add(lenkey + 1)) > end_ptr {
// not enough data left
return None;
}
if !T::is_expected_len(lenkey) {
return None;
}
// get the key as a raw slice, we've already checked if end_ptr is less
let key = T::from_slice(slice::from_raw_parts(ptr, lenkey));
// move the ptr ahead; done with the key
ptr = ptr.add(lenkey);
let bytemark = ptr::read(ptr);
ptr = ptr.add(1);
// push it in
if set.insert(key, bytemark).is_some() {
// repeat?; that's not what we wanted
return None;
}
}
if ptr == end_ptr {
Some(set)
} else {
// nope, someone gave us more data
None
}
}
}
}
/// Deserialize a file that contains a serialized map. This also returns the model code
pub fn deserialize_map(data: Vec<u8>) -> Option<(Coremap<Data, Data>, u8)> {
// First read the length header
if data.len() < 9 {
// so the file doesn't even have the length/model header? noice, just return
None
} else { } else {
unsafe { unsafe {
/* /*
@ -312,13 +402,22 @@ pub fn deserialize_map(data: Vec<u8>) -> Option<Coremap<Data, Data>> {
can guarantee that we won't ever read incorrect lengths of data can guarantee that we won't ever read incorrect lengths of data
and we won't read into others' memory (or corrupt our own) and we won't read into others' memory (or corrupt our own)
*/ */
let mut ptr = data.as_ptr();
let modelcode: u8 = ptr::read(ptr);
// model check
if modelcode > 3 {
// this model isn't supposed to have more than 3. Corrupted data
return None;
}
ptr = ptr.add(1);
// so we have 8B. Just unsafe access and transmute it; nobody cares // so we have 8B. Just unsafe access and transmute it; nobody cares
let len = transmute_len(data.as_ptr()); let len = transmute_len(ptr);
// move 8 bytes ahead since we're done with len
ptr = ptr.add(8);
let hm = Coremap::with_capacity(len); let hm = Coremap::with_capacity(len);
// this is what we have left: [KLEN:8B][VLEN:8B] // this is what we have left: [KLEN:8B][VLEN:8B]
// move 8 bytes ahead since we're done with len
let mut ptr = data.as_ptr().add(8);
let end_ptr = data.as_ptr().add(data.len()); let end_ptr = data.as_ptr().add(data.len());
for _ in 0..len { for _ in 0..len {
if (ptr.add(16)) >= end_ptr { if (ptr.add(16)) >= end_ptr {
@ -344,7 +443,7 @@ pub fn deserialize_map(data: Vec<u8>) -> Option<Coremap<Data, Data>> {
hm.upsert(key, val); hm.upsert(key, val);
} }
if ptr == end_ptr { if ptr == end_ptr {
Some(hm) Some((hm, modelcode))
} else { } else {
// nope, someone gave us more data // nope, someone gave us more data
None None
@ -398,3 +497,4 @@ unsafe fn transmute_len(start_ptr: *const u8) -> usize {
}); });
}); });
} }
}

@ -63,7 +63,7 @@ pub(super) fn raw_generate_preload<W: Write>(w: &mut W, store: &Memstore) -> IoR
// generate the meta segment // generate the meta segment
#[allow(clippy::identity_op)] #[allow(clippy::identity_op)]
w.write_all(&[META_SEGMENT])?; w.write_all(&[META_SEGMENT])?;
super::raw_serialize_set(&store.keyspaces, w)?; super::se::raw_serialize_set(&store.keyspaces, w)?;
Ok(()) Ok(())
} }
@ -72,7 +72,7 @@ pub(super) fn raw_generate_preload<W: Write>(w: &mut W, store: &Memstore) -> IoR
/// ([8B: Len][?B: Label])* /// ([8B: Len][?B: Label])*
/// ``` /// ```
pub(super) fn raw_generate_partfile<W: Write>(w: &mut W, store: &Keyspace) -> IoResult<()> { pub(super) fn raw_generate_partfile<W: Write>(w: &mut W, store: &Keyspace) -> IoResult<()> {
super::raw_serialize_set(&store.tables, w) super::se::raw_serialize_set(&store.tables, w)
} }
/// Reads the preload file and returns a set /// Reads the preload file and returns a set
@ -89,7 +89,7 @@ pub(super) fn read_preload_raw(preload: Vec<u8>) -> IoResult<HashSet<ObjectID>>
} }
} }
// all checks complete; time to decode // all checks complete; time to decode
let ret = super::deserialize_set_ctype(&preload[1..]); let ret = super::de::deserialize_set_ctype(&preload[1..]);
match ret { match ret {
Some(ret) => Ok(ret), Some(ret) => Ok(ret),
_ => Err(IoError::from(ErrorKind::InvalidData)), _ => Err(IoError::from(ErrorKind::InvalidData)),
@ -98,7 +98,7 @@ pub(super) fn read_preload_raw(preload: Vec<u8>) -> IoResult<HashSet<ObjectID>>
/// Reads the partfile and returns a set /// Reads the partfile and returns a set
pub fn read_partfile_raw(partfile: Vec<u8>) -> IoResult<HashSet<ObjectID>> { pub fn read_partfile_raw(partfile: Vec<u8>) -> IoResult<HashSet<ObjectID>> {
match super::deserialize_set_ctype(&partfile) { match super::de::deserialize_set_ctype(&partfile) {
Some(s) => Ok(s), Some(s) => Ok(s),
None => Err(IoError::from(ErrorKind::InvalidData)), None => Err(IoError::from(ErrorKind::InvalidData)),
} }

@ -29,9 +29,10 @@ use super::*;
#[test] #[test]
fn test_serialize_deserialize_empty() { fn test_serialize_deserialize_empty() {
let cmap = Coremap::new(); let cmap = Coremap::new();
let ser = serialize_map(&cmap).unwrap(); let ser = se::serialize_map(&cmap, 0).unwrap();
let de = deserialize_map(ser).unwrap(); let (de, model_code) = de::deserialize_map(ser).unwrap();
assert!(de.len() == 0); assert!(de.len() == 0);
assert_eq!(0, model_code);
} }
#[test] #[test]
@ -39,12 +40,13 @@ fn test_ser_de_few_elements() {
let cmap = Coremap::new(); let cmap = Coremap::new();
cmap.upsert("sayan".into(), "writes code".into()); cmap.upsert("sayan".into(), "writes code".into());
cmap.upsert("supersayan".into(), "writes super code".into()); cmap.upsert("supersayan".into(), "writes super code".into());
let ser = serialize_map(&cmap).unwrap(); let ser = se::serialize_map(&cmap, 0).unwrap();
let de = deserialize_map(ser).unwrap(); let (de, modelcode) = de::deserialize_map(ser).unwrap();
assert!(de.len() == cmap.len()); assert!(de.len() == cmap.len());
assert!(de assert!(de
.iter() .iter()
.all(|kv| cmap.get(kv.key()).unwrap().eq(kv.value()))); .all(|kv| cmap.get(kv.key()).unwrap().eq(kv.value())));
assert_eq!(modelcode, 0);
} }
cfg_test!( cfg_test!(
@ -64,12 +66,13 @@ cfg_test!(
.zip(values.iter()) .zip(values.iter())
.map(|(k, v)| (Data::from(k.to_owned()), Data::from(v.to_owned()))) .map(|(k, v)| (Data::from(k.to_owned()), Data::from(v.to_owned())))
.collect(); .collect();
let ser = serialize_map(&cmap).unwrap(); let ser = se::serialize_map(&cmap, 0).unwrap();
let de = deserialize_map(ser).unwrap(); let (de, modelcode) = de::deserialize_map(ser).unwrap();
assert!(de assert!(de
.iter() .iter()
.all(|kv| cmap.get(kv.key()).unwrap().eq(kv.value()))); .all(|kv| cmap.get(kv.key()).unwrap().eq(kv.value())));
assert!(de.len() == cmap.len()); assert!(de.len() == cmap.len());
assert_eq!(modelcode, 0);
} }
#[test] #[test]
@ -86,11 +89,11 @@ cfg_test!(
.zip(values.iter()) .zip(values.iter())
.map(|(k, v)| (Data::from(k.to_owned()), Data::from(v.to_owned()))) .map(|(k, v)| (Data::from(k.to_owned()), Data::from(v.to_owned())))
.collect(); .collect();
let mut se = serialize_map(&cmap).unwrap(); let mut se = se::serialize_map(&cmap, 0).unwrap();
// random chop // random chop
se.truncate(124); se.truncate(124);
// corrupted // corrupted
assert!(deserialize_map(se).is_none()); assert!(de::deserialize_map(se).is_none());
} }
#[test] #[test]
fn test_ser_de_excess_bytes() { fn test_ser_de_excess_bytes() {
@ -110,11 +113,11 @@ cfg_test!(
.zip(values.iter()) .zip(values.iter())
.map(|(k, v)| (Data::from(k.to_owned()), Data::from(v.to_owned()))) .map(|(k, v)| (Data::from(k.to_owned()), Data::from(v.to_owned())))
.collect(); .collect();
let mut se = serialize_map(&cmap).unwrap(); let mut se = se::serialize_map(&cmap, 0).unwrap();
// random patch // random patch
let patch: Vec<u8> = (0u16..500u16).into_iter().map(|v| (v >> 7) as u8).collect(); let patch: Vec<u8> = (0u16..500u16).into_iter().map(|v| (v >> 7) as u8).collect();
se.extend(patch); se.extend(patch);
assert!(deserialize_map(se).is_none()); assert!(de::deserialize_map(se).is_none());
} }
); );
@ -125,7 +128,7 @@ fn test_runtime_panic_32bit_or_lower() {
let max = u64::MAX; let max = u64::MAX;
let byte_stream = unsafe { raw_byte_repr(&max).to_owned() }; let byte_stream = unsafe { raw_byte_repr(&max).to_owned() };
let ptr = byte_stream.as_ptr(); let ptr = byte_stream.as_ptr();
unsafe { transmute_len(ptr) }; unsafe { de::transmute_len(ptr) };
} }
mod interface_tests { mod interface_tests {

Loading…
Cancel
Save