Write storage type into PARTMAP

This commit adds a storage_type segment to the PARTMAP disk file. This
contains information about the storage type of the table.
Is it volatile? Is it persistent? 8 bits were allocated to leave room for
future storage types.
next
Sayan Nandan 3 years ago
parent 1d403c0d1a
commit 66b9ac27af

@ -58,8 +58,8 @@
use crate::coredb::array::Array;
use crate::coredb::htable::Coremap;
use crate::coredb::table::Table;
use crate::coredb::SnapshotStatus;
use crate::kvengine::KVEngine;
use core::mem::MaybeUninit;
use std::sync::atomic::AtomicBool;
use std::sync::Arc;
@ -213,12 +213,12 @@ impl Keyspace {
// add the default table
ht.true_if_insert(
unsafe_objectid_from_slice!("default"),
Arc::new(Table::KV(KVEngine::default())),
Arc::new(Table::new_default_kve()),
);
// add the system table
ht.true_if_insert(
unsafe_objectid_from_slice!("_system"),
Arc::new(Table::KV(KVEngine::default())),
Arc::new(Table::new_default_kve()),
);
ht
},
@ -242,7 +242,7 @@ impl Keyspace {
/// Create a table with the given identifier and table type.
///
/// Returns `true` if the table was newly created, `false` if a table with
/// that identifier already exists.
pub fn create_table(&self, table_identifier: ObjectID, table_type: TableType) -> bool {
    self.tables.true_if_insert(table_identifier, {
        match table_type {
            // key/value is currently the only table type
            TableType::KeyValue => Arc::new(Table::new_default_kve()),
        }
    })
}
@ -310,24 +310,3 @@ fn test_keyspace_try_delete_protected_table() {
DdlError::ProtectedObject
);
}
// same 8 byte ptrs; any chance of optimizations?
#[derive(Debug)]
/// The underlying table type. This is the place for the other data models (soon!)
///
/// Each variant wraps the engine implementing that data model; `KV` is
/// currently the only one.
pub enum Table {
    /// a key/value store backed by [`KVEngine`]
    KV(KVEngine),
}
impl Table {
    /// Returns the key/value engine backing this table, if it is a
    /// key/value table (currently the only variant, so this always
    /// returns `Some`)
    pub const fn get_kvstore(&self) -> Option<&KVEngine> {
        match self {
            Self::KV(store) => Some(store),
        }
    }
}

@ -45,6 +45,7 @@ pub mod iarray;
pub mod lazy;
pub mod lock;
pub mod memstore;
pub mod table;
/// This is a thread-safe database handle, which on cloning simply
/// gives another atomic reference to the `shared` which is a `Shared` object

@ -0,0 +1,116 @@
/*
* Created on Sat Jul 17 2021
*
* This file is a part of Skytable
* Skytable (formerly known as TerrabaseDB or Skybase) is a free and open-source
* NoSQL database written by Sayan Nandan ("the Author") with the
* vision to provide flexibility in data modelling without compromising
* on performance, queryability or scalability.
*
* Copyright (c) 2021, Sayan Nandan <ohsayan@outlook.com>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <https://www.gnu.org/licenses/>.
*
*/
use crate::coredb::htable::Coremap;
use crate::coredb::Data;
use crate::kvengine::KVEngine;
#[derive(Debug)]
/// The data model of a table. Only the key/value model exists today;
/// further models will be added as variants here
pub enum DataModel {
    /// a key/value store backed by [`KVEngine`]
    KV(KVEngine),
}
// same 8 byte ptrs; any chance of optimizations?
#[derive(Debug)]
/// The underlying table type. This is the place for the other data models (soon!)
pub struct Table {
    /// the data model (and its engine) backing this table
    model_store: DataModel,
    /// whether the table is volatile; written to the PARTMAP as the
    /// storage-type byte (see `storage_type`)
    volatile: bool,
}
impl Table {
    /// Returns a reference to the underlying [`KVEngine`] if this table uses
    /// the key/value data model, else `None`
    pub const fn get_kvstore(&self) -> Option<&KVEngine> {
        #[allow(irrefutable_let_patterns)]
        if let DataModel::KV(kvs) = &self.model_store {
            // `kvs` is already `&KVEngine`; no extra borrow needed
            Some(kvs)
        } else {
            None
        }
    }
    /// Returns the storage type as an 8-bit integer for the PARTMAP:
    /// `0` => persistent, `1` => volatile. The remaining bit patterns are
    /// reserved for future storage types
    pub const fn storage_type(&self) -> u8 {
        self.volatile as u8
    }
    /// Create a key/value table from a model code, a volatility flag and
    /// initial data.
    ///
    /// Model codes (key encoding, value encoding):
    /// - `0` => bin,bin
    /// - `1` => bin,str
    /// - `2` => str,str
    /// - `3` => str,bin
    ///
    /// Returns `None` for any other model code
    pub fn kve_from_model_code_and_data(
        modelcode: u8,
        volatile: bool,
        data: Coremap<Data, Data>,
    ) -> Option<Self> {
        let data = match modelcode {
            0 => KVEngine::init_with_data(false, false, data),
            1 => KVEngine::init_with_data(false, true, data),
            2 => KVEngine::init_with_data(true, true, data),
            3 => KVEngine::init_with_data(true, false, data),
            _ => return None,
        };
        Some(Self {
            model_store: DataModel::KV(data),
            volatile,
        })
    }
    /// Create an empty, persistent key/value table from a model code
    /// (see [`Self::kve_from_model_code_and_data`] for the codes)
    pub fn kve_from_model_code(modelcode: u8) -> Option<Self> {
        Self::kve_from_model_code_and_data(modelcode, false, Coremap::new())
    }
    /// Create the default key/value table: model code 0 (bin,bin), persistent
    pub fn new_default_kve() -> Self {
        match Self::kve_from_model_code(0) {
            Some(k) => k,
            // SAFETY: 0 is always a valid model code (the bin,bin arm above),
            // so `kve_from_model_code(0)` can never return `None`
            None => unsafe { core::hint::unreachable_unchecked() },
        }
    }
    /// Returns the model code for this table; this is the exact inverse of
    /// the mapping in [`Self::kve_from_model_code_and_data`]
    pub fn get_model_code(&self) -> u8 {
        match &self.model_store {
            DataModel::KV(kvs) => {
                /*
                bin,bin => 0
                bin,str => 1
                str,str => 2
                str,bin => 3
                */
                // exhaustive match on (k encoded, v encoded) keeps this
                // visibly consistent with kve_from_model_code_and_data
                match kvs.get_encoding() {
                    (false, false) => 0,
                    (false, true) => 1,
                    (true, true) => 2,
                    (true, false) => 3,
                }
            }
        }
    }
    /// Returns a reference to the underlying data model
    pub fn get_model_ref(&self) -> &DataModel {
        &self.model_store
    }
}

@ -101,12 +101,21 @@ impl Default for KVEngine {
impl KVEngine {
/// Create a new in-memory KVEngine with the specified encoding schemes
pub fn init(encoded_k: bool, encoded_v: bool) -> Self {
    Self::init_with_data(encoded_k, encoded_v, Coremap::new())
}
/// Create a new in-memory KVEngine with the specified encoding schemes and
/// the given initial data
pub fn init_with_data(encoded_k: bool, encoded_v: bool, table: Coremap<Data, Data>) -> Self {
    Self {
        table,
        encoded_k: AtomicBool::new(encoded_k),
        encoded_v: AtomicBool::new(encoded_v),
    }
}
/// Returns the `(encoded_k, encoded_v)` flags of this engine as a tuple,
/// loaded atomically (relaxed ordering)
pub fn get_encoding(&self) -> (bool, bool) {
    (
        self.encoded_k.load(ORD_RELAXED),
        self.encoded_v.load(ORD_RELAXED),
    )
}
/// Returns a reference to the inner [`Coremap`]; the dunder prefix marks
/// this as intended for the storage (serialization) layer only
pub fn __get_inner_ref(&self) -> &Coremap<Data, Data> {
    &self.table
}

@ -32,7 +32,7 @@
use crate::coredb::memstore::Keyspace;
use crate::coredb::memstore::Memstore;
use crate::coredb::memstore::ObjectID;
use crate::coredb::memstore::Table;
use crate::coredb::table::{DataModel, Table};
use crate::storage::interface::DIR_KSROOT;
use std::fs::{self, File};
use std::io::Result as IoResult;
@ -50,10 +50,13 @@ macro_rules! tbl_path {
pub fn flush_table(tableid: &ObjectID, ksid: &ObjectID, table: &Table) -> IoResult<()> {
let path = tbl_path!(tableid, ksid);
let mut file = File::create(&path)?;
match table {
Table::KV(kve) => {
super::interface::serialize_map_into_slow_buffer(&mut file, kve.__get_inner_ref())?
}
let modelcode = table.get_model_code();
match table.get_model_ref() {
DataModel::KV(kve) => super::interface::serialize_map_into_slow_buffer(
&mut file,
kve.__get_inner_ref(),
modelcode,
)?,
}
file.sync_all()?;
fs::rename(&path, &path[..path.len() - 1])
@ -71,7 +74,7 @@ pub fn flush_keyspace(ksid: &ObjectID, keyspace: &Keyspace) -> IoResult<()> {
pub fn flush_partmap(ksid: &ObjectID, keyspace: &Keyspace) -> IoResult<()> {
let path = unsafe { concat_str!(DIR_KSROOT, "/", ksid.as_str(), "/", "PARTMAP_") };
let mut file = File::create(&path)?;
super::interface::serialize_set_into_slow_buffer(&mut file, &keyspace.tables)?;
super::interface::serialize_partmap_into_slow_buffer(&mut file, keyspace)?;
file.sync_all()?;
fs::rename(&path, &path[..path.len() - 1])?;
Ok(())

@ -28,8 +28,8 @@
use crate::coredb::htable::Coremap;
use crate::coredb::htable::Data;
use crate::coredb::memstore::Keyspace;
use crate::coredb::memstore::Memstore;
use core::hash::Hash;
use std::io::Result as IoResult;
use std::io::{BufWriter, Write};
@ -82,22 +82,17 @@ pub fn create_tree(memroot: Memstore) -> IoResult<()> {
/// Serialize a map (prefixed by its 1B model code) into a buffered writer
/// and flush it
pub fn serialize_map_into_slow_buffer<T: Write>(
    buffer: &mut T,
    map: &Coremap<Data, Data>,
    model_code: u8,
) -> IoResult<()> {
    let mut buffer = BufWriter::new(buffer);
    super::se::raw_serialize_map(map, &mut buffer, model_code)?;
    buffer.flush()?;
    Ok(())
}
pub fn serialize_set_into_slow_buffer<T: Write, K, V>(
buffer: &mut T,
set: &Coremap<K, V>,
) -> IoResult<()>
where
K: Eq + Hash + AsRef<[u8]>,
{
pub fn serialize_partmap_into_slow_buffer<T: Write>(buffer: &mut T, ks: &Keyspace) -> IoResult<()> {
let mut buffer = BufWriter::new(buffer);
super::raw_serialize_set(set, &mut buffer)?;
super::se::raw_serialize_partmap(&mut buffer, ks)?;
buffer.flush()?;
Ok(())
}

@ -184,217 +184,317 @@ unsafe fn raw_byte_repr<'a, T: 'a>(len: &'a T) -> &'a [u8] {
}
}
/// Serialize a map into a _writable_ thing
pub fn serialize_map(map: &Coremap<Data, Data>) -> Result<Vec<u8>, std::io::Error> {
/*
[LEN:8B][KLEN:8B|VLEN:8B][K][V][KLEN:8B][VLEN:8B]...
*/
// write the len header first
let mut w = Vec::with_capacity(128);
self::raw_serialize_map(map, &mut w)?;
Ok(w)
}
mod se {
use super::*;
use crate::coredb::memstore::Keyspace;
/// Serialize a map into a _writable_ thing
pub fn serialize_map(
map: &Coremap<Data, Data>,
model_code: u8,
) -> Result<Vec<u8>, std::io::Error> {
/*
[1B: Model Mark][LEN:8B][KLEN:8B|VLEN:8B][K][V][KLEN:8B][VLEN:8B]...
*/
// write the len header first
let mut w = Vec::with_capacity(128);
self::raw_serialize_map(map, &mut w, model_code)?;
Ok(w)
}
/// Serialize a map and write it to a provided buffer
pub fn raw_serialize_map<W: Write>(map: &Coremap<Data, Data>, w: &mut W) -> std::io::Result<()> {
unsafe {
w.write_all(raw_byte_repr(&to_64bit_little_endian!(map.len())))?;
// now the keys and values
for kv in map.iter() {
let (k, v) = (kv.key(), kv.value());
w.write_all(raw_byte_repr(&to_64bit_little_endian!(k.len())))?;
w.write_all(raw_byte_repr(&to_64bit_little_endian!(v.len())))?;
w.write_all(k)?;
w.write_all(v)?;
/// Serialize a map and write it to a provided buffer
pub fn raw_serialize_map<W: Write>(
map: &Coremap<Data, Data>,
w: &mut W,
model_code: u8,
) -> std::io::Result<()> {
unsafe {
w.write_all(raw_byte_repr(&model_code))?;
w.write_all(raw_byte_repr(&to_64bit_little_endian!(map.len())))?;
// now the keys and values
for kv in map.iter() {
let (k, v) = (kv.key(), kv.value());
w.write_all(raw_byte_repr(&to_64bit_little_endian!(k.len())))?;
w.write_all(raw_byte_repr(&to_64bit_little_endian!(v.len())))?;
w.write_all(k)?;
w.write_all(v)?;
}
}
Ok(())
}
Ok(())
}
/// Serialize a set and write it to a provided buffer
pub fn raw_serialize_set<W, K, V>(map: &Coremap<K, V>, w: &mut W) -> std::io::Result<()>
where
W: Write,
K: Eq + Hash + AsRef<[u8]>,
{
unsafe {
w.write_all(raw_byte_repr(&to_64bit_little_endian!(map.len())))?;
// now the keys and values
for kv in map.iter() {
let key = kv.key().as_ref();
w.write_all(raw_byte_repr(&to_64bit_little_endian!(key.len())))?;
w.write_all(key)?;
/// Serialize a set and write it to a provided buffer
pub fn raw_serialize_set<W, K, V>(map: &Coremap<K, V>, w: &mut W) -> std::io::Result<()>
where
W: Write,
K: Eq + Hash + AsRef<[u8]>,
{
unsafe {
w.write_all(raw_byte_repr(&to_64bit_little_endian!(map.len())))?;
// now the keys and values
for kv in map.iter() {
let key = kv.key().as_ref();
w.write_all(raw_byte_repr(&to_64bit_little_endian!(key.len())))?;
w.write_all(key)?;
}
}
Ok(())
}
Ok(())
}
pub trait DeserializeFrom {
fn is_expected_len(clen: usize) -> bool;
fn from_slice(slice: &[u8]) -> Self;
/// Generate a partition map for the given keyspace
/// ```text
/// [8B: EXTENT]([8B: LEN][?B: PARTITION ID][1B: Storage type])*
/// ```
pub fn raw_serialize_partmap<W: Write>(w: &mut W, keyspace: &Keyspace) -> std::io::Result<()> {
unsafe {
// extent
w.write_all(raw_byte_repr(&to_64bit_little_endian!(keyspace
.tables
.len())))?;
for table in keyspace.tables.iter() {
// partition ID
w.write_all(raw_byte_repr(&to_64bit_little_endian!(table.key().len())))?;
// now storage type
w.write_all(raw_byte_repr(&table.storage_type()))?;
}
}
Ok(())
}
}
impl<const N: usize> DeserializeFrom for Array<u8, N> {
fn is_expected_len(clen: usize) -> bool {
clen <= N
mod de {
use super::*;
use std::collections::HashMap;
pub trait DeserializeFrom {
fn is_expected_len(clen: usize) -> bool;
fn from_slice(slice: &[u8]) -> Self;
}
fn from_slice(slice: &[u8]) -> Self {
unsafe { Self::from_slice(slice) }
impl<const N: usize> DeserializeFrom for Array<u8, N> {
fn is_expected_len(clen: usize) -> bool {
clen <= N
}
fn from_slice(slice: &[u8]) -> Self {
unsafe { Self::from_slice(slice) }
}
}
}
/// Deserialize a set to a custom type
pub fn deserialize_set_ctype<T>(data: &[u8]) -> Option<HashSet<T>>
where
T: DeserializeFrom + Eq + Hash,
{
// First read the length header
if data.len() < 8 {
// so the file doesn't even have the length header? noice, just return
None
} else {
unsafe {
// so we have 8B. Just unsafe access and transmute it
let len = transmute_len(data.as_ptr());
let mut set = HashSet::with_capacity(len);
// this is what we have left: [KLEN:8B]*
// move 8 bytes ahead since we're done with len
let mut ptr = data.as_ptr().add(8);
let end_ptr = data.as_ptr().add(data.len());
for _ in 0..len {
if (ptr.add(8)) >= end_ptr {
// not enough space and even if there is a len
// there is no value. This is even true for ZSTs
return None;
/// Deserialize a set to a custom type
pub fn deserialize_set_ctype<T>(data: &[u8]) -> Option<HashSet<T>>
where
T: DeserializeFrom + Eq + Hash,
{
// First read the length header
if data.len() < 8 {
// so the file doesn't even have the length header? noice, just return
None
} else {
unsafe {
// so we have 8B. Just unsafe access and transmute it
let len = transmute_len(data.as_ptr());
let mut set = HashSet::with_capacity(len);
// this is what we have left: [KLEN:8B]*
// move 8 bytes ahead since we're done with len
let mut ptr = data.as_ptr().add(8);
let end_ptr = data.as_ptr().add(data.len());
for _ in 0..len {
if (ptr.add(8)) >= end_ptr {
// not enough space and even if there is a len
// there is no value. This is even true for ZSTs
return None;
}
let lenkey = transmute_len(ptr);
ptr = ptr.add(8);
if (ptr.add(lenkey)) > end_ptr {
// not enough data left
return None;
}
if !T::is_expected_len(lenkey) {
return None;
}
// get the key as a raw slice, we've already checked if end_ptr is less
let key = T::from_slice(slice::from_raw_parts(ptr, lenkey));
// move the ptr ahead; done with the key
ptr = ptr.add(lenkey);
// push it in
if !set.insert(key) {
// repeat?; that's not what we wanted
return None;
}
}
let lenkey = transmute_len(ptr);
ptr = ptr.add(8);
if (ptr.add(lenkey)) > end_ptr {
// not enough data left
return None;
if ptr == end_ptr {
Some(set)
} else {
// nope, someone gave us more data
None
}
if !T::is_expected_len(lenkey) {
return None;
}
}
}
/// Deserializes a map-like set which has an 1B _bytemark_ for every entry
pub fn deserialize_set_ctype_bytemark<T>(data: &[u8]) -> Option<HashMap<T, u8>>
where
T: DeserializeFrom + Eq + Hash,
{
// First read the length header
if data.len() < 8 {
// so the file doesn't even have the length header? noice, just return
None
} else {
unsafe {
// so we have 8B. Just unsafe access and transmute it
let len = transmute_len(data.as_ptr());
let mut set = HashMap::with_capacity(len);
// this is what we have left: [KLEN:8B]*
// move 8 bytes ahead since we're done with len
let mut ptr = data.as_ptr().add(8);
let end_ptr = data.as_ptr().add(data.len());
for _ in 0..len {
if (ptr.add(8)) >= end_ptr {
// not enough space and even if there is a len
// there is no value. This is even true for ZSTs
return None;
}
let lenkey = transmute_len(ptr);
ptr = ptr.add(8);
if (ptr.add(lenkey + 1)) > end_ptr {
// not enough data left
return None;
}
if !T::is_expected_len(lenkey) {
return None;
}
// get the key as a raw slice, we've already checked if end_ptr is less
let key = T::from_slice(slice::from_raw_parts(ptr, lenkey));
// move the ptr ahead; done with the key
ptr = ptr.add(lenkey);
let bytemark = ptr::read(ptr);
ptr = ptr.add(1);
// push it in
if set.insert(key, bytemark).is_some() {
// repeat?; that's not what we wanted
return None;
}
}
// get the key as a raw slice, we've already checked if end_ptr is less
let key = T::from_slice(slice::from_raw_parts(ptr, lenkey));
// move the ptr ahead; done with the key
ptr = ptr.add(lenkey);
// push it in
if !set.insert(key) {
// repeat?; that's not what we wanted
return None;
if ptr == end_ptr {
Some(set)
} else {
// nope, someone gave us more data
None
}
}
if ptr == end_ptr {
Some(set)
} else {
// nope, someone gave us more data
None
}
}
}
}
/// Deserialize a file that contains a serialized map
pub fn deserialize_map(data: Vec<u8>) -> Option<Coremap<Data, Data>> {
// First read the length header
if data.len() < 8 {
// so the file doesn't even have the length header? noice, just return
None
} else {
unsafe {
/*
UNSAFE(@ohsayan): Everything done here is unsafely safe. We
reinterpret bits of one type as another. What could be worse?
nah, it's not that bad. We know that the byte representations
would be in the way we expect. If the data is corrupted, we
can guarantee that we won't ever read incorrect lengths of data
and we won't read into others' memory (or corrupt our own)
*/
/// Deserialize a file that contains a serialized map. This also returns the model code
pub fn deserialize_map(data: Vec<u8>) -> Option<(Coremap<Data, Data>, u8)> {
// First read the length header
if data.len() < 9 {
// so the file doesn't even have the length/model header? noice, just return
None
} else {
unsafe {
/*
UNSAFE(@ohsayan): Everything done here is unsafely safe. We
reinterpret bits of one type as another. What could be worse?
nah, it's not that bad. We know that the byte representations
would be in the way we expect. If the data is corrupted, we
can guarantee that we won't ever read incorrect lengths of data
and we won't read into others' memory (or corrupt our own)
*/
let mut ptr = data.as_ptr();
let modelcode: u8 = ptr::read(ptr);
// so we have 8B. Just unsafe access and transmute it; nobody cares
let len = transmute_len(data.as_ptr());
let hm = Coremap::with_capacity(len);
// this is what we have left: [KLEN:8B][VLEN:8B]
// move 8 bytes ahead since we're done with len
let mut ptr = data.as_ptr().add(8);
let end_ptr = data.as_ptr().add(data.len());
for _ in 0..len {
if (ptr.add(16)) >= end_ptr {
// not enough space
// model check
if modelcode > 3 {
// this model isn't supposed to have more than 3. Corrupted data
return None;
}
let lenkey = transmute_len(ptr);
ptr = ptr.add(8);
let lenval = transmute_len(ptr);
ptr = ptr.add(1);
// so we have 8B. Just unsafe access and transmute it; nobody cares
let len = transmute_len(ptr);
// move 8 bytes ahead since we're done with len
ptr = ptr.add(8);
if (ptr.add(lenkey + lenval)) > end_ptr {
// not enough data left
return None;
let hm = Coremap::with_capacity(len);
// this is what we have left: [KLEN:8B][VLEN:8B]
let end_ptr = data.as_ptr().add(data.len());
for _ in 0..len {
if (ptr.add(16)) >= end_ptr {
// not enough space
return None;
}
let lenkey = transmute_len(ptr);
ptr = ptr.add(8);
let lenval = transmute_len(ptr);
ptr = ptr.add(8);
if (ptr.add(lenkey + lenval)) > end_ptr {
// not enough data left
return None;
}
// get the key as a raw slice, we've already checked if end_ptr is less
let key = Data::copy_from_slice(slice::from_raw_parts(ptr, lenkey));
// move the ptr ahead; done with the key
ptr = ptr.add(lenkey);
let val = Data::copy_from_slice(slice::from_raw_parts(ptr, lenval));
// move the ptr ahead; done with the value
ptr = ptr.add(lenval);
// push it in
hm.upsert(key, val);
}
if ptr == end_ptr {
Some((hm, modelcode))
} else {
// nope, someone gave us more data
None
}
// get the key as a raw slice, we've already checked if end_ptr is less
let key = Data::copy_from_slice(slice::from_raw_parts(ptr, lenkey));
// move the ptr ahead; done with the key
ptr = ptr.add(lenkey);
let val = Data::copy_from_slice(slice::from_raw_parts(ptr, lenval));
// move the ptr ahead; done with the value
ptr = ptr.add(lenval);
// push it in
hm.upsert(key, val);
}
if ptr == end_ptr {
Some(hm)
} else {
// nope, someone gave us more data
None
}
}
}
}
#[allow(clippy::needless_return)] // Clippy really misunderstands this
unsafe fn transmute_len(start_ptr: *const u8) -> usize {
little_endian!({
// So we have an LE target
is_64_bit!({
// 64-bit LE
return ptr::read_unaligned(start_ptr.cast());
});
not_64_bit!({
// 32-bit LE
let ret1: u64 = ptr::read_unaligned(start_ptr.cast());
// lossy cast
let ret = ret1 as usize;
if ret > (isize::MAX as usize) {
// this is a backup method for us incase a giant 48-bit address is
// somehow forced to be read on this machine
panic!("RT panic: Very high size for current pointer width");
}
return ret;
#[allow(clippy::needless_return)] // Clippy really misunderstands this
unsafe fn transmute_len(start_ptr: *const u8) -> usize {
little_endian!({
// So we have an LE target
is_64_bit!({
// 64-bit LE
return ptr::read_unaligned(start_ptr.cast());
});
not_64_bit!({
// 32-bit LE
let ret1: u64 = ptr::read_unaligned(start_ptr.cast());
// lossy cast
let ret = ret1 as usize;
if ret > (isize::MAX as usize) {
// this is a backup method for us incase a giant 48-bit address is
// somehow forced to be read on this machine
panic!("RT panic: Very high size for current pointer width");
}
return ret;
});
});
});
big_endian!({
// so we have a BE target
is_64_bit!({
// 64-bit big endian
let ret: usize = ptr::read_unaligned(start_ptr.cast());
// swap byte order
return ret.swap_bytes();
});
not_64_bit!({
// 32-bit big endian
let ret: u64 = ptr::read_unaligned(start_ptr.cast());
// swap byte order and lossy cast
let ret = (ret.swap_bytes()) as usize;
// check if overflow
if ret > (isize::MAX as usize) {
// this is a backup method for us incase a giant 48-bit address is
// somehow forced to be read on this machine
panic!("RT panic: Very high size for current pointer width");
}
return ret;
big_endian!({
// so we have a BE target
is_64_bit!({
// 64-bit big endian
let ret: usize = ptr::read_unaligned(start_ptr.cast());
// swap byte order
return ret.swap_bytes();
});
not_64_bit!({
// 32-bit big endian
let ret: u64 = ptr::read_unaligned(start_ptr.cast());
// swap byte order and lossy cast
let ret = (ret.swap_bytes()) as usize;
// check if overflow
if ret > (isize::MAX as usize) {
// this is a backup method for us incase a giant 48-bit address is
// somehow forced to be read on this machine
panic!("RT panic: Very high size for current pointer width");
}
return ret;
});
});
});
}
}

@ -63,7 +63,7 @@ pub(super) fn raw_generate_preload<W: Write>(w: &mut W, store: &Memstore) -> IoR
// generate the meta segment
#[allow(clippy::identity_op)]
w.write_all(&[META_SEGMENT])?;
super::raw_serialize_set(&store.keyspaces, w)?;
super::se::raw_serialize_set(&store.keyspaces, w)?;
Ok(())
}
@ -72,7 +72,7 @@ pub(super) fn raw_generate_preload<W: Write>(w: &mut W, store: &Memstore) -> IoR
/// ([8B: Len][?B: Label])*
/// ```
/// Generate the partfile (set of table labels) for the given keyspace
pub(super) fn raw_generate_partfile<W: Write>(w: &mut W, store: &Keyspace) -> IoResult<()> {
    super::se::raw_serialize_set(&store.tables, w)
}
/// Reads the preload file and returns a set
@ -89,7 +89,7 @@ pub(super) fn read_preload_raw(preload: Vec<u8>) -> IoResult<HashSet<ObjectID>>
}
}
// all checks complete; time to decode
let ret = super::deserialize_set_ctype(&preload[1..]);
let ret = super::de::deserialize_set_ctype(&preload[1..]);
match ret {
Some(ret) => Ok(ret),
_ => Err(IoError::from(ErrorKind::InvalidData)),
@ -98,7 +98,7 @@ pub(super) fn read_preload_raw(preload: Vec<u8>) -> IoResult<HashSet<ObjectID>>
/// Reads the partfile and returns a set
pub fn read_partfile_raw(partfile: Vec<u8>) -> IoResult<HashSet<ObjectID>> {
match super::deserialize_set_ctype(&partfile) {
match super::de::deserialize_set_ctype(&partfile) {
Some(s) => Ok(s),
None => Err(IoError::from(ErrorKind::InvalidData)),
}

@ -29,9 +29,10 @@ use super::*;
#[test]
fn test_serialize_deserialize_empty() {
    // an empty map must round-trip and preserve its model code
    let cmap = Coremap::new();
    let ser = se::serialize_map(&cmap, 0).unwrap();
    let (de, model_code) = de::deserialize_map(ser).unwrap();
    assert!(de.len() == 0);
    assert_eq!(0, model_code);
}
#[test]
@ -39,12 +40,13 @@ fn test_ser_de_few_elements() {
let cmap = Coremap::new();
cmap.upsert("sayan".into(), "writes code".into());
cmap.upsert("supersayan".into(), "writes super code".into());
let ser = serialize_map(&cmap).unwrap();
let de = deserialize_map(ser).unwrap();
let ser = se::serialize_map(&cmap, 0).unwrap();
let (de, modelcode) = de::deserialize_map(ser).unwrap();
assert!(de.len() == cmap.len());
assert!(de
.iter()
.all(|kv| cmap.get(kv.key()).unwrap().eq(kv.value())));
assert_eq!(modelcode, 0);
}
cfg_test!(
@ -64,12 +66,13 @@ cfg_test!(
.zip(values.iter())
.map(|(k, v)| (Data::from(k.to_owned()), Data::from(v.to_owned())))
.collect();
let ser = serialize_map(&cmap).unwrap();
let de = deserialize_map(ser).unwrap();
let ser = se::serialize_map(&cmap, 0).unwrap();
let (de, modelcode) = de::deserialize_map(ser).unwrap();
assert!(de
.iter()
.all(|kv| cmap.get(kv.key()).unwrap().eq(kv.value())));
assert!(de.len() == cmap.len());
assert_eq!(modelcode, 0);
}
#[test]
@ -86,11 +89,11 @@ cfg_test!(
.zip(values.iter())
.map(|(k, v)| (Data::from(k.to_owned()), Data::from(v.to_owned())))
.collect();
let mut se = serialize_map(&cmap).unwrap();
let mut se = se::serialize_map(&cmap, 0).unwrap();
// random chop
se.truncate(124);
// corrupted
assert!(deserialize_map(se).is_none());
assert!(de::deserialize_map(se).is_none());
}
#[test]
fn test_ser_de_excess_bytes() {
@ -110,11 +113,11 @@ cfg_test!(
.zip(values.iter())
.map(|(k, v)| (Data::from(k.to_owned()), Data::from(v.to_owned())))
.collect();
let mut se = serialize_map(&cmap).unwrap();
let mut se = se::serialize_map(&cmap, 0).unwrap();
// random patch
let patch: Vec<u8> = (0u16..500u16).into_iter().map(|v| (v >> 7) as u8).collect();
se.extend(patch);
assert!(deserialize_map(se).is_none());
assert!(de::deserialize_map(se).is_none());
}
);
@ -125,7 +128,7 @@ fn test_runtime_panic_32bit_or_lower() {
let max = u64::MAX;
let byte_stream = unsafe { raw_byte_repr(&max).to_owned() };
let ptr = byte_stream.as_ptr();
unsafe { transmute_len(ptr) };
unsafe { de::transmute_len(ptr) };
}
mod interface_tests {

Loading…
Cancel
Save