path mess

main
Ziyang Hu 2 years ago
parent 523cc8a2ac
commit 4437800bdf

2
Cargo.lock generated

@ -647,7 +647,7 @@ dependencies = [
[[package]]
name = "cozorocks"
version = "0.1.4"
version = "0.1.5"
dependencies = [
"cc",
"cxx",

@ -118,7 +118,7 @@ document-features = "0.2.6"
rayon = { version = "1.5.3", optional = true }
minreq = { version = "2.6.0", features = ["https-rustls"], optional = true }
tikv-jemallocator-global = { version = "0.5.0", optional = true }
cozorocks = { path = "../cozorocks", version = "0.1.4", optional = true }
cozorocks = { path = "../cozorocks", version = "0.1.5", optional = true }
sled = { version = "0.34.7", optional = true }
tikv-client = { version = "0.1.0", optional = true }
tokio = { version = "1.21.2", optional = true }

@ -32,6 +32,7 @@
#![allow(clippy::too_many_arguments)]
use std::collections::BTreeMap;
use std::path::Path;
#[allow(unused_imports)]
use std::time::Instant;
@ -115,12 +116,12 @@ impl DbInstance {
/// `path` is ignored for `mem` and `tikv` engines.
/// `options` is ignored for every engine except `tikv`.
#[allow(unused_variables)]
pub fn new(engine: &str, path: &str, options: &str) -> Result<Self> {
pub fn new(engine: &str, path: impl AsRef<Path>, options: &str) -> Result<Self> {
let options = if options.is_empty() { "{}" } else { options };
Ok(match engine {
"mem" => Self::Mem(new_cozo_mem()?),
#[cfg(feature = "storage-sqlite")]
"sqlite" => Self::Sqlite(new_cozo_sqlite(path.to_string())?),
"sqlite" => Self::Sqlite(new_cozo_sqlite(path)?),
#[cfg(feature = "storage-rocksdb")]
"rocksdb" => Self::RocksDb(new_cozo_rocksdb(path)?),
#[cfg(feature = "storage-sled")]
@ -290,7 +291,7 @@ impl DbInstance {
self.import_relations(j_obj)
}
/// Dispatcher method. See [crate::Db::backup_db].
pub fn backup_db(&self, out_file: String) -> Result<()> {
pub fn backup_db(&self, out_file: impl AsRef<Path>) -> Result<()> {
match self {
DbInstance::Mem(db) => db.backup_db(out_file),
#[cfg(feature = "storage-sqlite")]
@ -304,14 +305,14 @@ impl DbInstance {
}
}
/// Backup the running database into an Sqlite file, with JSON string return value
pub fn backup_db_str(&self, out_file: &str) -> String {
match self.backup_db(out_file.to_string()) {
pub fn backup_db_str(&self, out_file: impl AsRef<Path>) -> String {
match self.backup_db(out_file) {
Ok(_) => json!({"ok": true}).to_string(),
Err(err) => json!({"ok": false, "message": err.to_string()}).to_string(),
}
}
/// Restore from an Sqlite backup
pub fn restore_backup(&self, in_file: &str) -> Result<()> {
pub fn restore_backup(&self, in_file: impl AsRef<Path>) -> Result<()> {
match self {
DbInstance::Mem(db) => db.restore_backup(in_file),
#[cfg(feature = "storage-sqlite")]
@ -325,14 +326,14 @@ impl DbInstance {
}
}
/// Restore from an Sqlite backup, with JSON string return value
pub fn restore_backup_str(&self, in_file: &str) -> String {
pub fn restore_backup_str(&self, in_file: impl AsRef<Path>) -> String {
match self.restore_backup(in_file) {
Ok(_) => json!({"ok": true}).to_string(),
Err(err) => json!({"ok": false, "message": err.to_string()}).to_string(),
}
}
/// Dispatcher method. See [crate::Db::import_from_backup].
pub fn import_from_backup(&self, in_file: &str, relations: &[String]) -> Result<()> {
pub fn import_from_backup(&self, in_file: impl AsRef<Path>, relations: &[String]) -> Result<()> {
match self {
DbInstance::Mem(db) => db.import_from_backup(in_file, relations),
#[cfg(feature = "storage-sqlite")]

@ -10,6 +10,7 @@ use std::collections::btree_map::Entry;
use std::collections::BTreeMap;
use std::default::Default;
use std::fmt::{Debug, Formatter};
use std::path::Path;
use std::sync::atomic::{AtomicBool, AtomicU64, Ordering};
use std::sync::{Arc, Mutex, RwLock};
#[allow(unused_imports)]
@ -342,7 +343,7 @@ impl<'s, S: Storage<'s>> Db<S> {
}
/// Backup the running database into an Sqlite file
#[allow(unused_variables)]
pub fn backup_db(&'s self, out_file: String) -> Result<()> {
pub fn backup_db(&'s self, out_file: impl AsRef<Path>) -> Result<()> {
#[cfg(feature = "storage-sqlite")]
{
let sqlite_db = crate::new_cozo_sqlite(out_file)?;
@ -360,10 +361,10 @@ impl<'s, S: Storage<'s>> Db<S> {
}
/// Restore from an Sqlite backup
#[allow(unused_variables)]
pub fn restore_backup(&'s self, in_file: &str) -> Result<()> {
pub fn restore_backup(&'s self, in_file: impl AsRef<Path>) -> Result<()> {
#[cfg(feature = "storage-sqlite")]
{
let sqlite_db = crate::new_cozo_sqlite(in_file.to_string())?;
let sqlite_db = crate::new_cozo_sqlite(in_file)?;
let mut s_tx = sqlite_db.transact()?;
{
let mut tx = self.transact()?;
@ -391,13 +392,13 @@ impl<'s, S: Storage<'s>> Db<S> {
/// Note that triggers are _not_ run for the relations, if any exists.
/// If you need to activate triggers, use queries with parameters.
#[allow(unused_variables)]
pub fn import_from_backup(&'s self, in_file: &str, relations: &[String]) -> Result<()> {
pub fn import_from_backup(&'s self, in_file: impl AsRef<Path>, relations: &[String]) -> Result<()> {
#[cfg(not(feature = "storage-sqlite"))]
bail!("backup requires the 'storage-sqlite' feature to be enabled");
#[cfg(feature = "storage-sqlite")]
{
let source_db = crate::new_cozo_sqlite(in_file.to_string())?;
let source_db = crate::new_cozo_sqlite(in_file)?;
let mut src_tx = source_db.transact()?;
let mut dst_tx = self.transact_write()?;

@ -29,12 +29,16 @@ const CURRENT_STORAGE_VERSION: u64 = 1;
/// This is currently the fastest persistent storage and it can
/// sustain huge concurrency.
/// Supports concurrent readers and writers.
pub fn new_cozo_rocksdb(path: impl AsRef<str>) -> Result<Db<RocksDbStorage>> {
pub fn new_cozo_rocksdb(path: impl AsRef<Path>) -> Result<Db<RocksDbStorage>> {
let builder = DbBuilder::default().path(path.as_ref());
let path = builder.opts.db_path;
fs::create_dir_all(path)
.map_err(|err| BadDbInit(format!("cannot create directory {}: {}", path, err)))?;
let path_buf = PathBuf::from(path);
fs::create_dir_all(path.as_ref()).map_err(|err| {
BadDbInit(format!(
"cannot create directory {}: {}",
path.as_ref().to_string_lossy(),
err
))
})?;
let path_buf = PathBuf::from(path.as_ref());
let is_new = {
let mut manifest_path = path_buf.clone();
@ -209,7 +213,7 @@ impl<'s> StoreTx<'s> for RocksDbTx {
inner,
upper_bound: upper.to_vec(),
next_bound: lower.to_owned(),
valid_at
valid_at,
})
}
@ -274,12 +278,11 @@ impl Iterator for RocksDbIterator {
}
}
pub(crate) struct RocksDbSkipIterator {
inner: DbIter,
upper_bound: Vec<u8>,
next_bound: Vec<u8>,
valid_at: ValidityTs
valid_at: ValidityTs,
}
impl RocksDbSkipIterator {
@ -314,7 +317,6 @@ impl Iterator for RocksDbSkipIterator {
}
}
pub(crate) struct RocksDbIteratorRaw {
inner: DbIter,
started: bool,

@ -6,6 +6,7 @@
* You can obtain one at https://mozilla.org/MPL/2.0/.
*/
use std::path::{Path, PathBuf};
use std::sync::{Arc, Mutex, RwLock, RwLockReadGuard, RwLockWriteGuard};
use ::sqlite::Connection;
@ -23,7 +24,7 @@ use crate::utils::swap_option_result;
#[derive(Clone)]
pub struct SqliteStorage {
lock: Arc<RwLock<()>>,
name: String,
name: PathBuf,
pool: Arc<Mutex<Vec<ConnectionWithFullMutex>>>,
}
@ -35,8 +36,8 @@ pub struct SqliteStorage {
///
/// You must provide a disk-based path: `:memory:` is not OK.
/// If you want a pure memory storage, use [`new_cozo_mem`](crate::new_cozo_mem).
pub fn new_cozo_sqlite(path: String) -> Result<crate::Db<SqliteStorage>> {
if path.is_empty() {
pub fn new_cozo_sqlite(path: impl AsRef<Path>) -> Result<crate::Db<SqliteStorage>> {
if path.as_ref().to_str() == Some("") {
bail!("empty path for sqlite storage")
}
let conn = Connection::open_with_full_mutex(&path).into_diagnostic()?;
@ -52,7 +53,7 @@ pub fn new_cozo_sqlite(path: String) -> Result<crate::Db<SqliteStorage>> {
let ret = crate::Db::new(SqliteStorage {
lock: Arc::new(Default::default()),
name: path,
name: PathBuf::from(path.as_ref()),
pool: Arc::new(Mutex::new(vec![])),
})?;

@ -1,6 +1,6 @@
[package]
name = "cozorocks"
version = "0.1.4"
version = "0.1.5"
edition = "2021"
license = "MPL-2.0"
authors = ["Ziyang Hu"]

@ -66,7 +66,7 @@ shared_ptr <RocksDbBridge> open_db(const DbOpts &opts, RocksDbStatus &status) {
DBOptions loaded_db_opt;
std::vector<ColumnFamilyDescriptor> loaded_cf_descs;
ConfigOptions config_options;
string options_path = string(opts.options_path);
string options_path = convert_vec_to_string(opts.options_path);
Status s = LoadOptionsFromFile(config_options, options_path, &loaded_db_opt,
&loaded_cf_descs);
if (!s.ok()) {
@ -122,7 +122,7 @@ shared_ptr <RocksDbBridge> open_db(const DbOpts &opts, RocksDbStatus &status) {
shared_ptr <RocksDbBridge> db = make_shared<RocksDbBridge>();
db->db_path = string(opts.db_path);
db->db_path = convert_vec_to_string(opts.db_path);
TransactionDB *txn_db = nullptr;
write_status(

@ -13,6 +13,10 @@ inline Slice convert_slice(RustBytes d) {
return {reinterpret_cast<const char *>(d.data()), d.size()};
}
// Copy the raw bytes of a Rust Vec<u8> into an owned C++ string.
// Used to carry path bytes (not necessarily UTF-8) across the FFI boundary.
inline string convert_vec_to_string(const rust::Vec<uint8_t> &d) {
    const char *bytes = reinterpret_cast<const char *>(d.data());
    return string(bytes, d.size());
}
// Copy the raw bytes of a Rust byte slice into an owned C++ string.
inline string convert_slice_to_string(RustBytes d) {
    const char *bytes = reinterpret_cast<const char *>(d.data());
    return string(bytes, d.size());
}

@ -7,20 +7,43 @@
*/
use cxx::*;
use std::path::Path;
use crate::bridge::ffi::*;
use crate::bridge::tx::TxBuilder;
#[derive(Default, Clone)]
pub struct DbBuilder<'a> {
pub opts: DbOpts<'a>,
pub struct DbBuilder {
pub opts: DbOpts,
}
impl<'a> Default for DbOpts<'a> {
/// Convert a filesystem path into raw bytes for transport across the
/// cxx FFI boundary (the `DbOpts` shared struct carries paths as `Vec<u8>`).
///
/// On Windows, `OsStr` is a sequence of UTF-16 code units; each `u16` is
/// flattened into its two native-endian bytes. On other platforms the
/// OS string's native byte representation is used directly.
/// The C++ side reassembles the bytes into a `std::string`.
fn path2buf(path: impl AsRef<Path>) -> Vec<u8> {
    #[cfg(target_os = "windows")]
    {
        use std::os::windows::ffi::OsStrExt;
        // Each u16 code unit contributes exactly two bytes, in native
        // endianness; `[u8; 2]` is IntoIterator, so flat_map suffices —
        // no Option juggling needed.
        path.as_ref()
            .as_os_str()
            .encode_wide()
            .flat_map(u16::to_ne_bytes)
            .collect()
    }
    #[cfg(not(target_os = "windows"))]
    {
        use std::os::unix::ffi::OsStrExt;
        path.as_ref().as_os_str().as_bytes().to_vec()
    }
}
impl Default for DbOpts {
fn default() -> Self {
Self {
db_path: "",
options_path: "",
db_path: vec![],
options_path: vec![],
prepare_for_bulk_load: false,
increase_parallelism: 0,
optimize_level_style_compaction: false,
@ -43,13 +66,13 @@ impl<'a> Default for DbOpts<'a> {
}
}
impl<'a> DbBuilder<'a> {
pub fn path(mut self, path: &'a str) -> Self {
self.opts.db_path = path;
impl DbBuilder {
pub fn path(mut self, path: impl AsRef<Path>) -> Self {
self.opts.db_path = path2buf(path);
self
}
pub fn options_path(mut self, options_path: &'a str) -> Self {
self.opts.options_path = options_path;
pub fn options_path(mut self, path: impl AsRef<Path>) -> Self {
self.opts.options_path = path2buf(path);
self
}
pub fn prepare_for_bulk_load(mut self, val: bool) -> Self {
@ -109,10 +132,7 @@ impl<'a> DbBuilder<'a> {
pub fn build(self) -> Result<RocksDb, RocksDbStatus> {
let mut status = RocksDbStatus::default();
let result = open_db(
&self.opts,
&mut status,
);
let result = open_db(&self.opts, &mut status);
if status.is_ok() {
Ok(RocksDb { inner: result })
} else {

@ -20,9 +20,9 @@ pub(crate) mod tx;
#[cxx::bridge]
pub(crate) mod ffi {
#[derive(Debug, Clone)]
struct DbOpts<'a> {
pub db_path: &'a str,
pub options_path: &'a str,
struct DbOpts {
pub db_path: Vec<u8>,
pub options_path: Vec<u8>,
pub prepare_for_bulk_load: bool,
pub increase_parallelism: usize,
pub optimize_level_style_compaction: bool,
@ -122,8 +122,8 @@ pub(crate) mod ffi {
type RocksDbBridge;
fn get_db_path(self: &RocksDbBridge) -> &CxxString;
fn open_db<'a>(
builder: &'a DbOpts<'a>,
fn open_db(
builder: &DbOpts,
status: &mut RocksDbStatus,
) -> SharedPtr<RocksDbBridge>;
fn transact(self: &RocksDbBridge) -> UniquePtr<TxBridge>;

Loading…
Cancel
Save