Upgrade all tests to use the driver

next
Sayan Nandan 3 years ago
parent e08cffd187
commit 80fc9e5e9c

11 changed files
Cargo.lock (generated)

@@ -813,7 +813,6 @@ version = "0.5.2"
dependencies = [
"proc-macro2",
"quote",
"rand",
"syn",
]
@@ -839,6 +838,7 @@ dependencies = [
"serde",
"serde_derive",
"sky_macros",
"skytable",
"tokio",
"tokio-openssl",
"toml",
@@ -859,6 +859,15 @@ dependencies = [
"tokio-openssl",
]
[[package]]
name = "skytable"
version = "0.2.3"
source = "git+https://github.com/skytable/client-rust?branch=next#8c04478b630d735002d64a1b9ffc199e6130919c"
dependencies = [
"bytes",
"tokio",
]
[[package]]
name = "slab"
version = "0.4.2"

@@ -8,33 +8,34 @@ build = "build.rs"
[dependencies]
tokio = { version = "1.5.0", features = ["full"] }
bytes = "1.0.1"
libsky = {path ="../libsky"}
libsky = { path = "../libsky" }
bincode = "1.3.3"
parking_lot = "0.11.1"
lazy_static = "1.4.0"
serde_derive = "1.0.125"
futures = "0.3.14"
serde = {version = "1.0.125", features= ["derive"]}
serde = { version = "1.0.125", features = ["derive"] }
toml = "0.5.8"
clap = {version = "2.33.3", features=["yaml"]}
clap = { version = "2.33.3", features = ["yaml"] }
env_logger = "0.8.3"
log = "0.4.14"
chrono = "0.4.19"
regex = "1.5.4"
sky_macros = {path="../sky-macros"}
sky_macros = { path = "../sky-macros" }
tokio-openssl = "0.6.1"
openssl = { version = "0.10.34", features = ["vendored"] }
[target.'cfg(not(target_env = "msvc"))'.dependencies]
jemallocator = "0.3.2"
[target.'cfg(target_os = "windows")'.dependencies]
winapi = {version="0.3.9", features=["fileapi"]}
winapi = { version = "0.3.9", features = ["fileapi"] }
[target.'cfg(unix)'.build-dependencies]
cc = "1.0.67"
[dev-dependencies]
tokio = { version = "1.5.0", features = ["test-util"] }
skytable = { git = "https://github.com/skytable/client-rust", features = ["async"], default-features = false, branch = "next" }
[target.'cfg(unix)'.dependencies]
libc = "0.2.94"

@@ -390,12 +390,6 @@ impl CoreDB {
pub fn get_htable_deep_clone(&self) -> HTable<String, Data> {
(*self.acquire_read().get_ref()).clone()
}
#[cfg(test)]
/// **⚠⚠⚠ This deletes everything stored in the in-memory table**
pub fn finish_db(&self) {
self.acquire_write().unwrap().coremap.clear()
}
}
impl Drop for CoreDB {

@@ -394,34 +394,3 @@ pub async fn run(
}
(db, cloned_descriptor)
}
/// This is a **test only** function
/// This takes a `CoreDB` object so that keys can be modified externally by
/// the testing suite. This will **not save any data to disk**!
/// > **This is not for release builds in any way!**
#[cfg(test)]
pub async fn test_run(listener: TcpListener, db: CoreDB, sig: impl Future) {
let (signal, _) = broadcast::channel(1);
let (terminate_tx, terminate_rx) = mpsc::channel(1);
let mut server = Listener {
listener,
db,
climit: Arc::new(Semaphore::new(50000)),
signal,
terminate_tx,
terminate_rx,
};
tokio::select! {
_ = server.run() => {}
_ = sig => {}
}
let Listener {
mut terminate_rx,
terminate_tx,
signal,
..
} = server;
drop(signal);
drop(terminate_tx);
let _ = terminate_rx.recv().await;
}

File diff suppressed because it is too large

@@ -26,34 +26,4 @@
//! This module contains automated tests for queries
use crate::coredb::CoreDB;
use crate::dbnet;
use std::net::SocketAddr;
use std::sync::Arc;
use tokio::net::TcpListener;
mod kvengine;
/// The function macro returns the name of a function
#[macro_export]
macro_rules! __func__ {
() => {{
fn f() {}
fn typename<T>(_: T) -> &'static str {
std::any::type_name::<T>()
}
let fn_name = typename(f);
&fn_name[..fn_name.len() - 3]
}};
}
async fn start_test_server(port: u16, db: Option<CoreDB>) -> SocketAddr {
let mut socket = String::from("127.0.0.1:");
socket.push_str(&port.to_string());
let db = db.unwrap_or(CoreDB::new_empty(0, Arc::new(None)));
let listener = TcpListener::bind(socket)
.await
.expect(&format!("Failed to bind to port {}", port));
let addr = listener.local_addr().unwrap();
tokio::spawn(async move { dbnet::test_run(listener, db, tokio::signal::ctrl_c()).await });
addr
}

@@ -12,5 +12,4 @@ proc-macro = true
[dependencies]
syn = {version = "1.0.72", features = ["full"]}
quote = "1.0.9"
rand = "0.8.3"
proc-macro2 = "1.0.26"

@@ -35,21 +35,20 @@
//!
//! ### Macros and ghost values
//! - `#[dbtest]`:
//! - `stream` - `tokio::net::TcpListener`
//! - `asyncdb` - `sdb::coredb::CoreDB`
//! - `con` - `skytable::AsyncConnection`
//! - `query` - `skytable::Query`
//!
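For a concrete picture of how these ghost values are consumed, here is a rough sketch of a test module written against them. The module name, action and key are invented for illustration; the `arg`/`run_simple_query` calls mirror the ones generated by this macro later in this diff:

#[sky_macros::dbtest]
mod example_tests {
    // `con` (skytable::AsyncConnection) and `query` (skytable::Query) are
    // injected by the macro expansion, so the body can use them directly
    async fn test_set_a_key() {
        query.arg("set");
        query.arg("examplekey");
        query.arg("examplevalue");
        assert_eq!(
            con.run_simple_query(query).await.unwrap(),
            skytable::Response::Item(skytable::Element::RespCode(skytable::RespCode::Okay))
        );
    }
}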
use proc_macro::TokenStream;
use proc_macro2::Span;
use quote::quote;
use rand::*;
use std::collections::HashSet;
use syn::{self};
/// This parses a function within a `dbtest` module
///
/// This accepts an `async` function and returns a non-`async` version of it - by
/// making the body of the function use the `tokio` runtime
fn parse_dbtest(mut input: syn::ItemFn, rand: u16) -> Result<TokenStream, syn::Error> {
fn parse_dbtest(mut input: syn::ItemFn) -> Result<TokenStream, syn::Error> {
let sig = &mut input.sig;
let fname = sig.ident.to_string();
let body = &input.block;
@@ -64,13 +63,14 @@ fn parse_dbtest(mut input: syn::ItemFn, rand: u16) -> Result<TokenStream, syn::E
}
sig.asyncness = None;
let body = quote! {
let asyncdb = crate::coredb::CoreDB::new_empty(0, std::sync::Arc::new(None));
let addr = crate::tests::start_test_server(#rand, Some(asyncdb.clone())).await;
let mut stream = tokio::net::TcpStream::connect(&addr).await.unwrap();
let mut con = skytable::AsyncConnection::new("127.0.0.1", 2003).await.unwrap();
let mut query = skytable::Query::new();
#body
stream.shutdown().await.unwrap();
asyncdb.finish_db();
drop(asyncdb);
let mut __flush__ = skytable::Query::new(); __flush__.arg("flushdb");
std::assert_eq!(
con.run_simple_query(__flush__).await.unwrap(),
skytable::Response::Item(skytable::Element::RespCode(skytable::RespCode::Okay))
);
};
let result = quote! {
#header
@@ -90,7 +90,7 @@ fn parse_dbtest(mut input: syn::ItemFn, rand: u16) -> Result<TokenStream, syn::E
}
/// This function checks if the current function is eligible to be a test
fn parse_test_sig(input: syn::ItemFn, rand: u16) -> TokenStream {
fn parse_test_sig(input: syn::ItemFn) -> TokenStream {
for attr in &input.attrs {
if attr.path.is_ident("test") {
let msg = "second test attribute is supplied";
@@ -106,7 +106,7 @@ fn parse_test_sig(input: syn::ItemFn, rand: u16) -> TokenStream {
.to_compile_error()
.into();
}
parse_dbtest(input, rand).unwrap_or_else(|e| e.to_compile_error().into())
parse_dbtest(input).unwrap_or_else(|e| e.to_compile_error().into())
}
/// This function accepts an entire module consisting of `dbtest` functions.
@@ -172,25 +172,8 @@ fn parse_test_module(args: TokenStream, item: TokenStream) -> TokenStream {
.to_compile_error()
.into();
}
let mut rng = thread_rng();
let mut in_set = HashSet::<u16>::new();
/*
* As per [this comment](https://github.com/actions/virtual-environments/issues/3275#issuecomment-828214572)
* from the GitHub Actions team, Windows reserves several ports. As our runners are currently hosted on GHA which use Hyper-V VMs
* these ports will be blocked too, and these blocks are the reason behind spurious test failures on Windows.
* As a consequence, we will exclude these port ranges from the random port allocation set
* (by setting them to 'already used' or 'already in in_set').
*/
#[cfg(windows)]
add_reserved_ports(&mut in_set);
let mut result = quote! {};
for item in content {
// We set the port range to the 'dynamic port range' as per IANA's allocation guidelines
let mut rand: u16 = rng.gen_range(49152..=65535);
while in_set.contains(&rand) {
rand = rng.gen_range(49152..=65535);
}
in_set.insert(rand);
match item {
// We just care about functions, so parse functions and ignore everything
// else
@@ -202,7 +185,7 @@ fn parse_test_module(args: TokenStream, item: TokenStream) -> TokenStream {
};
continue;
}
let inp = parse_test_sig(function, rand);
let inp = parse_test_sig(function);
let __tok: syn::ItemFn = syn::parse_macro_input!(inp as syn::ItemFn);
let tok = quote! {
#__tok
@@ -236,20 +219,22 @@ fn parse_string(int: syn::Lit, span: Span, field: &str) -> Result<String, syn::E
#[proc_macro_attribute]
/// The `dbtest` macro starts an async server in the background and is meant for
/// use within the `sdb` or `WORKSPACEROOT/server/` crate. If you use this compiler
/// use within the `skyd` or `WORKSPACEROOT/server/` crate. If you use this compiler
/// macro in any other crate, you'll simply get compilation errors
///
/// Every test will clean up all values once it is over. **These tests should not
/// be run in multi-threaded environments because they often use the same keys**
/// ## _Ghost_ values
/// This macro gives a `tokio::net::TcpStream` accessible by the `stream` variable and a `sdb::coredb::CoreDB`
/// accessible by the `asyncdb` variable.
/// This macro gives a `skytable::AsyncConnection` accessible by the `con` variable and a mutable
/// `skytable::Query` accessible by the `query` variable
///
/// ## Requirements
///
/// The `#[dbtest]` macro expects several things. The calling crate:
/// - should have the `tokio` crate as a dependency and should have the
/// `features` set to full
/// - should have a function to start an async test server, available with the following path:
/// `crate::tests::start_test_server` which accepts an `u16` as the port number
/// - should have the `skytable` crate as a dependency with `features` set to `async`, and the dependency
/// should track the `next` branch of skytable/client-rust
///
/// ## Conventions
/// Since `proc_macro` cannot accept _file-linked_ modules and only accepts inline modules, we have made a workaround, which
@@ -260,78 +245,3 @@ fn parse_string(int: syn::Lit, span: Span, field: &str) -> Result<String, syn::E
pub fn dbtest(args: TokenStream, item: TokenStream) -> TokenStream {
parse_test_module(args, item)
}
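To make the behavior documented above concrete, here is a rough, hand-written approximation of what a single generated test does at runtime. The exact test header and runtime construction emitted by the macro are outside this hunk, so the runtime builder below is an assumption; the connection, query and trailing `flushdb` cleanup mirror the expansion shown earlier in this diff, and the `heya` action is only an example:

#[test]
fn test_heya() {
    tokio::runtime::Builder::new_multi_thread()
        .enable_all()
        .build()
        .unwrap()
        .block_on(async {
            // ghost values set up by the macro
            let mut con = skytable::AsyncConnection::new("127.0.0.1", 2003).await.unwrap();
            let mut query = skytable::Query::new();
            // the original async test body goes here
            query.arg("heya");
            let _ = con.run_simple_query(query).await.unwrap();
            // cleanup appended by the macro: flush the db and assert it succeeded
            let mut __flush__ = skytable::Query::new();
            __flush__.arg("flushdb");
            assert_eq!(
                con.run_simple_query(__flush__).await.unwrap(),
                skytable::Response::Item(skytable::Element::RespCode(skytable::RespCode::Okay))
            );
        });
}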
#[cfg(windows)]
/// We will parse the output from `netsh interface ipv4 show excludedportrange protocol=tcp` on Windows
/// We will then use this to add the port ranges to our `in_set` to not use them
///
/// This is what a typical output of the above command looks like:
/// ```text
///
/// Protocol tcp Port Exclusion Ranges
///
/// Start Port End Port
/// ---------- --------
/// 8501 8501
/// 47001 47001
///
/// * - Administered port exclusions.
///
/// ```
/// So, we first ignore all empty lines and then validate the headers (i.e. "start port", "end port", "protocol tcp", etc.)
/// and then once that's all good -- we parse the start and end ports and then turn it into a range, and run an iterator
/// over every element in this range, pushing elements into our `set` (or `in_set`)
fn add_reserved_ports(set: &mut HashSet<u16>) {
use std::process::Command;
let mut netsh = Command::new("netsh");
netsh
.arg("interface")
.arg("ipv4")
.arg("show")
.arg("excludedportrange")
.arg("protocol=tcp");
let output = netsh.output().unwrap();
if output.stderr.len() != 0 {
panic!("Errored while trying to get port exclusion ranges on Windows");
}
let stdout = String::from_utf8_lossy(&output.stdout);
let lines: Vec<&str> = stdout
.lines()
.filter(|line| line.len() != 0)
.map(|line| line.trim())
.collect();
let mut line_iter = lines.into_iter();
if let Some("Protocol tcp Port Exclusion Ranges") = line_iter.next() {
} else {
panic!("netsh returned bad output on Windows");
}
match (line_iter.next(), line_iter.next()) {
(Some(line2), Some(line3))
if (line2.contains("Start Port") && line2.contains("End Port"))
&& (line3.contains("---")) => {}
_ => panic!("netsh returned bad stdout for parsing port exclusion ranges on Windows"),
}
// Great, so now the stdout is as we expected it to be
// Now we will trim each line, get the port range and parse it into u16s
for line in line_iter {
if line.starts_with("*") {
// The last line should look like `* - Administered port exclusions.`
break;
}
let port_high_low: Vec<u16> = line
.split_whitespace()
.map(|port_string| {
port_string
.parse::<u16>()
.expect("Returned port by netsh was not a valid u16")
})
.collect();
if port_high_low.len() != 2 {
panic!("netsh returned more than three columns instead of the expected two for parsing port exclusion ranges");
}
let (range_low, range_high) = (port_high_low[0], port_high_low[1]);
(range_low..=range_high).into_iter().for_each(|port| {
set.insert(port);
})
}
}
