
Final check for non-empty node

master
Philip O'Toole 9 months ago
parent 8cc93e3ff7
commit d5ea88d785

@@ -107,24 +107,32 @@ func main() {
 		log.Fatalf("failed to create store: %s", err.Error())
 	}
 
-	// Install the auto-restore file, if necessary.
-	if cfg.AutoRestoreFile != "" {
-		log.Printf("auto-restore requested, initiating download")
-		start := time.Now()
-		path, errOK, err := restore.DownloadFile(mainCtx, cfg.AutoRestoreFile)
-		if err != nil {
-			var b strings.Builder
-			b.WriteString(fmt.Sprintf("failed to download auto-restore file: %s", err.Error()))
-			if errOK {
-				b.WriteString(", continuing with node startup anyway")
-				log.Print(b.String())
-			} else {
-				log.Fatal(b.String())
-			}
-		} else {
-			log.Printf("auto-restore file downloaded in %s", time.Since(start))
-			if err := str.SetRestorePath(path); err != nil {
-				log.Fatalf("failed to preload auto-restore data: %s", err.Error())
-			}
-		}
-	}
+	// Install the auto-restore data, if necessary.
+	if cfg.AutoRestoreFile != "" {
+		hd, err := store.HasData(str.Path())
+		if err != nil {
+			log.Fatalf("failed to check for existing data: %s", err.Error())
+		}
+		if hd {
+			log.Printf("auto-restore requested, but data already exists in %s, skipping", str.Path())
+		} else {
+			log.Printf("auto-restore requested, initiating download")
+			start := time.Now()
+			path, errOK, err := restore.DownloadFile(mainCtx, cfg.AutoRestoreFile)
+			if err != nil {
+				var b strings.Builder
+				b.WriteString(fmt.Sprintf("failed to download auto-restore file: %s", err.Error()))
+				if errOK {
+					b.WriteString(", continuing with node startup anyway")
+					log.Print(b.String())
+				} else {
+					log.Fatal(b.String())
+				}
+			} else {
+				log.Printf("auto-restore file downloaded in %s", time.Since(start))
+				if err := str.SetRestorePath(path); err != nil {
+					log.Fatalf("failed to preload auto-restore data: %s", err.Error())
+				}
+			}
+		}
+	}
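The new guard relies on `store.HasData`, which reports whether the node's data directory already contains state. The diff doesn't show that function's implementation, so the following is only a rough sketch, under the assumption that the check amounts to "does the directory hold any non-empty files"; the real rqlite code may inspect specific Raft and SQLite files instead.

package store

import (
	"os"
	"path/filepath"
)

// HasData returns true if the given directory appears to contain
// existing node state. Hypothetical sketch: it reports whether any
// regular, non-empty file exists directly under dir.
func HasData(dir string) (bool, error) {
	entries, err := os.ReadDir(dir)
	if err != nil {
		if os.IsNotExist(err) {
			return false, nil // no directory at all means no data
		}
		return false, err
	}
	for _, e := range entries {
		if e.IsDir() {
			continue
		}
		fi, err := os.Stat(filepath.Join(dir, e.Name()))
		if err != nil {
			return false, err
		}
		if fi.Size() > 0 {
			return true, nil
		}
	}
	return false, nil
}

Whatever the exact check, the important property is that it runs before any download is attempted, so a restarted node that already has state never has that state overwritten by the auto-restore data.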

@@ -112,6 +112,61 @@ class TestAutoRestoreS3(unittest.TestCase):
         os.remove(compressed_tmp_file)
         delete_s3_object(access_key_id, secret_access_key_id, S3_BUCKET, path)
 
+    @unittest.skipUnless(env_present('RQLITE_S3_ACCESS_KEY'), "S3 credentials not available")
+    def test_skipped_if_data(self):
+        '''Test that automatic restores are skipped if the node has data'''
+        node = None
+        cfg = None
+        path = None
+
+        access_key_id = os.environ['RQLITE_S3_ACCESS_KEY']
+        secret_access_key_id = os.environ['RQLITE_S3_SECRET_ACCESS_KEY']
+
+        # Upload a test SQLite file to S3.
+        tmp_file = self.create_sqlite_file()
+        compressed_tmp_file = temp_file()
+        gzip_compress(tmp_file, compressed_tmp_file)
+        path = "restore/"+random_string(32)
+        upload_s3_object(access_key_id, secret_access_key_id, S3_BUCKET, path, compressed_tmp_file)
+
+        # Create the auto-restore config file.
+        auto_restore_cfg = {
+            "version": 1,
+            "type": "s3",
+            "sub" : {
+                "access_key_id": access_key_id,
+                "secret_access_key": secret_access_key_id,
+                "region": S3_BUCKET_REGION,
+                "bucket": S3_BUCKET,
+                "path": path
+            }
+        }
+        cfg = write_random_file(json.dumps(auto_restore_cfg))
+
+        # Create a new node, write some data to it.
+        n0 = Node(RQLITED_PATH, '0')
+        n0.start()
+        n0.wait_for_ready()
+        n0.execute('CREATE TABLE bar (id INTEGER NOT NULL PRIMARY KEY, name TEXT)')
+        n0.execute('INSERT INTO bar(name) VALUES("fiona")')
+        n0.stop()
+
+        # Create a new node, using the directory from the previous node, and check
+        # that data is not restored from S3, which would wipe out the existing data.
+        n1 = Node(RQLITED_PATH, '0', dir=n0.dir, auto_restore=cfg)
+        n1.start()
+        n1.wait_for_ready()
+
+        j = n1.query('SELECT * FROM bar')
+        self.assertEqual(j, d_("{'results': [{'values': [[1, 'fiona']], 'types': ['integer', 'text'], 'columns': ['id', 'name']}]}"))
+
+        deprovision_node(n0)
+        deprovision_node(n1)
+        os.remove(cfg)
+        os.remove(tmp_file)
+        os.remove(compressed_tmp_file)
+        delete_s3_object(access_key_id, secret_access_key_id, S3_BUCKET, path)
+
 class TestAutoBackupS3(unittest.TestCase):
     @unittest.skipUnless(env_present('RQLITE_S3_ACCESS_KEY'), "S3 credentials not available")
     def test_no_compress(self):
