
Fix auto-restart tests

master
Philip O'Toole 8 months ago
parent c0d88881bb
commit a6313efcd1
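
Every test in the diff below builds an S3 auto-backup configuration, writes it to a temporary file with write_random_file(json.dumps(auto_backup_cfg)), and passes that file to the node via auto_backup=cfg. A minimal sketch of that configuration, using placeholder values and only the keys that appear in the hunks below:

# Illustrative auto-backup config; values are placeholders, key names come
# from the test bodies in this commit.
auto_backup_cfg = {
    "version": 1,
    "type": "s3",
    "interval": "100ms",   # how often the uploader checks for new data to back up
    "no_compress": True,   # omit (or set False) to upload gzip-compressed backups
    "vacuum": True,        # only set in the vacuum test
    "sub": {
        "access_key_id": "<RQLITE_S3_ACCESS_KEY>",
        "secret_access_key": "<RQLITE_S3_SECRET_ACCESS_KEY>",
        "region": "<S3_BUCKET_REGION>",
        "bucket": "<S3_BUCKET>",
        "path": "<random object key>"
    }
}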

@@ -199,28 +199,24 @@ class TestAutoBackupS3(unittest.TestCase):
cfg = write_random_file(json.dumps(auto_backup_cfg))
# Create a node, enable automatic backups, and start it. Then
# create a table and insert a row. Wait for a backup to happen.
# create a table and insert a row. Wait for an initial backup
# to happen because there is no data in the cloud.
node = Node(RQLITED_PATH, '0', auto_backup=cfg)
node.start()
node.wait_for_leader()
i = node.num_auto_backups()[0]
node.wait_for_upload(1)
node.execute('CREATE TABLE foo (id INTEGER NOT NULL PRIMARY KEY, name TEXT)')
node.execute('INSERT INTO foo(name) VALUES("fiona")')
node.wait_for_all_fsm()
counts = node.wait_for_upload(i+1)
node.wait_for_upload(2)
# Wait and check that no further backups have been made.
time.sleep(2)
self.assertEqual(node.num_auto_backups()[0], counts[0])
node.wait_until_uploads_idle()
# Write one more row, confirm another backup is made.
node.execute('INSERT INTO foo(name) VALUES("fiona")')
node.wait_for_all_fsm()
counts = node.wait_for_upload(i+2)
node.wait_for_upload(3)
# Wait and check that no further backups have been made.
time.sleep(2)
self.assertEqual(node.num_auto_backups()[0], counts[0])
node.wait_until_uploads_idle()
# Download the backup file from S3 and check it.
backup_data = download_s3_object(access_key_id, secret_access_key_id,
@@ -230,7 +226,7 @@ class TestAutoBackupS3(unittest.TestCase):
c = conn.cursor()
c.execute('SELECT * FROM foo')
rows = c.fetchall()
self.assertEqual(len(rows), 2)
self.assertEqual(len(rows), 1)
self.assertEqual(rows[0][1], 'fiona')
conn.close()
@@ -240,11 +236,13 @@ class TestAutoBackupS3(unittest.TestCase):
delete_s3_object(access_key_id, secret_access_key_id, S3_BUCKET, path)
@unittest.skipUnless(env_present('RQLITE_S3_ACCESS_KEY'), "S3 credentials not available")
def test_leader_only(self):
'''Test that automatic backups to S3 only happen on the leader'''
def test_no_compress_vacuum(self):
'''Test that automatic backups to S3 work with compression off and vacuum on'''
node = None
cfg = None
path = None
backup_file = None
access_key_id = os.environ['RQLITE_S3_ACCESS_KEY']
secret_access_key_id = os.environ['RQLITE_S3_SECRET_ACCESS_KEY']
@@ -255,6 +253,7 @@ class TestAutoBackupS3(unittest.TestCase):
"type": "s3",
"interval": "100ms",
"no_compress": True,
"vacuum": True,
"sub" : {
"access_key_id": access_key_id,
"secret_access_key": secret_access_key_id,
@@ -265,37 +264,46 @@ class TestAutoBackupS3(unittest.TestCase):
}
cfg = write_random_file(json.dumps(auto_backup_cfg))
# Create a cluster with automatic backups enabled.
leader = Node(RQLITED_PATH, '0', auto_backup=cfg)
leader.start()
leader.wait_for_leader()
follower = Node(RQLITED_PATH, '1', auto_backup=cfg)
follower.start(join=leader.RaftAddr())
follower.wait_for_leader()
# Create a node, enable automatic backups, and start it. Then
# create a table and insert a row. An initial backup will happen
# because there is no data in the cloud.
node = Node(RQLITED_PATH, '0', auto_backup=cfg)
node.start()
node.wait_for_leader()
node.wait_for_upload(1)
# Then create a table and insert a row. Wait for a backup to happen.
i = leader.num_auto_backups()[0]
leader.execute('CREATE TABLE foo (id INTEGER NOT NULL PRIMARY KEY, name TEXT)')
leader.execute('INSERT INTO foo(name) VALUES("fiona")')
leader.wait_for_all_fsm()
leader.wait_for_upload(i+1)
# Then create a table and insert a row. Wait for another backup to happen.
node.execute('CREATE TABLE foo (id INTEGER NOT NULL PRIMARY KEY, name TEXT)')
node.wait_for_all_fsm()
j = node.query('SELECT count(*) FROM foo', level='strong')
self.assertEqual(j, d_("{'results': [{'values': [[0]], 'types': ['integer'], 'columns': ['count(*)']}]}"))
node.wait_for_upload(2)
# Confirm that the follower has performed no backups.
time.sleep(5)
self.assertEqual(follower.num_auto_backups()[0], 0)
# Download the backup file from S3 and check it.
backup_data = download_s3_object(access_key_id, secret_access_key_id,
S3_BUCKET, path)
backup_file = write_random_file(backup_data, mode='wb')
conn = sqlite3.connect(backup_file)
c = conn.cursor()
c.execute('SELECT * FROM foo')
rows = c.fetchall()
self.assertEqual(len(rows), 0)
conn.close()
delete_s3_object(access_key_id, secret_access_key_id, S3_BUCKET, path)
deprovision_node(leader)
deprovision_node(follower)
deprovision_node(node)
os.remove(cfg)
os.remove(backup_file)
delete_s3_object(access_key_id, secret_access_key_id, S3_BUCKET, path)
@unittest.skipUnless(env_present('RQLITE_S3_ACCESS_KEY'), "S3 credentials not available")
def test_no_upload_restart(self):
'''Test that restarting a node that has already uploaded doesn't upload again'''
def test_compress(self):
'''Test that automatic backups to S3 work with compression on'''
node = None
cfg = None
path = None
compressed_backup_file = None
backup_file = None
access_key_id = os.environ['RQLITE_S3_ACCESS_KEY']
secret_access_key_id = os.environ['RQLITE_S3_SECRET_ACCESS_KEY']
@@ -305,57 +313,58 @@ class TestAutoBackupS3(unittest.TestCase):
"version": 1,
"type": "s3",
"interval": "100ms",
"no_compress": True,
"sub" : {
"access_key_id": access_key_id,
"secret_access_key": secret_access_key_id,
"region": S3_BUCKET_REGION,
"bucket": S3_BUCKET,
"access_key_id": access_key_id,
"secret_access_key": secret_access_key_id,
"region": S3_BUCKET_REGION,
"bucket": S3_BUCKET,
"path": path
}
}
cfg = write_random_file(json.dumps(auto_backup_cfg))
# Create a cluster with automatic backups enabled.
node = Node(RQLITED_PATH, '0', auto_backup=cfg, raft_snap_int='300s')
# Create a node, enable automatic backups, and start it. Then
# create a table and insert a row. Wait for a backup to happen
# because there is no data in the cloud.
node = Node(RQLITED_PATH, '0', auto_backup=cfg)
node.start()
node.wait_for_leader()
node.wait_for_upload(1)
# Then create a table and insert a row. Wait for a backup to happen.
i = node.num_auto_backups()[0]
# Then create a table and insert rows. Wait for another backup to happen.
node.execute('CREATE TABLE foo (id INTEGER NOT NULL PRIMARY KEY, name TEXT)')
node.execute('INSERT INTO foo(name) VALUES("fiona")')
node.wait_for_all_fsm()
node.wait_for_upload(i+1)
# Restart the node, and confirm no backup is uploaded just due to the restart.
node.stop(graceful=True)
node.start()
node.wait_for_leader()
node.wait_for_upload_skipped_sum(1)
for _ in range(100):
node.execute('INSERT INTO foo(name) VALUES("fiona")')
j = node.query('SELECT count(*) FROM foo', level='strong')
self.assertEqual(j, d_("{'results': [{'values': [[100]], 'types': ['integer'], 'columns': ['count(*)']}]}"))
# Insert a row, make sure a backup will happen now.
i = node.num_auto_backups()[0]
node.execute('INSERT INTO foo(name) VALUES("fiona")')
node.wait_for_all_fsm()
node.wait_for_upload(i+1)
node.wait_until_uploads_idle()
# Make sure we go back to skipping backups.
i = node.num_auto_backups()[0]
node.wait_for_upload_skipped(i+1)
# Download the backup file from S3 and check it.
backup_data = download_s3_object(access_key_id, secret_access_key_id,
S3_BUCKET, path)
compressed_backup_file = write_random_file(backup_data, mode='wb')
backup_file = gunzip_file(compressed_backup_file)
conn = sqlite3.connect(backup_file)
c = conn.cursor()
c.execute('SELECT count(*) FROM foo WHERE name="fiona"')
rows = c.fetchall()
self.assertEqual(len(rows), 1)
self.assertEqual(rows[0][0], 100)
conn.close()
delete_s3_object(access_key_id, secret_access_key_id, S3_BUCKET, path)
deprovision_node(node)
os.remove(cfg)
os.remove(compressed_backup_file)
os.remove(backup_file)
delete_s3_object(access_key_id, secret_access_key_id, S3_BUCKET, path)
@unittest.skipUnless(env_present('RQLITE_S3_ACCESS_KEY'), "S3 credentials not available")
def test_no_compress_vacuum(self):
'''Test that automatic backups to S3 work with compression off and vacuum on'''
def test_leader_only(self):
'''Test that automatic backups to S3 only happen on the leader'''
node = None
cfg = None
path = None
backup_file = None
access_key_id = os.environ['RQLITE_S3_ACCESS_KEY']
secret_access_key_id = os.environ['RQLITE_S3_SECRET_ACCESS_KEY']
@@ -366,7 +375,6 @@ class TestAutoBackupS3(unittest.TestCase):
"type": "s3",
"interval": "100ms",
"no_compress": True,
"vacuum": True,
"sub" : {
"access_key_id": access_key_id,
"secret_access_key": secret_access_key_id,
@@ -377,45 +385,36 @@ class TestAutoBackupS3(unittest.TestCase):
}
cfg = write_random_file(json.dumps(auto_backup_cfg))
# Create a node, enable automatic backups, and start it. Then
# create a table and insert a row. Wait for a backup to happen.
node = Node(RQLITED_PATH, '0', auto_backup=cfg)
node.start()
node.wait_for_leader()
i = node.num_auto_backups()[0]
node.execute('CREATE TABLE foo (id INTEGER NOT NULL PRIMARY KEY, name TEXT)')
node.execute('INSERT INTO foo(name) VALUES("fiona")')
node.wait_for_all_fsm()
j = node.query('SELECT count(*) FROM foo', level='strong')
self.assertEqual(j, d_("{'results': [{'values': [[1]], 'types': ['integer'], 'columns': ['count(*)']}]}"))
node.wait_for_upload(i+1)
# Create a cluster with automatic backups enabled.
leader = Node(RQLITED_PATH, '0', auto_backup=cfg)
leader.start()
leader.wait_for_leader()
leader.wait_for_upload(1)
# Download the backup file from S3 and check it.
backup_data = download_s3_object(access_key_id, secret_access_key_id,
S3_BUCKET, path)
backup_file = write_random_file(backup_data, mode='wb')
conn = sqlite3.connect(backup_file)
c = conn.cursor()
c.execute('SELECT * FROM foo')
rows = c.fetchall()
self.assertEqual(len(rows), 1)
self.assertEqual(rows[0][1], 'fiona')
conn.close()
follower = Node(RQLITED_PATH, '1', auto_backup=cfg)
follower.start(join=leader.RaftAddr())
follower.wait_for_leader()
# Then create a table and insert a row. Wait for a backup to happen.
leader.execute('CREATE TABLE foo (id INTEGER NOT NULL PRIMARY KEY, name TEXT)')
leader.wait_for_all_fsm()
leader.wait_for_upload(2)
# Confirm that the follower has performed no backups.
time.sleep(5)
self.assertEqual(follower.num_auto_backups()[0], 0)
deprovision_node(node)
os.remove(cfg)
os.remove(backup_file)
delete_s3_object(access_key_id, secret_access_key_id, S3_BUCKET, path)
deprovision_node(leader)
deprovision_node(follower)
os.remove(cfg)
@unittest.skipUnless(env_present('RQLITE_S3_ACCESS_KEY'), "S3 credentials not available")
def test_compress(self):
'''Test that automatic backups to S3 work with compression on'''
def test_no_upload_restart(self):
'''Test that restarting a node that has already uploaded doesn't upload again'''
node = None
cfg = None
path = None
compressed_backup_file = None
backup_file = None
access_key_id = os.environ['RQLITE_S3_ACCESS_KEY']
secret_access_key_id = os.environ['RQLITE_S3_SECRET_ACCESS_KEY']
@@ -425,48 +424,44 @@ class TestAutoBackupS3(unittest.TestCase):
"version": 1,
"type": "s3",
"interval": "100ms",
"no_compress": True,
"sub" : {
"access_key_id": access_key_id,
"secret_access_key": secret_access_key_id,
"region": S3_BUCKET_REGION,
"bucket": S3_BUCKET,
"access_key_id": access_key_id,
"secret_access_key": secret_access_key_id,
"region": S3_BUCKET_REGION,
"bucket": S3_BUCKET,
"path": path
}
}
cfg = write_random_file(json.dumps(auto_backup_cfg))
# Create a node, enable automatic backups, and start it. Then
# create a table and insert a row. Wait for a backup to happen.
# Create a node with automatic backups enabled. An initial
# backup will happen because there is no data in the cloud.
node = Node(RQLITED_PATH, '0', auto_backup=cfg)
node.start()
node.wait_for_leader()
i = node.num_auto_backups()[0]
node.wait_for_upload(1)
# Then create a table and insert a row. Wait for another backup to happen.
node.execute('CREATE TABLE foo (id INTEGER NOT NULL PRIMARY KEY, name TEXT)')
for _ in range(100):
node.execute('INSERT INTO foo(name) VALUES("fiona")')
node.wait_for_all_fsm()
j = node.query('SELECT count(*) FROM foo', level='strong')
self.assertEqual(j, d_("{'results': [{'values': [[100]], 'types': ['integer'], 'columns': ['count(*)']}]}"))
node.wait_for_upload(i+1, timeout=10)
node.wait_for_upload(2)
# Download the backup file from S3 and check it.
backup_data = download_s3_object(access_key_id, secret_access_key_id,
S3_BUCKET, path)
compressed_backup_file = write_random_file(backup_data, mode='wb')
backup_file = gunzip_file(compressed_backup_file)
conn = sqlite3.connect(backup_file)
c = conn.cursor()
c.execute('SELECT count(*) FROM foo WHERE name="fiona"')
rows = c.fetchall()
self.assertEqual(len(rows), 1)
self.assertEqual(rows[0][0], 100)
conn.close()
# Restart the node, and confirm no backup is uploaded due to the restart.
node.stop(graceful=True)
node.start()
node.wait_for_leader()
node.wait_for_upload_skipped_sum(1)
# Insert a row, make sure a backup will happen now.
node.execute('INSERT INTO foo(name) VALUES("fiona")')
node.wait_for_upload(1)
# Make sure we go back to skipping backups.
node.wait_until_uploads_idle()
delete_s3_object(access_key_id, secret_access_key_id, S3_BUCKET, path)
deprovision_node(node)
os.remove(cfg)
os.remove(compressed_backup_file)
os.remove(backup_file)
delete_s3_object(access_key_id, secret_access_key_id, S3_BUCKET, path)
if __name__ == "__main__":
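
The S3 tests above are skipped unless credentials are present in the environment (see the env_present checks). Assuming the file is importable as a module (the module name below is an assumption), just this class can be run with something like:

# Illustrative invocation; adjust the module path to wherever this test
# file actually lives in the repository.
#
#   RQLITE_S3_ACCESS_KEY=... RQLITE_S3_SECRET_ACCESS_KEY=... \
#       python -m unittest -v full_system_test.TestAutoBackupS3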

@@ -382,43 +382,43 @@ class Node(object):
def wait_for_upload(self, i, timeout=TIMEOUT):
'''
Wait until the number of uploads is at least as great as the given value.
Wait until the number of uploads is equal to the given value.
'''
t = 0
while t < timeout:
if self.num_auto_backups()[0] >= i:
if self.num_auto_backups()[0] == i:
return self.num_auto_backups()
time.sleep(1)
time.sleep(0.1)
t+=1
n = self.num_auto_backups()
raise Exception('rqlite node failed to upload backup within %d seconds (%d, %d, %d, %d)' % (timeout, n[0], n[1], n[2], n[3]))
def wait_for_upload_skipped(self, i, timeout=TIMEOUT):
def wait_for_upload_skipped_sum(self, i, timeout=TIMEOUT):
'''
Wait until the number of skipped uploads is at least as great as the given value.
Wait until the number of skipped sum uploads is at least as great as the given value.
'''
t = 0
while t < timeout:
if self.num_auto_backups()[2] >= i:
if self.num_auto_backups()[3] >= i:
return self.num_auto_backups()
time.sleep(1)
time.sleep(0.1)
t+=1
n = self.num_auto_backups()
raise Exception('rqlite node failed to skip backup due to sum within %d seconds (%d, %d, %d, %d)' % (timeout, n[0], n[1], n[2], n[3]))
def wait_for_upload_skipped_sum(self, i, timeout=TIMEOUT):
def wait_until_uploads_idle(self, timeout=TIMEOUT):
'''
Wait until the number of skipped sum uploads is at least as great as the given value.
Wait until uploads go idle.
'''
i = self.num_auto_backups()[2]
t = 0
while t < timeout:
if self.num_auto_backups()[3] >= i:
if self.num_auto_backups()[2] > i:
return self.num_auto_backups()
time.sleep(1)
time.sleep(0.1)
t+=1
n = self.num_auto_backups()
raise Exception('rqlite node failed to skip backup due to sum within %d seconds (%d, %d, %d, %d)' % (timeout, n[0], n[1], n[2], n[3]))
raise Exception('rqlite node failed to idle backups within %d seconds (%d, %d, %d, %d)' % (timeout, n[0], n[1], n[2], n[3]))
def wait_for_fsm_index(self, index, timeout=TIMEOUT):
'''

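
The helpers above all poll num_auto_backups(), which, judging by how its indices are used, returns a tuple of counters: index 0 counts successful uploads, index 2 counts uploads that were skipped, and index 3 counts uploads skipped because the data sum was unchanged (these names are inferred, not taken from the source). A deadline-based sketch of the same polling pattern, with assumed parameter names:

# Generalized form of the wait_for_* helpers above. The counter layout is
# inferred from the diff; names and defaults here are assumptions.
import time

def wait_for_backup_counter(node, index, target, timeout=10, interval=0.1):
    '''Poll node.num_auto_backups() until counter `index` reaches `target`.'''
    deadline = time.monotonic() + timeout
    while time.monotonic() < deadline:
        counts = node.num_auto_backups()
        if counts[index] >= target:
            return counts
        time.sleep(interval)
    raise Exception('counter %d did not reach %d within %d seconds' % (index, target, timeout))

Using a wall-clock deadline rather than counting sleep iterations keeps the timeout expressed in seconds no matter what poll interval is used.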