
Add test for no-upload-after-leader-change

master
Philip O'Toole 8 months ago
parent 34aa54ddec
commit 778f363eb3

@@ -6,7 +6,7 @@ import unittest
 import sqlite3
 import time
-from helpers import Node, deprovision_node, write_random_file, random_string, env_present, gunzip_file, gzip_compress, temp_file, d_
+from helpers import Node, deprovision_node, write_random_file, random_string, env_present, gunzip_file, gzip_compress, temp_file, d_, Cluster
 from s3 import download_s3_object, delete_s3_object, upload_s3_object
 
 S3_BUCKET = 'rqlite-testing-circleci'
@@ -470,6 +470,67 @@ class TestAutoBackupS3(unittest.TestCase):
         deprovision_node(node)
         os.remove(cfg)
 
+    @unittest.skipUnless(env_present('RQLITE_S3_ACCESS_KEY'), "S3 credentials not available")
+    def test_no_upload_leader_change(self):
+        '''Test that when the cluster leader changes, the new leader doesn't upload another backup'''
+        node = None
+        cfg = None
+        path = None
+
+        access_key_id = os.environ['RQLITE_S3_ACCESS_KEY']
+        secret_access_key_id = os.environ['RQLITE_S3_SECRET_ACCESS_KEY']
+
+        # Create the auto-backup config file.
+        path = random_string(32)
+        auto_backup_cfg = {
+            "version": 1,
+            "type": "s3",
+            "interval": "100ms",
+            "no_compress": True,
+            "sub": {
+                "access_key_id": access_key_id,
+                "secret_access_key": secret_access_key_id,
+                "region": S3_BUCKET_REGION,
+                "bucket": S3_BUCKET,
+                "path": path
+            }
+        }
+        cfg = write_random_file(json.dumps(auto_backup_cfg))
+
+        # Create a node with automatic backups enabled. An initial
+        # backup will happen because there is no data in the cloud.
+        n0 = Node(RQLITED_PATH, '0', auto_backup=cfg)
+        n0.start()
+        n0.wait_for_leader()
+        n0.wait_for_upload(1)
+
+        # Then create a table, and wait for another backup to happen.
+        n0.execute('CREATE TABLE foo (id INTEGER NOT NULL PRIMARY KEY, name TEXT)')
+        n0.wait_for_upload(2)
+
+        # Grow the cluster with two more nodes, which join as followers.
+        n1 = Node(RQLITED_PATH, '1', auto_backup=cfg)
+        n1.start(join=n0.RaftAddr())
+        n1.wait_for_leader()
+        n2 = Node(RQLITED_PATH, '2', auto_backup=cfg)
+        n2.start(join=n0.RaftAddr())
+        n2.wait_for_leader()
+
+        # Kill the leader, and get the new leader.
+        cluster = Cluster([n0, n1, n2])
+        l = cluster.wait_for_leader()
+        l.stop(graceful=False)
+        new_leader = cluster.wait_for_leader(node_exc=l)
+
+        # Ensure the new leader didn't upload a backup of its own.
+        new_leader.wait_until_uploads_idle()
+        self.assertEqual(new_leader.num_auto_backups()[0], 0)
+
+        delete_s3_object(access_key_id, secret_access_key_id, S3_BUCKET, path)
+        deprovision_node(n0)
+        deprovision_node(n1)
+        deprovision_node(n2)
+        os.remove(cfg)
+
 if __name__ == "__main__":
     unittest.main(verbosity=2)

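For context, the new test leans on the Cluster helper imported above to track which node is leader across a failure. As a rough illustration of what such a helper needs to do, here is a minimal sketch built on the Node API visible in this diff, plus an assumed Node.is_leader() check; names and details are illustrative, not the actual helpers.py implementation.

    # Minimal sketch of a leader-discovery helper, assuming each Node
    # exposes an is_leader() check (an assumption; not shown in this diff).
    import time

    class Cluster(object):
        def __init__(self, nodes):
            self.nodes = nodes

        def wait_for_leader(self, node_exc=None, timeout=30):
            '''Return the first node reporting itself leader, skipping
            node_exc (e.g. a node that was just killed).'''
            t = 0
            while t < timeout:
                for n in self.nodes:
                    if n == node_exc:
                        continue
                    try:
                        if n.is_leader():
                            return n
                    except Exception:
                        pass  # Node may be down or mid-election.
                time.sleep(0.1)
                t += 0.1
            raise Exception('timeout waiting for leader')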
@@ -413,10 +413,14 @@ class Node(object):
         '''
         Wait until uploads go idle.
         '''
-        i = self.num_auto_backups()[2]
+        backups = self.num_auto_backups()[0]
+        skipped = self.num_auto_backups()[2]
+        skipped_sum = self.num_auto_backups()[3]
+        total_skipped = skipped + skipped_sum
         t = 0
         while t < timeout:
-            if self.num_auto_backups()[2] > i:
+            # No new backups completed, while further backups are being skipped?
+            if self.num_auto_backups()[0] == backups and (self.num_auto_backups()[2] + self.num_auto_backups()[3]) > total_skipped:
                 return self.num_auto_backups()
             time.sleep(0.1)
             t += 1

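For readers of the loop above: the indices suggest num_auto_backups() returns a tuple of uploader counters, with [0] counting completed backups and [2]/[3] counting two kinds of skipped backups. A hedged sketch of what such an accessor could look like follows, with made-up status keys; the real field names in the node's status output are not shown in this diff.

    def num_auto_backups(self):
        '''Return this node's auto-backup counters as a tuple:
        (completed, failed, skipped, skipped_sum).'''
        stats = self.status()['uploader']  # hypothetical status sub-map
        return (
            stats['num_uploads_ok'],           # [0] completed backups
            stats['num_uploads_fail'],         # [1] failed backups
            stats['num_uploads_skipped'],      # [2] skipped backups
            stats['num_uploads_skipped_sum'],  # [3] skipped, data unchanged
        )

With that layout, wait_until_uploads_idle() returns once the completed count stops moving while the skip counters still advance, meaning the uploader is alive but has decided there is nothing new to push.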