
Merge pull request #1446 from rqlite/e2e-7-upgrade

E2e 7 upgrade
Branch: master
Philip O'Toole committed 10 months ago (via GitHub)
commit 800d79c82e

@@ -174,6 +174,22 @@ jobs:
            RQLITED_PATH: /home/circleci/go/bin/rqlited
    resource_class: large
  end_to_end_upgrade:
    docker:
      - image: << pipeline.parameters.primary_image >>
    steps:
      - checkout
      - restore_and_save_cache
      - run: go install -tags osusergo,netgo,sqlite_omit_load_extension -ldflags="-extldflags=-static" ./...
      - run:
          command: |
            cd system_test/e2e
            python3 upgrade.py
          environment:
            RQLITED_PATH: /home/circleci/go/bin/rqlited
    resource_class: large
  end_to_end_joining:
    docker:
      - image: << pipeline.parameters.primary_image >>
@@ -266,3 +282,4 @@ workflows:
      - end_to_end_joining
      - end_to_end_autoclustering
      - end_to_end_auto_state
      - end_to_end_upgrade
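
For reference, the CI job above can be reproduced locally with a sketch along these lines. It assumes rqlited has already been built with go install ./... and landed at ~/go/bin/rqlited; adjust the path for your machine.

# Sketch: run the e2e upgrade test locally, mirroring the CI job above.
# Assumes rqlited was built with `go install ./...` and lives at ~/go/bin/rqlited.
import os
import subprocess

env = dict(os.environ)
env["RQLITED_PATH"] = os.path.expanduser("~/go/bin/rqlited")  # assumed install path

subprocess.run(["python3", "upgrade.py"], cwd="system_test/e2e", env=env, check=True)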

@@ -87,8 +87,6 @@ class Node(object):
                  auth=None, auto_backup=None, auto_restore=None,
                  dir=None):
-        s_api = None
-        s_raft = None
         if api_addr is None:
             api_addr = random_addr()
         if raft_addr is None:
             raft_addr = random_addr()

@@ -0,0 +1,28 @@
v7 cluster created using these commands:

./rqlited -raft-snap=16 -raft-snap-int=1s -node-id 1 data.1
./rqlited -raft-snap=16 -raft-snap-int=1s -http-addr localhost:4003 -raft-addr localhost:4004 -join localhost:4001 -node-id 2 data.2
./rqlited -raft-snap=16 -raft-snap-int=1s -http-addr localhost:4005 -raft-addr localhost:4006 -join localhost:4001 -node-id 3 data.3

All nodes were run with -raft-snap=16 -raft-snap-int=1s so that snapshots were taken frequently while the data was loaded.

Query the data via the rqlite CLI:
~/r7/rqlite-v7.21.4-linux-amd64 $ ./rqlite
Welcome to the rqlite CLI. Enter ".help" for usage hints.
Version v7.21.4, commit 971921f1352bdc73e4e66a1ec43be8c1028ff18b, branch master
Connected to rqlited version v7.21.4
127.0.0.1:4001> .schema
+----------------------------------------------------------------------------+
| sql |
+----------------------------------------------------------------------------+
| CREATE TABLE foo (id INTEGER NOT NULL PRIMARY KEY, name TEXT, age INTEGER) |
+----------------------------------------------------------------------------+
127.0.0.1:4001> select count(*) from foo;
+----------+
| count(*) |
+----------+
| 28 |
+----------+
127.0.0.1:4001>
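
The README does not record how the 28 rows in foo were loaded. One way to populate a cluster like this is through rqlite's /db/execute HTTP endpoint; the sketch below uses made-up row values purely for illustration.

# Sketch: populate a v7 cluster over rqlite's HTTP API (/db/execute).
# The rows used to build the actual testdata are not recorded; these are placeholders.
import json
import urllib.request

def execute(stmts, addr="http://localhost:4001"):
    req = urllib.request.Request(addr + "/db/execute",
                                 data=json.dumps(stmts).encode("utf-8"),
                                 headers={"Content-Type": "application/json"})
    with urllib.request.urlopen(req) as resp:
        return json.load(resp)

execute(["CREATE TABLE foo (id INTEGER NOT NULL PRIMARY KEY, name TEXT, age INTEGER)"])
for i in range(28):
    execute([["INSERT INTO foo(name, age) VALUES(?, ?)", "name-%d" % i, 20 + i]])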

Binary file not shown.

@@ -0,0 +1 @@
{"Version":1,"ID":"2-22-1701735629016","Index":22,"Term":2,"Peers":"k65sb2NhbGhvc3Q6NDAwMq5sb2NhbGhvc3Q6NDAwNK5sb2NhbGhvc3Q6NDAwNg==","Configuration":{"Servers":[{"Suffrage":0,"ID":"1","Address":"localhost:4002"},{"Suffrage":0,"ID":"2","Address":"localhost:4004"},{"Suffrage":0,"ID":"3","Address":"localhost:4006"}]},"ConfigurationIndex":4,"Size":315,"CRC":"XRZtHUco9xU="}

Binary file not shown.

@@ -0,0 +1 @@
{"Version":1,"ID":"2-19-1701735628052","Index":19,"Term":2,"Peers":"k65sb2NhbGhvc3Q6NDAwMq5sb2NhbGhvc3Q6NDAwNK5sb2NhbGhvc3Q6NDAwNg==","Configuration":{"Servers":[{"Suffrage":0,"ID":"1","Address":"localhost:4002"},{"Suffrage":0,"ID":"2","Address":"localhost:4004"},{"Suffrage":0,"ID":"3","Address":"localhost:4006"}]},"ConfigurationIndex":4,"Size":304,"CRC":"fidZwVIUbMc="}

Binary file not shown.

@@ -0,0 +1 @@
{"Version":1,"ID":"2-22-1701735628941","Index":22,"Term":2,"Peers":"k65sb2NhbGhvc3Q6NDAwMq5sb2NhbGhvc3Q6NDAwNK5sb2NhbGhvc3Q6NDAwNg==","Configuration":{"Servers":[{"Suffrage":0,"ID":"1","Address":"localhost:4002"},{"Suffrage":0,"ID":"2","Address":"localhost:4004"},{"Suffrage":0,"ID":"3","Address":"localhost:4006"}]},"ConfigurationIndex":4,"Size":315,"CRC":"XRZtHUco9xU="}

@@ -0,0 +1,36 @@
#!/usr/bin/env python

import os
import unittest

from helpers import Node, Cluster, d_

RQLITED_PATH = os.environ['RQLITED_PATH']


class TestUpgrade_v7(unittest.TestCase):
    '''Test that a v7 cluster can be upgraded to this version code'''
    def test(self):
        n0 = Node(RQLITED_PATH, '1', api_addr='localhost:4001', raft_addr='localhost:4002', dir='testdata/v7/data.1')
        n0.start()
        n1 = Node(RQLITED_PATH, '2', api_addr='localhost:4003', raft_addr='localhost:4004', dir='testdata/v7/data.2')
        n1.start()
        n2 = Node(RQLITED_PATH, '3', api_addr='localhost:4005', raft_addr='localhost:4006', dir='testdata/v7/data.3')
        n2.start()

        self.cluster = Cluster([n0, n1, n2])
        l = self.cluster.wait_for_leader()

        # Check that each node upgraded a snapshot.
        for n in self.cluster.nodes:
            self.assertTrue(n.expvar()['snapshot']['upgrade_ok'] == 1)

        # Check that each node has the right data.
        for n in self.cluster.nodes:
            self.assertEqual(n.query('SELECT COUNT(*) FROM foo', level='none'), d_("{'results': [{'values': [[28]], 'types': ['integer'], 'columns': ['COUNT(*)']}]}"))

    def tearDown(self):
        self.cluster.deprovision()


if __name__ == "__main__":
    unittest.main(verbosity=2)
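
For manual debugging, the counter the test asserts on can be read straight from a running node, assuming rqlited serves Go expvars at /debug/vars (which is presumably what Node.expvar() reads). A sketch:

# Sketch: read the snapshot upgrade counter directly from a running node.
# Assumes rqlited exposes Go expvars at /debug/vars.
import json
import urllib.request

with urllib.request.urlopen("http://localhost:4001/debug/vars") as resp:
    expvars = json.load(resp)

print(expvars.get("snapshot", {}).get("upgrade_ok"))  # expected to be 1 after a successful v7 upgrade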