diff --git a/CHANGELOG.md b/CHANGELOG.md
index f8b6743c..4110cfdd 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,7 @@
+## 8.14.1 (December 26th 2023)
+### Implementation changes and bug fixes
+- [PR #1546](https://github.com/rqlite/rqlite/pull/1546): Don't hardcode suffrage when joining. Fixes issue [#1525](https://github.com/rqlite/rqlite/issues/1525). Thanks @jtackaberry
+
 ## 8.14.0 (December 31st 2023)
 This release adds new control over Raft snapshotting, a key part of the Raft consensus protocol. When the WAL file reaches a certain size (4MB by default, which equals the SQLite default), rqlite will trigger a Raft snapshot. In its default setting this change may reduce disk usage, but may also result in more frequent Raft snapshotting. Most users can ignore this change and carry on as before after upgrading to this release.
 ### New features
diff --git a/cluster/bootstrap.go b/cluster/bootstrap.go
index 70ee3276..f3500211 100644
--- a/cluster/bootstrap.go
+++ b/cluster/bootstrap.go
@@ -180,7 +180,7 @@ func (b *Bootstrapper) Boot(id, raftAddr string, suf Suffrage, done func() bool,
 	// Try an explicit join first. Joining an existing cluster is always given priority
 	// over trying to form a new cluster.
 	if j, err := joiner.Do(targets, id, raftAddr, suf); err == nil {
-		b.logger.Printf("succeeded directly joining cluster via node at %s", j)
+		b.logger.Printf("succeeded directly joining cluster via node at %s as %s", j, suf)
 		b.setBootStatus(BootJoin)
 		return nil
 	}
diff --git a/cmd/rqlited/main.go b/cmd/rqlited/main.go
index 1077dc7e..36ca8aed 100644
--- a/cmd/rqlited/main.go
+++ b/cmd/rqlited/main.go
@@ -459,7 +459,7 @@ func createCluster(cfg *Config, hasPeers bool, client *cluster.Client, str *stor
 		// Bootstrap with explicit join addresses requests.
 		bs := cluster.NewBootstrapper(cluster.NewAddressProviderString(joins), client)
 		bs.SetCredentials(cluster.CredentialsFor(credStr, cfg.JoinAs))
-		return bs.Boot(str.ID(), cfg.RaftAdv, cluster.Voter, isClustered, cfg.BootstrapExpectTimeout)
+		return bs.Boot(str.ID(), cfg.RaftAdv, cluster.VoterSuffrage(!cfg.RaftNonVoter), isClustered, cfg.BootstrapExpectTimeout)
 	}
 
 	if cfg.DiscoMode == "" {
@@ -503,7 +503,7 @@ func createCluster(cfg *Config, hasPeers bool, client *cluster.Client, str *stor
 		bs := cluster.NewBootstrapper(provider, client)
 		bs.SetCredentials(cluster.CredentialsFor(credStr, cfg.JoinAs))
 		httpServ.RegisterStatus("disco", provider)
-		return bs.Boot(str.ID(), cfg.RaftAdv, cluster.Voter, isClustered, cfg.BootstrapExpectTimeout)
+		return bs.Boot(str.ID(), cfg.RaftAdv, cluster.VoterSuffrage(!cfg.RaftNonVoter), isClustered, cfg.BootstrapExpectTimeout)
 
 	case DiscoModeEtcdKV, DiscoModeConsulKV:
 		discoService, err := createDiscoService(cfg, str)
diff --git a/system_test/e2e/auto_clustering.py b/system_test/e2e/auto_clustering.py
index 086e8e03..41757095 100644
--- a/system_test/e2e/auto_clustering.py
+++ b/system_test/e2e/auto_clustering.py
@@ -175,6 +175,11 @@ class TestAutoClusteringDNS(unittest.TestCase):
     self.nodes.append(n3)
     self.assertEqual(n0.wait_for_leader(), n3.wait_for_leader())
 
+    self.assertTrue(n0.is_voter())
+    self.assertTrue(n1.is_voter())
+    self.assertTrue(n2.is_voter())
+    self.assertFalse(n3.is_voter())
+
   def tearDown(self):
     del os.environ['RQLITE_DISCO_DNS_HOSTS']
     for n in self.nodes:
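
The heart of the change is in `cmd/rqlited/main.go`: the hardcoded `cluster.Voter` argument to `bs.Boot` is replaced by `cluster.VoterSuffrage(!cfg.RaftNonVoter)`, so a node started with the non-voter option joins an existing cluster as a non-voter rather than being promoted to voter, which is what the new Python assertions verify. The definitions of `Suffrage` and `VoterSuffrage` in rqlite's `cluster` package are not part of this diff; the sketch below is only an assumed, minimal shape of such a helper, not the project's actual code.

```go
// Sketch only: the real Suffrage type and VoterSuffrage helper live in
// rqlite's cluster package. Their exact definitions are assumed here.
package cluster

// Suffrage describes whether a joining node should be a voting member
// of the Raft cluster.
type Suffrage int

const (
	SuffrageUnknown Suffrage = iota
	Voter
	NonVoter
)

// VoterSuffrage maps a boolean (true means "voter") to a Suffrage value,
// letting callers write VoterSuffrage(!cfg.RaftNonVoter) instead of
// hardcoding Voter.
func VoterSuffrage(voter bool) Suffrage {
	if voter {
		return Voter
	}
	return NonVoter
}

// String implements fmt.Stringer so the value renders cleanly in log
// lines such as "succeeded directly joining cluster via node at %s as %s".
func (s Suffrage) String() string {
	switch s {
	case Voter:
		return "voter"
	case NonVoter:
		return "non-voter"
	default:
		return "unknown"
	}
}
```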