From bfa18531aacc7bf50ca87c6c9f6a1b23eed3ee7e Mon Sep 17 00:00:00 2001
From: Philip O'Toole
Date: Tue, 4 Apr 2023 10:11:06 -0400
Subject: [PATCH] Fix long-existing typo

---
 DOC/AUTO_CLUSTERING.md             | 8 ++++----
 system_test/e2e/auto_clustering.py | 8 ++++----
 system_test/e2e/helpers.py         | 2 +-
 3 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/DOC/AUTO_CLUSTERING.md b/DOC/AUTO_CLUSTERING.md
index 314a0f65..0a4dc7a7 100644
--- a/DOC/AUTO_CLUSTERING.md
+++ b/DOC/AUTO_CLUSTERING.md
@@ -7,7 +7,7 @@ This document describes various ways to dynamically form rqlite clusters, which
 
 ## Contents
 * [Quickstart](#quickstart)
-  * [Automatic Boostrapping](#automatic-bootstrapping)
+  * [Automatic Bootstrapping](#automatic-bootstrapping)
   * [Using DNS for Bootstrapping](#using-dns-for-bootstrapping)
     * [DNS SRV](#dns-srv)
   * [Kubernetes](#kubernetes)
@@ -57,7 +57,7 @@ __________________________
 ### Using DNS for Bootstrapping
 You can also use the Domain Name System (DNS) to bootstrap a cluster. This is similar to automatic clustering, but doesn't require you to specify the network addresses of other nodes at the command line. Instead you create a DNS record for the host `rqlite.local`, with an [A Record](https://www.cloudflare.com/learning/dns/dns-records/dns-a-record/) for each rqlite node's IP address.
 
-To launch a node with node ID `$ID` and network address `$HOST`, using DNS for cluster boostrap, execute the following (example) command:
+To launch a node with node ID `$ID` and network address `$HOST`, using DNS for cluster bootstrap, execute the following (example) command:
 ```bash
 rqlited -node-id $ID -http-addr=$HOST:4001 -raft-addr=$HOST:4002 \
   -disco-mode=dns -disco-config='{"name":"rqlite.local"}' -bootstrap-expect 3 data
@@ -67,7 +67,7 @@ You would launch other nodes similarly, setting `$ID` and `$HOST` as required fo
 #### DNS SRV
 Using [DNS SRV](https://www.cloudflare.com/learning/dns/dns-records/dns-srv-record/) gives you more control over the rqlite node address details returned by DNS, including the HTTP port each node is listening on. This means that unlike using just simple DNS records, each rqlite node can be listening on a different HTTP port. Simple DNS records are probably good enough for most situations, however.
 
-To launch a node using DNS SRV boostrap, execute the following (example) command:
+To launch a node using DNS SRV bootstrap, execute the following (example) command:
 ```bash
 rqlited -node-id $ID -http-addr=$HOST:4001 -raft-addr=$HOST:4002 \
   -disco-mode=dns-srv -disco-config='{"name":"rqlite.local","service":"rqlite-svc"}' -bootstrap-expect 3 data
@@ -151,7 +151,7 @@ The examples above demonstrates simple configurations, and most real deployments
 If you wish a single Consul or etcd key-value system to support multiple rqlite clusters, then set the `-disco-key` command line argument to a different value for each cluster. To run multiple rqlite clusters with DNS, use a different domain name per cluster.
 
 ## Design
-When using Automatic Bootstrapping, each node notifies all other nodes of its existence. The first node to have a record of enough nodes (set by `-boostrap-expect`) forms the cluster. Only one node can bootstrap the cluster, any other node that attempts to do so later will fail, and instead become a Follower in the new cluster.
+When using Automatic Bootstrapping, each node notifies all other nodes of its existence. The first node to have a record of enough nodes (set by `-bootstrap-expect`) forms the cluster. Only one node can bootstrap the cluster, any other node that attempts to do so later will fail, and instead become a Follower in the new cluster.
 
 When using either Consul or etcd for automatic clustering, rqlite uses the key-value store of each system. In each case the Leader atomically sets its HTTP URL, allowing other nodes to discover it. To prevent multiple nodes updating the Leader key at once, nodes uses a check-and-set operation, only updating the Leader key if it's value has not changed since it was last read by the node.
 
 See [this blog post](https://www.philipotoole.com/rqlite-7-0-designing-node-discovery-and-automatic-clustering/) for more details on the design.
diff --git a/system_test/e2e/auto_clustering.py b/system_test/e2e/auto_clustering.py
index 87cb9780..5c02e3bd 100644
--- a/system_test/e2e/auto_clustering.py
+++ b/system_test/e2e/auto_clustering.py
@@ -16,9 +16,9 @@ RQLITED_PATH = os.environ['RQLITED_PATH']
 class TestBootstrapping(unittest.TestCase):
   '''Test simple bootstrapping works via -bootstrap-expect'''
   def test(self):
-    n0 = Node(RQLITED_PATH, '0', boostrap_expect=3)
-    n1 = Node(RQLITED_PATH, '1', boostrap_expect=3)
-    n2 = Node(RQLITED_PATH, '2', boostrap_expect=3)
+    n0 = Node(RQLITED_PATH, '0', bootstrap_expect=3)
+    n1 = Node(RQLITED_PATH, '1', bootstrap_expect=3)
+    n2 = Node(RQLITED_PATH, '2', bootstrap_expect=3)
 
     n0.start(join=','.join([n0.APIProtoAddr(), n1.APIProtoAddr(), n2.APIProtoAddr()]))
     n1.start(join=','.join([n0.APIProtoAddr(), n1.APIProtoAddr(), n2.APIProtoAddr()]))
@@ -28,7 +28,7 @@ class TestBootstrapping(unittest.TestCase):
     self.assertEqual(n0.wait_for_leader(), n2.wait_for_leader())
 
     # Ensure a 4th node can join later, with same launch params.
-    n3 = Node(RQLITED_PATH, '4', boostrap_expect=3)
+    n3 = Node(RQLITED_PATH, '4', bootstrap_expect=3)
     n3.start(join=','.join([n0.APIProtoAddr(), n1.APIProtoAddr(), n2.APIProtoAddr()]))
     n3.wait_for_leader()
diff --git a/system_test/e2e/helpers.py b/system_test/e2e/helpers.py
index 06b0a6cc..464ac384 100644
--- a/system_test/e2e/helpers.py
+++ b/system_test/e2e/helpers.py
@@ -145,7 +145,7 @@ class Node(object):
     command = [self.path,
               '-node-id', self.node_id,
              '-http-addr', self.api_addr,
-               '-bootstrap-expect', str(self.boostrap_expect),
+               '-bootstrap-expect', str(self.bootstrap_expect),
               '-raft-addr', self.raft_addr,
              '-raft-snap', str(self.raft_snap_threshold),
               '-raft-snap-int', self.raft_snap_int,