
Expose Raft Peers() on Store

Branch: master
Philip O'Toole · 8 years ago
parent 7af21e5402 · commit ef18cd5b17

@@ -141,6 +141,7 @@ type Store struct {
 	raft          *raft.Raft // The consensus mechanism.
 	raftTransport Transport
+	peerStore     raft.PeerStore
 	dbConf        *DBConfig // SQLite database config.
 	dbPath        string    // Path to underlying SQLite file, if not in-memory.
 	db            *sql.DB   // The underlying SQLite store.
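For context: the raft package here is the pre-v1 hashicorp/raft API, in which cluster membership lives in a raft.PeerStore (rqlite uses the JSON-file-backed one created by raft.NewJSONPeers, as the next hunk shows). A rough sketch of that legacy interface, written from memory and so an assumption rather than a quote from the library:

// Sketch of the legacy hashicorp/raft PeerStore interface (signatures
// assumed from memory; this commit only relies on Peers()).
type PeerStore interface {
	// Peers returns the current set of peer network addresses.
	Peers() ([]string, error)
	// SetPeers durably replaces the set of peer network addresses.
	SetPeers([]string) error
}

Promoting the peer store from an Open()-local variable to a struct field is what lets the new Nodes() method below read the peer set after Open() returns.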
@@ -215,7 +216,7 @@ func (s *Store) Open(enableSingle bool) error {
 	transport := raft.NewNetworkTransport(s.raftTransport, 3, 10*time.Second, os.Stderr)

 	// Create peer storage.
-	peerStore := raft.NewJSONPeers(s.raftDir, transport)
+	s.peerStore = raft.NewJSONPeers(s.raftDir, transport)

 	// Create the snapshot store. This allows Raft to truncate the log.
 	snapshots, err := raft.NewFileSnapshotStore(s.raftDir, retainSnapshotCount, os.Stderr)
@@ -230,7 +231,7 @@ func (s *Store) Open(enableSingle bool) error {
 	}

 	// Instantiate the Raft system.
-	ra, err := raft.NewRaft(config, s, logStore, logStore, snapshots, peerStore, transport)
+	ra, err := raft.NewRaft(config, s, logStore, logStore, snapshots, s.peerStore, transport)
 	if err != nil {
 		return fmt.Errorf("new raft: %s", err)
 	}
@@ -292,6 +293,11 @@ func (s *Store) APIPeers() (map[string]string, error) {
 	return peers, nil
 }

+// Nodes returns the list of current peers.
+func (s *Store) Nodes() ([]string, error) {
+	return s.peerStore.Peers()
+}
+
 // WaitForLeader blocks until a leader is detected, or the timeout expires.
 func (s *Store) WaitForLeader(timeout time.Duration) (string, error) {
 	tck := time.NewTicker(leaderWaitDelay)
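With the peer store held on the struct, the new Nodes() method above simply delegates to it. A minimal usage sketch, assuming an already-opened Store from this package (the helper name and the logging are mine, not part of the commit):

import "log"

// logPeers is a hypothetical helper: it prints the cluster membership
// as reported by the new Store.Nodes() method.
func logPeers(s *Store) error {
	nodes, err := s.Nodes() // Delegates to raft.PeerStore.Peers().
	if err != nil {
		return err
	}
	for _, addr := range nodes {
		log.Printf("raft peer: %s", addr)
	}
	return nil
}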

@@ -7,6 +7,7 @@ import (
 	"os"
 	"path/filepath"
 	"reflect"
+	"sort"
 	"testing"
 	"time"
 )
@@ -179,6 +180,45 @@ func Test_SingleNodeExecuteQueryTx(t *testing.T) {
 	}
 }

+func Test_MultiNodeJoinRemove(t *testing.T) {
+	s0 := mustNewStore(true)
+	defer os.RemoveAll(s0.Path())
+	if err := s0.Open(true); err != nil {
+		t.Fatalf("failed to open node for multi-node test: %s", err.Error())
+	}
+	defer s0.Close(true)
+	s0.WaitForLeader(10 * time.Second)
+
+	s1 := mustNewStore(true)
+	defer os.RemoveAll(s1.Path())
+	if err := s1.Open(false); err != nil {
+		t.Fatalf("failed to open node for multi-node test: %s", err.Error())
+	}
+	defer s1.Close(true)
+
+	// Get sorted list of cluster nodes.
+	storeNodes := []string{s0.Addr().String(), s1.Addr().String()}
+	sort.StringSlice(storeNodes).Sort()
+
+	// Join the second node to the first.
+	if err := s0.Join(s1.Addr().String()); err != nil {
+		t.Fatalf("failed to join node at %s: %s", s1.Addr().String(), err.Error())
+	}
+
+	nodes, err := s0.Nodes()
+	if err != nil {
+		t.Fatalf("failed to get nodes: %s", err.Error())
+	}
+	sort.StringSlice(nodes).Sort()
+
+	if len(nodes) != len(storeNodes) {
+		t.Fatalf("size of cluster is not correct")
+	}
+	if storeNodes[0] != nodes[0] || storeNodes[1] != nodes[1] {
+		t.Fatalf("cluster does not have correct nodes")
+	}
+}
+
 func Test_MultiNodeExecuteQuery(t *testing.T) {
 	s0 := mustNewStore(true)
 	defer os.RemoveAll(s0.Path())
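A side note on the membership assertion: since the test file now imports sort and already imports reflect, the per-element comparison could be collapsed into a single deep comparison. A sketch of that alternative (equivalent intent, not what the commit does):

// Alternative check using sort.Strings and reflect.DeepEqual, both from
// the standard library and already imported by this test file.
sort.Strings(nodes)
sort.Strings(storeNodes)
if !reflect.DeepEqual(nodes, storeNodes) {
	t.Fatalf("cluster does not have correct nodes: got %v, exp %v", nodes, storeNodes)
}

This form also subsumes the separate length check.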
