auto bootstrapping and update peers

commit b7cdde14ae
parent 622297f1a7
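The diff below removes the explicit -raftBootstrap flag from the compose masters and teaches the Hashicorp Raft server to bootstrap itself when its stored configuration is still empty, otherwise letting the elected leader add or remove voters so live membership tracks the -peers list. A minimal sketch of that startup decision against the hashicorp/raft API (the function name bootstrapIfNeeded and its parameters are illustrative, not part of this commit):

package raftexample

import "github.com/hashicorp/raft"

// bootstrapIfNeeded sketches the decision this commit introduces: bootstrap
// the cluster only when explicitly requested or when this node has no stored
// Raft configuration yet; otherwise skip bootstrap and let the elected leader
// reconcile membership at runtime (as UpdatePeers does in the diff below).
// r, force and peers are assumed inputs; map keys are server IDs and values
// are their gRPC addresses.
func bootstrapIfNeeded(r *raft.Raft, force bool, peers map[string]string) error {
	if !force && len(r.GetConfiguration().Configuration().Servers) > 0 {
		return nil // already initialized; nothing to bootstrap
	}
	var cfg raft.Configuration
	for id, addr := range peers {
		cfg.Servers = append(cfg.Servers, raft.Server{
			Suffrage: raft.Voter,
			ID:       raft.ServerID(id),
			Address:  raft.ServerAddress(addr),
		})
	}
	return r.BootstrapCluster(cfg).Error()
}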
@@ -6,7 +6,7 @@ services:
     ports:
       - 9333:9333
       - 19333:19333
-    command: "-v=3 master -volumeSizeLimitMB 100 -resumeState=false -raftHashicorp=true -raftBootstrap=false -ip=master0 -port=9333 -peers=master1:9334,master2:9335 -mdir=/data"
+    command: "-v=4 master -volumeSizeLimitMB 100 -resumeState=false -raftHashicorp=true -ip=master0 -port=9333 -peers=master1:9334,master2:9335 -mdir=/data"
     volumes:
       - ./master/0:/data
     environment:
@@ -18,7 +18,7 @@ services:
     ports:
       - 9334:9334
       - 19334:19334
-    command: "-v=3 master -volumeSizeLimitMB 100 -resumeState=false -raftHashicorp=true -raftBootstrap=false -ip=master1 -port=9334 -peers=master0:9333,master2:9335 -mdir=/data"
+    command: "-v=4 master -volumeSizeLimitMB 100 -resumeState=false -raftHashicorp=true -ip=master1 -port=9334 -peers=master0:9333,master2:9335 -mdir=/data"
     volumes:
       - ./master/1:/data
     environment:
@@ -30,7 +30,7 @@ services:
     ports:
       - 9335:9335
       - 19335:19335
-    command: "-v=3 master -volumeSizeLimitMB 100 -resumeState=false -raftHashicorp=true -raftBootstrap=false -ip=master2 -port=9335 -peers=master0:9333,master1:9334 -mdir=/data"
+    command: "-v=4 master -volumeSizeLimitMB 100 -resumeState=false -raftHashicorp=true -ip=master2 -port=9335 -peers=master0:9333,master1:9334 -mdir=/data"
     volumes:
       - ./master/2:/data
     environment:
@@ -46,7 +46,6 @@ services:
     depends_on:
       - master0
       - master1
-      - master2
   volume2:
     image: chrislusf/seaweedfs:local
     ports:
@@ -56,7 +55,6 @@ services:
     depends_on:
       - master0
       - master1
-      - master2
   volume3:
     image: chrislusf/seaweedfs:local
     ports:
@@ -66,7 +64,6 @@ services:
     depends_on:
       - master0
       - master1
-      - master2
   filer:
     image: chrislusf/seaweedfs:local
     ports:
@@ -77,7 +74,6 @@ services:
     depends_on:
       - master0
       - master1
-      - master2
       - volume1
       - volume2
   s3:
@@ -88,7 +84,6 @@ services:
     depends_on:
       - master0
       - master1
-      - master2
       - volume1
       - volume2
       - filer
@@ -16,6 +16,54 @@ import (
 	"time"
 )
 
+func (s *RaftServer) AddPeersConfiguration() (cfg raft.Configuration) {
+	for _, peer := range s.peers {
+		cfg.Servers = append(cfg.Servers, raft.Server{
+			Suffrage: raft.Voter,
+			ID:       raft.ServerID(peer.String()),
+			Address:  raft.ServerAddress(peer.ToGrpcAddress()),
+		})
+	}
+	return cfg
+}
+
+func (s *RaftServer) UpdatePeers() {
+	for {
+		select {
+		case isLeader := <-s.RaftHashicorp.LeaderCh():
+			if isLeader {
+				peerLeader := s.serverAddr.String()
+				existsPeerName := make(map[string]bool)
+				for _, server := range s.RaftHashicorp.GetConfiguration().Configuration().Servers {
+					if string(server.ID) == peerLeader {
+						continue
+					}
+					existsPeerName[string(server.ID)] = true
+				}
+				for _, peer := range s.peers {
+					if peer.String() == peerLeader || existsPeerName[peer.String()] {
+						continue
+					}
+					glog.V(0).Infof("adding new peer: %s", peer.String())
+					s.RaftHashicorp.AddVoter(
+						raft.ServerID(peer.String()), raft.ServerAddress(peer.ToGrpcAddress()), 0, 0)
+				}
+				for peer, _ := range existsPeerName {
+					if _, found := s.peers[peer]; !found {
+						glog.V(0).Infof("removing old peer: %s", peer)
+						s.RaftHashicorp.RemoveServer(raft.ServerID(peer), 0, 0)
+					}
+				}
+				if _, found := s.peers[peerLeader]; !found {
+					glog.V(0).Infof("removing old leader peer: %s", peerLeader)
+					s.RaftHashicorp.RemoveServer(raft.ServerID(peerLeader), 0, 0)
+				}
+			}
+			break
+		}
+	}
+}
+
 func NewHashicorpRaftServer(option *RaftServerOption) (*RaftServer, error) {
 	s := &RaftServer{
 		peers: option.Peers,
@@ -25,7 +73,7 @@ func NewHashicorpRaftServer(option *RaftServerOption) (*RaftServer, error) {
 	}
 
 	c := raft.DefaultConfig()
-	c.LocalID = raft.ServerID(s.serverAddr) // TODO maybee the IP:port address will change
+	c.LocalID = raft.ServerID(s.serverAddr.String()) // TODO maybee the IP:port address will change
 	c.NoSnapshotRestoreOnStart = option.RaftResumeState
 	c.HeartbeatTimeout = time.Duration(float64(option.HeartbeatInterval) * (rand.Float64()*0.25 + 1))
 	c.ElectionTimeout = option.ElectionTimeout
@@ -66,32 +114,17 @@ func NewHashicorpRaftServer(option *RaftServerOption) (*RaftServer, error) {
 	if err != nil {
 		return nil, fmt.Errorf("raft.NewRaft: %v", err)
 	}
-	if option.RaftBootstrap {
-		cfg := raft.Configuration{
-			Servers: []raft.Server{
-				{
-					Suffrage: raft.Voter,
-					ID:       c.LocalID,
-					Address:  raft.ServerAddress(s.serverAddr.ToGrpcAddress()),
-				},
-			},
-		}
-		// Add known peers to bootstrap
-		for _, peer := range option.Peers {
-			if peer == option.ServerAddr {
-				continue
-			}
-			cfg.Servers = append(cfg.Servers, raft.Server{
-				Suffrage: raft.Voter,
-				ID:       raft.ServerID(peer),
-				Address:  raft.ServerAddress(peer.ToGrpcAddress()),
-			})
-		}
+	if option.RaftBootstrap || len(s.RaftHashicorp.GetConfiguration().Configuration().Servers) == 0 {
+		cfg := s.AddPeersConfiguration()
+		glog.V(0).Infoln("Bootstrapping new cluster %+v", cfg)
 		f := s.RaftHashicorp.BootstrapCluster(cfg)
 		if err := f.Error(); err != nil {
 			return nil, fmt.Errorf("raft.Raft.BootstrapCluster: %v", err)
 		}
+	} else {
+		go s.UpdatePeers()
 	}
+
 	ticker := time.NewTicker(c.HeartbeatTimeout * 10)
 	if glog.V(4) {
 		go func() {