Remove rate limit semaphore on master's leader selection logic. (#6494)

The semaphore was introduced in 054374c7 (2024-03-12) and serves no practical purpose,
yet it caps the maximum QPS that master servers can handle.
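
For context, boundedLeaderChan was a buffered channel used as a counting semaphore around the proxy-to-leader path: every proxied request had to acquire one of 16 slots before being forwarded, so at most 16 requests could be in flight regardless of how much load the leader could actually sustain. A minimal, self-contained sketch of the pattern follows; the request count and sleep duration are hypothetical, only the channel idiom matches the removed code.

    package main

    import (
        "fmt"
        "sync"
        "time"
    )

    // A buffered channel used as a counting semaphore: at most cap(sem)
    // goroutines hold a slot at once, so throughput is bounded no matter
    // how many requests arrive.
    func main() {
        sem := make(chan int, 16) // same capacity as the removed boundedLeaderChan

        var wg sync.WaitGroup
        start := time.Now()
        for i := 0; i < 64; i++ {
            wg.Add(1)
            go func() {
                defer wg.Done()
                sem <- 1                 // acquire: blocks once all 16 slots are taken
                defer func() { <-sem }() // release when the "request" finishes
                time.Sleep(10 * time.Millisecond) // stand-in for proxying one request
            }()
        }
        wg.Wait()

        // 64 requests, 16 at a time, ~10ms each: about four waves (~40ms).
        // The semaphore alone caps QPS at roughly cap(sem) / per-request latency.
        fmt.Printf("64 requests took %v\n", time.Since(start))
    }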
Lisandro Pin 2025-01-30 22:08:36 +01:00 committed by GitHub
parent 331c1f0f3f
commit fc4df944a0


@@ -65,8 +65,6 @@ type MasterServer struct {
 	vg                      *topology.VolumeGrowth
 	volumeGrowthRequestChan chan *topology.VolumeGrowRequest
 
-	boundedLeaderChan chan int
-
 	// notifying clients
 	clientChansLock sync.RWMutex
 	clientChans     map[string]chan *master_pb.KeepConnectedResponse
@@ -122,7 +120,6 @@ func NewMasterServer(r *mux.Router, option *MasterOption, peers map[string]pb.Se
 		adminLocks: NewAdminLocks(),
 		Cluster:    cluster.NewCluster(),
 	}
-	ms.boundedLeaderChan = make(chan int, 16)
 
 	ms.MasterClient.SetOnPeerUpdateFn(ms.OnPeerUpdate)
@@ -228,8 +225,6 @@ func (ms *MasterServer) proxyToLeader(f http.HandlerFunc) http.HandlerFunc {
 			return
 		}
-		ms.boundedLeaderChan <- 1
-		defer func() { <-ms.boundedLeaderChan }()
 		targetUrl, err := url.Parse("http://" + raftServerLeader)
 		if err != nil {
 			writeJsonError(w, r, http.StatusInternalServerError,