Chris Lu 2021-09-14 10:37:06 -07:00
parent 119d5908dd
commit 2789d10342
16 changed files with 46 additions and 47 deletions

View File

@@ -103,16 +103,16 @@ func (wo *WebDavOption) startWebDav() bool {
    }
    ws, webdavServer_err := weed_server.NewWebDavServer(&weed_server.WebDavOption{
        Filer:          filerAddress,
        GrpcDialOption: grpcDialOption,
        Collection:     *wo.collection,
        Replication:    *wo.replication,
        DiskType:       *wo.disk,
        Uid:            uid,
        Gid:            gid,
        Cipher:         cipher,
        CacheDir:       util.ResolvePath(*wo.cacheDir),
        CacheSizeMB:    *wo.cacheSizeMB,
    })
    if webdavServer_err != nil {
        glog.Fatalf("WebDav Server startup error: %v", webdavServer_err)

View File

@@ -3,8 +3,8 @@ package ftpd
import (
    "crypto/tls"
    "errors"
-   "net"
    "github.com/chrislusf/seaweedfs/weed/util"
+   "net"
    ftpserver "github.com/fclairamb/ftpserverlib"
    "google.golang.org/grpc"

View File

@@ -33,10 +33,10 @@ type IamS3ApiConfigure struct {
}

type IamServerOption struct {
    Masters        []pb.ServerAddress
    Filer          pb.ServerAddress
    Port           int
    GrpcDialOption grpc.DialOption
}

type IamApiServer struct {
@@ -49,7 +49,7 @@ var s3ApiConfigure IamS3ApiConfig
func NewIamApiServer(router *mux.Router, option *IamServerOption) (iamApiServer *IamApiServer, err error) {
    s3ApiConfigure = IamS3ApiConfigure{
        option:       option,
-       masterClient: wdclient.NewMasterClient(option.GrpcDialOption, pb.AdminShellClient, "", "",option.Masters),
+       masterClient: wdclient.NewMasterClient(option.GrpcDialOption, pb.AdminShellClient, "", "", option.Masters),
    }
    s3Option := s3api.S3ApiServerOption{Filer: option.Filer}
    iamApiServer = &IamApiServer{

View File

@@ -96,7 +96,7 @@ func LookupVolumeIds(masterFn GetMasterFn, grpcDialOption grpc.DialOption, vids
        locations = append(locations, Location{
            Url:       loc.Url,
            PublicUrl: loc.PublicUrl,
            GrpcPort:  int(loc.GrpcPort),
        })
    }
    if vidLocations.Error != "" {

View File

@@ -244,4 +244,4 @@ func WithOneOfGrpcFilerClients(filerAddresses []ServerAddress, grpcDialOption gr
    }
    return err
}

View File

@@ -269,7 +269,7 @@ func (s *s3RemoteStorageClient) CreateBucket(name string) (err error) {
func (s *s3RemoteStorageClient) DeleteBucket(name string) (err error) {
    _, err = s.conn.DeleteBucket(&s3.DeleteBucketInput{
        Bucket: aws.String(name),
    })
    if err != nil {
        return fmt.Errorf("delete bucket %s: %v", name, err)

View File

@@ -116,7 +116,7 @@ func (fs *FilerServer) DownloadToLocal(ctx context.Context, req *filer_pb.Downlo
        replicas = append(replicas, &volume_server_pb.FetchAndWriteNeedleRequest_Replica{
            Url:       r.Url,
            PublicUrl: r.PublicUrl,
            GrpcPort:  int32(r.GrpcPort),
        })
    }

View File

@@ -64,7 +64,7 @@ func (fs *FilerServer) GetOrHeadHandler(w http.ResponseWriter, r *http.Request)
    // set etag
    etag := filer.ETagEntry(entry)
-   if ifm := r.Header.Get("If-Match"); ifm != "" && (ifm != "\""+etag+"\"" && ifm != etag){
+   if ifm := r.Header.Get("If-Match"); ifm != "" && (ifm != "\""+etag+"\"" && ifm != etag) {
        w.WriteHeader(http.StatusPreconditionFailed)
        return
    }

View File

@@ -24,7 +24,7 @@ func (ms *MasterServer) SendHeartbeat(stream master_pb.Seaweed_SendHeartbeatServ
    defer func() {
        if dn != nil {
            dn.Counter--
            if dn.Counter > 0 {
                glog.V(0).Infof("disconnect phantom volume server %s:%d remaining %d", dn.Ip, dn.Port, dn.Counter)
                return
            }

View File

@@ -155,19 +155,19 @@ func (ms *MasterServer) Assign(ctx context.Context, req *master_pb.AssignRequest
            replicas = append(replicas, &master_pb.Location{
                Url:       r.Url(),
                PublicUrl: r.PublicUrl,
                GrpcPort:  uint32(r.GrpcPort),
            })
        }
        return &master_pb.AssignResponse{
            Fid: fid,
            Location: &master_pb.Location{
                Url:       dn.Url(),
                PublicUrl: dn.PublicUrl,
                GrpcPort:  uint32(dn.GrpcPort),
            },
            Count:    count,
            Auth:     string(security.GenJwt(ms.guard.SigningKey, ms.guard.ExpiresAfterSec, fid)),
            Replicas: replicas,
        }, nil
    }
    //glog.V(4).Infoln("waiting for volume growing...")

View File

@@ -53,7 +53,7 @@ func (vs *VolumeServer) FetchAndWriteNeedle(ctx context.Context, req *volume_ser
            }
        }
    }()
-   if len(req.Replicas)>0{
+   if len(req.Replicas) > 0 {
        fileId := needle.NewFileId(v.Id, req.NeedleId, req.Cookie)
        for _, replica := range req.Replicas {
            wg.Add(1)

View File

@@ -27,18 +27,18 @@ import (
)

type WebDavOption struct {
    Filer          pb.ServerAddress
    DomainName     string
    BucketsPath    string
    GrpcDialOption grpc.DialOption
    Collection     string
    Replication    string
    DiskType       string
    Uid            uint32
    Gid            uint32
    Cipher         bool
    CacheDir       string
    CacheSizeMB    int64
}

type WebDavServer struct {

View File

@@ -79,7 +79,7 @@ func (c *commandRemoteCache) Do(args []string, commandEnv *CommandEnv, writer io
    return nil
}

-func (c *commandRemoteCache) doCacheOneDirectory(commandEnv *CommandEnv, writer io.Writer, dir string, fileFiler *FileFilter, concurrency int) (error) {
+func (c *commandRemoteCache) doCacheOneDirectory(commandEnv *CommandEnv, writer io.Writer, dir string, fileFiler *FileFilter, concurrency int) error {
    mappings, localMountedDir, remoteStorageMountedLocation, remoteStorageConf, detectErr := detectMountInfo(commandEnv, writer, dir)
    if detectErr != nil {
        jsonPrintln(writer, mappings)

View File

@@ -118,4 +118,3 @@ func (c *commandRemoteUnmount) purgeMountedData(commandEnv *CommandEnv, dir stri
    return nil
}

View File

@@ -31,7 +31,7 @@ func TestHandlingVolumeServerHeartbeat(t *testing.T) {
    maxVolumeCounts := make(map[string]uint32)
    maxVolumeCounts[""] = 25
    maxVolumeCounts["ssd"] = 12
-   dn := rack.GetOrCreateDataNode("127.0.0.1", 34534, 0,"127.0.0.1", maxVolumeCounts)
+   dn := rack.GetOrCreateDataNode("127.0.0.1", 34534, 0, "127.0.0.1", maxVolumeCounts)
    {
        volumeCount := 7
@@ -177,7 +177,7 @@ func TestAddRemoveVolume(t *testing.T) {
    maxVolumeCounts := make(map[string]uint32)
    maxVolumeCounts[""] = 25
    maxVolumeCounts["ssd"] = 12
-   dn := rack.GetOrCreateDataNode("127.0.0.1", 34534, 0,"127.0.0.1", maxVolumeCounts)
+   dn := rack.GetOrCreateDataNode("127.0.0.1", 34534, 0, "127.0.0.1", maxVolumeCounts)
    v := storage.VolumeInfo{
        Id: needle.VolumeId(1),

View File

@@ -15,11 +15,11 @@ func DetectedHostAddress() string {
        return ""
    }
-   if v4Address := selectIpV4(netInterfaces, true); v4Address != ""{
+   if v4Address := selectIpV4(netInterfaces, true); v4Address != "" {
        return v4Address
    }
-   if v6Address := selectIpV4(netInterfaces, false); v6Address != ""{
+   if v6Address := selectIpV4(netInterfaces, false); v6Address != "" {
        return v6Address
    }
@@ -59,4 +59,4 @@ func JoinHostPort(host string, port int) string {
        return host + ":" + portStr
    }
    return net.JoinHostPort(host, portStr)
}
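
Taken together, the hunks above are whitespace and style edits only: a space added after commas and around comparison operators, a space before opening braces, redundant parentheses dropped from a bare error return type, and struct fields realigned. This looks consistent with running gofmt over the tree, though the commit itself does not say how the changes were produced. Below is a minimal, self-contained Go sketch (hypothetical names, not code from this repository) illustrating the conventions these hunks converge on:

package main

import "fmt"

// serverOption is a stand-in for the option structs touched above;
// gofmt aligns the field names and types into columns.
type serverOption struct {
    Filer       string
    Collection  string
    CacheSizeMB int64
}

// Before formatting, code like this might read
// `func describe(...) (error)` and `if len(replicas)>0{`;
// gofmt-style code spells both as below.
func describe(o serverOption, replicas []string) error {
    if len(replicas) > 0 {
        fmt.Printf("%s: %d replicas\n", o.Filer, len(replicas))
    }
    return nil
}

func main() {
    _ = describe(serverOption{Filer: "localhost:8888"}, []string{"a", "b"})
}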