Mirror of https://github.com/seaweedfs/seaweedfs.git, synced 2025-04-05 20:52:50 +08:00
parallelize remote content fetching
This commit is contained in:
parent 00ffbb4c9a
commit f365af81c2
@@ -78,66 +78,74 @@ func (fs *FilerServer) DownloadToLocal(ctx context.Context, req *filer_pb.Downlo
 	dest := util.FullPath(remoteStorageMountedLocation.Path).Child(string(util.FullPath(req.Directory).Child(req.Name))[len(localMountedDir):])
 
 	var chunks []*filer_pb.FileChunk
+	var fetchAndWriteErr error
 
+	// FIXME limit on parallel
+	limitedConcurrentExecutor := util.NewLimitedConcurrentExecutor(8)
 	for offset := int64(0); offset < entry.Remote.RemoteSize; offset += chunkSize {
-		size := chunkSize
-		if offset+chunkSize > entry.Remote.RemoteSize {
-			size = entry.Remote.RemoteSize - offset
-		}
+		localOffset := offset
 
-		// assign one volume server
-		assignResult, err := operation.Assign(fs.filer.GetMaster, fs.grpcDialOption, assignRequest, altRequest)
-		if err != nil {
-			return resp, err
-		}
-		if assignResult.Error != "" {
-			return resp, fmt.Errorf("assign: %v", assignResult.Error)
-		}
-		fileId, parseErr := needle.ParseFileIdFromString(assignResult.Fid)
-		if assignResult.Error != "" {
-			return resp, fmt.Errorf("unrecognized file id %s: %v", assignResult.Fid, parseErr)
-		}
-
-		// tell filer to tell volume server to download into needles
-		err = operation.WithVolumeServerClient(assignResult.Url, fs.grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
-			_, fetchAndWriteErr := volumeServerClient.FetchAndWriteNeedle(context.Background(), &volume_server_pb.FetchAndWriteNeedleRequest{
-				VolumeId:     uint32(fileId.VolumeId),
-				NeedleId:     uint64(fileId.Key),
-				Cookie:       uint32(fileId.Cookie),
-				Offset:       offset,
-				Size:         size,
-				RemoteType:   storageConf.Type,
-				RemoteName:   storageConf.Name,
-				S3AccessKey:  storageConf.S3AccessKey,
-				S3SecretKey:  storageConf.S3SecretKey,
-				S3Region:     storageConf.S3Region,
-				S3Endpoint:   storageConf.S3Endpoint,
-				RemoteBucket: remoteStorageMountedLocation.Bucket,
-				RemotePath:   string(dest),
-			})
-			if fetchAndWriteErr != nil {
-				return fmt.Errorf("volume server %s fetchAndWrite %s: %v", assignResult.Url, dest, fetchAndWriteErr)
-			}
-			return nil
-		})
-
-		if err != nil {
-			return nil, err
-		}
-
-		chunks = append(chunks, &filer_pb.FileChunk{
-			FileId: assignResult.Fid,
-			Offset: offset,
-			Size:   uint64(size),
-			Mtime:  time.Now().Unix(),
-			Fid: &filer_pb.FileId{
-				VolumeId: uint32(fileId.VolumeId),
-				FileKey:  uint64(fileId.Key),
-				Cookie:   uint32(fileId.Cookie),
-			},
-		})
+		limitedConcurrentExecutor.Execute(func() {
+			size := chunkSize
+			if localOffset+chunkSize > entry.Remote.RemoteSize {
+				size = entry.Remote.RemoteSize - localOffset
+			}
+
+			// assign one volume server
+			assignResult, err := operation.Assign(fs.filer.GetMaster, fs.grpcDialOption, assignRequest, altRequest)
+			if err != nil {
+				fetchAndWriteErr = err
+				return
+			}
+			if assignResult.Error != "" {
+				fetchAndWriteErr = fmt.Errorf("assign: %v", assignResult.Error)
+				return
+			}
+			fileId, parseErr := needle.ParseFileIdFromString(assignResult.Fid)
+			if assignResult.Error != "" {
+				fetchAndWriteErr = fmt.Errorf("unrecognized file id %s: %v", assignResult.Fid, parseErr)
+				return
+			}
+
+			// tell filer to tell volume server to download into needles
+			err = operation.WithVolumeServerClient(assignResult.Url, fs.grpcDialOption, func(volumeServerClient volume_server_pb.VolumeServerClient) error {
+				_, fetchAndWriteErr := volumeServerClient.FetchAndWriteNeedle(context.Background(), &volume_server_pb.FetchAndWriteNeedleRequest{
+					VolumeId:     uint32(fileId.VolumeId),
+					NeedleId:     uint64(fileId.Key),
+					Cookie:       uint32(fileId.Cookie),
+					Offset:       localOffset,
+					Size:         size,
+					RemoteType:   storageConf.Type,
+					RemoteName:   storageConf.Name,
+					S3AccessKey:  storageConf.S3AccessKey,
+					S3SecretKey:  storageConf.S3SecretKey,
+					S3Region:     storageConf.S3Region,
+					S3Endpoint:   storageConf.S3Endpoint,
+					RemoteBucket: remoteStorageMountedLocation.Bucket,
+					RemotePath:   string(dest),
+				})
+				if fetchAndWriteErr != nil {
+					return fmt.Errorf("volume server %s fetchAndWrite %s: %v", assignResult.Url, dest, fetchAndWriteErr)
+				}
+				return nil
+			})
+
+			if err != nil {
+				fetchAndWriteErr = err
+				return
+			}
+
+			chunks = append(chunks, &filer_pb.FileChunk{
+				FileId: assignResult.Fid,
+				Offset: localOffset,
+				Size:   uint64(size),
+				Mtime:  time.Now().Unix(),
+				Fid: &filer_pb.FileId{
+					VolumeId: uint32(fileId.VolumeId),
+					FileKey:  uint64(fileId.Key),
+					Cookie:   uint32(fileId.Cookie),
+				},
+			})
+		})
 	}
 
 	garbage := entry.Chunks
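The heart of the change: each chunk's assign-and-fetch now runs inside limitedConcurrentExecutor.Execute, so up to eight FetchAndWriteNeedle calls can be in flight against volume servers at once instead of one. As a minimal sketch of that bounded-concurrency pattern, assuming a counting-semaphore executor (the names and details below are illustrative, not SeaweedFS's actual util package):

package main

import (
	"fmt"
	"sync"
)

// limitedExecutor is an illustrative stand-in for SeaweedFS's
// util.NewLimitedConcurrentExecutor: a buffered channel acts as a
// counting semaphore so at most `limit` jobs run concurrently.
type limitedExecutor struct {
	sem chan struct{}
	wg  sync.WaitGroup
}

func newLimitedExecutor(limit int) *limitedExecutor {
	return &limitedExecutor{sem: make(chan struct{}, limit)}
}

// Execute blocks while `limit` jobs are already in flight, then runs
// the job in its own goroutine.
func (e *limitedExecutor) Execute(job func()) {
	e.wg.Add(1)
	e.sem <- struct{}{} // acquire a slot; blocks when the limit is hit
	go func() {
		defer func() {
			<-e.sem // release the slot
			e.wg.Done()
		}()
		job()
	}()
}

// Wait joins on all submitted jobs.
func (e *limitedExecutor) Wait() { e.wg.Wait() }

func main() {
	executor := newLimitedExecutor(8)
	for offset := int64(0); offset < 100; offset += 10 {
		localOffset := offset
		executor.Execute(func() {
			fmt.Println("fetch chunk at offset", localOffset)
		})
	}
	executor.Wait()
}

The real util.NewLimitedConcurrentExecutor may differ in detail; the point is that Execute blocks once the limit is reached, which is the throttle the "// FIXME limit on parallel" comment refers to. Note that a caller would also need some join (like Wait here) before reading the shared chunks slice; that synchronization sits outside this hunk.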
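One subtle detail in the diff is the per-iteration copy localOffset := offset, with the request's Offset field switched from offset to localOffset. A small toy program (names illustrative) showing why the copy matters under Go's pre-1.22 loop-variable semantics:

package main

import (
	"fmt"
	"sync"
)

func main() {
	var wg sync.WaitGroup

	// Before Go 1.22, every closure spawned in this loop would share the
	// single loop variable `offset`; copying it into a per-iteration
	// `localOffset`, as the commit does, freezes the value each goroutine sees.
	for offset := int64(0); offset < 30; offset += 10 {
		localOffset := offset
		wg.Add(1)
		go func() {
			defer wg.Done()
			fmt.Println("fetching chunk at offset", localOffset)
		}()
	}
	wg.Wait()
}

Without the copy, each closure would capture the one shared offset variable, which by the time a goroutine runs has usually already advanced past the intended chunk, so the parallel fetches would target the wrong ranges.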