Mirror of https://github.com/seaweedfs/seaweedfs.git, synced 2025-04-05 20:52:50 +08:00
fix reader_at
commit aec7f32b02
parent 5d80fc2ec7
@@ -19,6 +19,7 @@ type ChunkReadAt struct {
 	bufferOffset int64
 	lookupFileId func(fileId string) (targetUrl string, err error)
 	readerLock   sync.Mutex
+	fileSize     int64
 
 	chunkCache *chunk_cache.ChunkCache
 }
@@ -54,13 +55,14 @@ func LookupFn(filerClient filer_pb.FilerClient) LookupFileIdFunctionType {
 	}
 }
 
-func NewChunkReaderAtFromClient(filerClient filer_pb.FilerClient, chunkViews []*ChunkView, chunkCache *chunk_cache.ChunkCache) *ChunkReadAt {
+func NewChunkReaderAtFromClient(filerClient filer_pb.FilerClient, chunkViews []*ChunkView, chunkCache *chunk_cache.ChunkCache, fileSize int64) *ChunkReadAt {
 
 	return &ChunkReadAt{
 		chunkViews:   chunkViews,
 		lookupFileId: LookupFn(filerClient),
 		bufferOffset: -1,
 		chunkCache:   chunkCache,
+		fileSize:     fileSize,
 	}
 }
 
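Note on the signature change above: every caller of NewChunkReaderAtFromClient now has to supply the file size as a fourth argument (see the updated call sites further down). A minimal caller sketch, written as if it lived inside the filer2 package; the helper name newReaderForEntry and its parameters are illustrative, not part of this commit:

// Illustrative only: build the reader with the size recorded on the filer entry,
// mirroring what the updated call sites in this commit do.
func newReaderForEntry(filerClient filer_pb.FilerClient, entry *filer_pb.Entry,
	chunkViews []*ChunkView, chunkCache *chunk_cache.ChunkCache) *ChunkReadAt {

	fileSize := int64(FileSize(entry)) // FileSize is the filer2 helper used in the call sites below
	return NewChunkReaderAtFromClient(filerClient, chunkViews, chunkCache, fileSize)
}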
@@ -73,9 +75,6 @@ func (c *ChunkReadAt) ReadAt(p []byte, offset int64) (n int, err error) {
 		readCount, readErr := c.doReadAt(p[n:], offset+int64(n))
 		n += readCount
 		err = readErr
-		if readCount == 0 {
-			return n, io.EOF
-		}
 	}
 	return
 }
@@ -83,8 +82,11 @@ func (c *ChunkReadAt) ReadAt(p []byte, offset int64) (n int, err error) {
 func (c *ChunkReadAt) doReadAt(p []byte, offset int64) (n int, err error) {
 
 	var found bool
+	var chunkStart, chunkStop int64
 	for _, chunk := range c.chunkViews {
-		if chunk.LogicOffset <= offset && offset < chunk.LogicOffset+int64(chunk.Size) {
+		// fmt.Printf(">>> doReadAt [%d,%d), chunk[%d,%d), %v && %v\n", offset, offset+int64(len(p)), chunk.LogicOffset, chunk.LogicOffset+int64(chunk.Size), chunk.LogicOffset <= offset, offset < chunk.LogicOffset+int64(chunk.Size))
+		chunkStart, chunkStop = max(chunk.LogicOffset, offset), min(chunk.LogicOffset+int64(chunk.Size), offset+int64(len(p)))
+		if chunkStart < chunkStop {
 			found = true
 			if c.bufferOffset != chunk.LogicOffset {
 				c.buffer, err = c.fetchChunkData(chunk)
@@ -96,15 +98,23 @@ func (c *ChunkReadAt) doReadAt(p []byte, offset int64) (n int, err error) {
 			break
 		}
 	}
-	if !found {
-		return 0, io.EOF
+
+	// fmt.Printf("> doReadAt [%d,%d), buffer:[%d,%d), found:%v, err:%v\n", offset, offset+int64(len(p)), c.bufferOffset, c.bufferOffset+int64(len(c.buffer)), found, err)
+
+	if err != nil {
+		return
 	}
 
-	if err == nil {
-		n = copy(p, c.buffer[offset-c.bufferOffset:])
+	if found {
+		n = int(chunkStart-offset) + copy(p[chunkStart-offset:chunkStop-offset], c.buffer[chunkStart-c.bufferOffset:chunkStop-c.bufferOffset])
+		return
 	}
 
 	// fmt.Printf("> doReadAt [%d,%d), buffer:[%d,%d)\n", offset, offset+int64(n), c.bufferOffset, c.bufferOffset+int64(len(c.buffer)))
+	n = len(p)
+	if offset+int64(n) >= c.fileSize {
+		err = io.EOF
+		n = int(c.fileSize - offset)
+	}
 
 	return
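For context, a self-contained sketch (not code from this commit) of the read semantics doReadAt implements after this change: the request is intersected with each chunk's logical range, any leading gap counts toward the returned byte count, and io.EOF is only reported once the read reaches the known file size. All names below (sparseReader, span, the int64 min/max helpers) are hypothetical stand-ins for the real ChunkView/ChunkReadAt types:

package main

import (
	"fmt"
	"io"
)

// span stands in for a ChunkView: a byte range at a logical file offset.
type span struct {
	logicOffset int64
	data        []byte
}

// sparseReader models the fixed ChunkReadAt behavior for files with holes.
type sparseReader struct {
	spans    []span
	fileSize int64
}

func maxInt64(a, b int64) int64 {
	if a > b {
		return a
	}
	return b
}

func minInt64(a, b int64) int64 {
	if a < b {
		return a
	}
	return b
}

func (r *sparseReader) ReadAt(p []byte, offset int64) (n int, err error) {
	// Serve the read from the first span whose range intersects [offset, offset+len(p)).
	for _, s := range r.spans {
		start := maxInt64(s.logicOffset, offset)
		stop := minInt64(s.logicOffset+int64(len(s.data)), offset+int64(len(p)))
		if start < stop {
			// Bytes before the span are left untouched; the overlap is copied,
			// and both count toward n.
			n = int(start-offset) + copy(p[start-offset:stop-offset], s.data[start-s.logicOffset:stop-s.logicOffset])
			return n, nil
		}
	}
	// No span intersects the request: the read still advances, clamped to
	// fileSize, and io.EOF is reported once the end of the file is reached.
	n = len(p)
	if offset+int64(n) >= r.fileSize {
		err = io.EOF
		n = int(r.fileSize - offset)
	}
	return
}

func main() {
	r := &sparseReader{
		spans:    []span{{logicOffset: 4, data: []byte("data")}},
		fileSize: 12,
	}

	buf := make([]byte, 6)
	n, err := r.ReadAt(buf, 2) // starts in a hole, overlaps the span
	fmt.Printf("n=%d err=%v buf=%q\n", n, err, buf[:n])

	n, err = r.ReadAt(buf, 9) // past the last span, clamped by fileSize
	fmt.Printf("n=%d err=%v\n", n, err)
}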
@@ -82,10 +82,11 @@ func (fh *FileHandle) readFromDirtyPages(buff []byte, startOffset int64) (offset
 
 func (fh *FileHandle) readFromChunks(buff []byte, offset int64) (int64, error) {
 
-	// this value should come from the filer instead of the old f
-	if len(fh.f.entry.Chunks) == 0 {
+	fileSize := int64(filer2.FileSize(fh.f.entry))
+
+	if fileSize == 0 {
 		glog.V(1).Infof("empty fh %v", fh.f.fullpath())
-		return 0, nil
+		return 0, io.EOF
 	}
 
 	var chunkResolveErr error
@@ -98,8 +99,9 @@ func (fh *FileHandle) readFromChunks(buff []byte, offset int64) (int64, error) {
 	}
 
 	if fh.f.reader == nil {
+		glog.V(1).Infof("entryViewCache %d", len(fh.f.entryViewCache))
 		chunkViews := filer2.ViewFromVisibleIntervals(fh.f.entryViewCache, 0, math.MaxInt32)
-		fh.f.reader = filer2.NewChunkReaderAtFromClient(fh.f.wfs, chunkViews, fh.f.wfs.chunkCache)
+		fh.f.reader = filer2.NewChunkReaderAtFromClient(fh.f.wfs, chunkViews, fh.f.wfs.chunkCache, fileSize)
 	}
 
 	totalRead, err := fh.f.reader.ReadAt(buff, offset)
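The switch from returning (0, nil) to (0, io.EOF) for empty files matches the standard io.ReaderAt contract, where a read at or past the end of the data reports io.EOF rather than a silent zero-byte success. A standard-library reference point (not code from this commit):

package main

import (
	"fmt"
	"strings"
)

func main() {
	r := strings.NewReader("hello world") // 11 bytes
	buf := make([]byte, 8)

	// A read that runs past the end returns the remaining bytes plus io.EOF.
	n, err := r.ReadAt(buf, 6)
	fmt.Printf("n=%d err=%v data=%q\n", n, err, buf[:n]) // n=5 err=EOF data="world"

	// A read starting at or past the end returns 0 and io.EOF, not 0 and nil.
	n, err = r.ReadAt(buf, 11)
	fmt.Printf("n=%d err=%v\n", n, err) // n=0 err=EOF
}

Returning io.EOF here presumably keeps callers of readFromChunks from treating an empty file as a read that can be retried.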
@@ -470,7 +470,8 @@ func (f *WebDavFile) Read(p []byte) (readSize int, err error) {
 	if err != nil {
 		return 0, err
 	}
-	if len(f.entry.Chunks) == 0 {
+	fileSize := int64(filer2.FileSize(f.entry))
+	if fileSize == 0 {
 		return 0, io.EOF
 	}
 	if f.entryViewCache == nil {
@@ -479,7 +480,7 @@ func (f *WebDavFile) Read(p []byte) (readSize int, err error) {
 	}
 	if f.reader == nil {
 		chunkViews := filer2.ViewFromVisibleIntervals(f.entryViewCache, 0, math.MaxInt32)
-		f.reader = filer2.NewChunkReaderAtFromClient(f.fs, chunkViews, f.fs.chunkCache)
+		f.reader = filer2.NewChunkReaderAtFromClient(f.fs, chunkViews, f.fs.chunkCache, fileSize)
 	}
 
 	readSize, err = f.reader.ReadAt(p, f.off)