chrislu 2024-05-20 11:03:56 -07:00
parent d3032d1e80
commit d218fe54fa
26 changed files with 92 additions and 95 deletions

View File

@@ -67,7 +67,7 @@ func (mc *MemChunk) ReadDataAt(p []byte, off int64, tsNs int64) (maxStop int64)
            maxStop = max(maxStop, logicStop)
            if t.TsNs >= tsNs {
-               println("read new data1", t.TsNs - tsNs, "ns")
+               println("read new data1", t.TsNs-tsNs, "ns")
            }
        }
    }

View File

@@ -137,7 +137,7 @@ func (sc *SwapFileChunk) ReadDataAt(p []byte, off int64, tsNs int64) (maxStop in
            maxStop = max(maxStop, logicStop)
            if t.TsNs >= tsNs {
-               println("read new data2", t.TsNs - tsNs, "ns")
+               println("read new data2", t.TsNs-tsNs, "ns")
            }
        }
    }

View File

@@ -35,7 +35,6 @@ func (b *MessageQueueBroker) ConfigureTopic(ctx context.Context, request *mq_pb.
        }
    }
    t := topic.FromPbTopic(request.Topic)
    var readErr, assignErr error
    resp, readErr = b.readTopicConfFromFiler(t)

View File

@@ -69,7 +69,7 @@ func (b *MessageQueueBroker) PublishMessage(stream mq_pb.SeaweedMessaging_Publis
        return stream.Send(response)
    }
    var receivedSequence, acknowledgedSequence int64
    var isClosed bool
    // start sending ack to publisher
@@ -85,7 +85,7 @@ func (b *MessageQueueBroker) PublishMessage(stream mq_pb.SeaweedMessaging_Publis
        lastAckTime := time.Now()
        for !isClosed {
            receivedSequence = atomic.LoadInt64(&localTopicPartition.AckTsNs)
-           if acknowledgedSequence < receivedSequence && (receivedSequence - acknowledgedSequence >= ackInterval || time.Since(lastAckTime) > 1*time.Second){
+           if acknowledgedSequence < receivedSequence && (receivedSequence-acknowledgedSequence >= ackInterval || time.Since(lastAckTime) > 1*time.Second) {
                acknowledgedSequence = receivedSequence
                response := &mq_pb.PublishMessageResponse{
                    AckSequence: acknowledgedSequence,
@@ -101,7 +101,6 @@ func (b *MessageQueueBroker) PublishMessage(stream mq_pb.SeaweedMessaging_Publis
        }
    }()
    // process each published messages
    clientName := fmt.Sprintf("%v-%4d/%s/%v", findClientAddress(stream.Context()), rand.Intn(10000), initMessage.Topic, initMessage.Partition)
    localTopicPartition.Publishers.AddPublisher(clientName, topic.NewLocalPublisher())
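The -85 hunk shows the ack-batching rule: an ack goes out only when the publisher is at least ackInterval messages behind, or more than one second has passed since the last ack. A minimal standalone sketch of that pattern, assuming a polling loop; the ticker, channel, and function names here are illustrative, not the broker's actual API:

package main

import (
	"fmt"
	"sync/atomic"
	"time"
)

// ackLoop mirrors the batching condition from PublishMessage: flush an ack
// when the backlog reaches ackInterval or one second has elapsed.
func ackLoop(received *int64, ackInterval int64, send func(int64), done <-chan struct{}) {
	var acknowledged int64
	lastAckTime := time.Now()
	ticker := time.NewTicker(50 * time.Millisecond) // illustrative polling cadence
	defer ticker.Stop()
	for {
		select {
		case <-done:
			return
		case <-ticker.C:
			r := atomic.LoadInt64(received)
			if acknowledged < r && (r-acknowledged >= ackInterval || time.Since(lastAckTime) > 1*time.Second) {
				acknowledged = r
				lastAckTime = time.Now()
				send(acknowledged)
			}
		}
	}
}

func main() {
	var received int64
	done := make(chan struct{})
	go ackLoop(&received, 10, func(seq int64) { fmt.Println("ack up to", seq) }, done)
	for i := 0; i < 30; i++ { // simulate messages being persisted
		atomic.AddInt64(&received, 1)
		time.Sleep(10 * time.Millisecond)
	}
	time.Sleep(1200 * time.Millisecond) // let the 1s timeout flush the tail
	close(done)
}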

View File

@@ -13,10 +13,11 @@ import (
)
type memBuffer struct {
    buf       []byte
    startTime time.Time
    stopTime  time.Time
}
func (b *MessageQueueBroker) PublishFollowMe(stream mq_pb.SeaweedMessaging_PublishFollowMeServer) (err error) {
    var req *mq_pb.PublishFollowMeRequest
    req, err = stream.Recv()
@@ -84,7 +85,6 @@ func (b *MessageQueueBroker) PublishFollowMe(stream mq_pb.SeaweedMessaging_Publi
        }
    }
    t, p := topic.FromPbTopic(initMessage.Topic), topic.FromPbPartition(initMessage.Partition)
    logBuffer.ShutdownLogBuffer()
@@ -97,7 +97,6 @@ func (b *MessageQueueBroker) PublishFollowMe(stream mq_pb.SeaweedMessaging_Publi
    partitionGeneration := time.Unix(0, p.UnixTimeNs).UTC().Format(topic.TIME_FORMAT)
    partitionDir := fmt.Sprintf("%s/%s/%04d-%04d", topicDir, partitionGeneration, p.RangeStart, p.RangeStop)
    // flush the remaining messages
    inMemoryBuffers.CloseInput()
    for mem, found := inMemoryBuffers.Dequeue(); found; mem, found = inMemoryBuffers.Dequeue() {
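The flush path closes the buffer queue to new input and then drains what is left, which is why the Dequeue loop can terminate. A minimal sketch of that close-then-drain shape using a hypothetical channel-backed queue; the real inMemoryBuffers type is not shown in this diff:

package main

import "fmt"

// queue is a hypothetical stand-in for inMemoryBuffers: Enqueue adds data,
// CloseInput rejects further input, Dequeue reports found=false once drained.
type queue struct{ ch chan []byte }

func newQueue(capacity int) *queue { return &queue{ch: make(chan []byte, capacity)} }

func (q *queue) Enqueue(b []byte) { q.ch <- b }
func (q *queue) CloseInput()      { close(q.ch) }
func (q *queue) Dequeue() (b []byte, found bool) {
	b, found = <-q.ch
	return
}

func main() {
	q := newQueue(4)
	q.Enqueue([]byte("segment-1"))
	q.Enqueue([]byte("segment-2"))
	// flush the remaining messages, same shape as the loop above
	q.CloseInput()
	for mem, found := q.Dequeue(); found; mem, found = q.Dequeue() {
		fmt.Printf("flushing %s\n", mem)
	}
}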

View File

@@ -45,7 +45,7 @@ func (b *MessageQueueBroker) genLogFlushFunc(t topic.Topic, partition *mq_pb.Par
    b.accessLock.Lock()
    defer b.accessLock.Unlock()
    p := topic.FromPbPartition(partition)
-   if localPartition:=b.localTopicManager.GetLocalPartition(t, p); localPartition!=nil {
+   if localPartition := b.localTopicManager.GetLocalPartition(t, p); localPartition != nil {
        localPartition.NotifyLogFlushed(logBuffer.LastFlushTsNs)
    }

View File

@@ -16,7 +16,7 @@ var (
    concurrency    = flag.Int("c", 4, "concurrent publishers")
    partitionCount = flag.Int("p", 6, "partition count")
    clientName     = flag.String("client", "c1", "client name")
    namespace      = flag.String("ns", "test", "namespace")
    t              = flag.String("t", "test", "t")

View File

@@ -19,7 +19,7 @@ var (
    concurrency    = flag.Int("c", 4, "concurrent publishers")
    partitionCount = flag.Int("p", 6, "partition count")
    clientName     = flag.String("client", "c1", "client name")
    namespace      = flag.String("ns", "test", "namespace")
    t              = flag.String("t", "test", "t")

View File

@@ -48,7 +48,7 @@ func (p *TopicPublisher) FinishPublish() error {
    if inputBuffers, found := p.partition2Buffer.AllIntersections(0, pub_balancer.MaxPartitionCount); found {
        for _, inputBuffer := range inputBuffers {
            inputBuffer.Enqueue(&mq_pb.DataMessage{
                TsNs: time.Now().UnixNano(),
                Ctrl: &mq_pb.ControlMessage{
                    IsClose: true,
                },

View File

@@ -122,7 +122,7 @@ func (sub *TopicSubscriber) onEachPartition(assigned *mq_pb.BrokerPartitionAssig
                StartTsNs: sub.alreadyProcessedTsNs,
                StartType: mq_pb.PartitionOffsetStartType_EARLIEST_IN_MEMORY,
            },
            Filter:          sub.ContentConfig.Filter,
            FollowerBrokers: assigned.FollowerBrokers,
        },
    },

View File

@@ -17,7 +17,7 @@ type BrokerStats struct {
}
type TopicPartitionStats struct {
    topic.TopicPartition
    PublisherCount  int32
    SubscriberCount int32
}
@@ -48,7 +48,7 @@ func (bs *BrokerStats) UpdateStats(stats *mq_pb.BrokerStats) {
                UnixTimeNs: topicPartitionStats.Partition.UnixTimeNs,
            },
        },
        PublisherCount:  topicPartitionStats.PublisherCount,
        SubscriberCount: topicPartitionStats.SubscriberCount,
    }
    publisherCount += topicPartitionStats.PublisherCount
@@ -76,7 +76,7 @@ func (bs *BrokerStats) RegisterAssignment(t *mq_pb.Topic, partition *mq_pb.Parti
            UnixTimeNs: partition.UnixTimeNs,
        },
    },
    PublisherCount:  0,
    SubscriberCount: 0,
}
key := tps.TopicPartition.String()

View File

@@ -6,17 +6,17 @@ import (
type Schema struct {
    RecordType *schema_pb.RecordType
    fieldMap   map[string]*schema_pb.Field
}
func NewSchema(recordType *schema_pb.RecordType) (*Schema, error) {
-   fieldMap := make( map[string]*schema_pb.Field)
+   fieldMap := make(map[string]*schema_pb.Field)
    for _, field := range recordType.Fields {
        fieldMap[field.Name] = field
    }
    return &Schema{
        RecordType: recordType,
        fieldMap:   fieldMap,
    }, nil
}

View File

@@ -8,9 +8,9 @@ import (
var (
    TypeBoolean = &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{schema_pb.ScalarType_BOOL}}
    TypeInt32   = &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{schema_pb.ScalarType_INT32}}
    TypeInt64   = &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{schema_pb.ScalarType_INT64}}
    TypeFloat   = &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{schema_pb.ScalarType_FLOAT}}
    TypeDouble  = &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{schema_pb.ScalarType_DOUBLE}}
    TypeBytes   = &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{schema_pb.ScalarType_BYTES}}
    TypeString  = &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{schema_pb.ScalarType_STRING}}
)

View File

@@ -32,10 +32,10 @@ func TestEnumScalarType(t *testing.T) {
func TestField(t *testing.T) {
    field := &Field{
        Name:       "field_name",
        Type:       &Type{Kind: &Type_ScalarType{ScalarType: ScalarType_INT32}},
        FieldIndex: 1,
        IsRepeated: false,
    }
    assert.NotNil(t, field)
}
@@ -44,32 +44,32 @@ func TestRecordType(t *testing.T) {
    subRecord := &RecordType{
        Fields: []*Field{
            {
                Name:       "field_1",
                Type:       &Type{Kind: &Type_ScalarType{ScalarType: ScalarType_INT32}},
                FieldIndex: 1,
                IsRepeated: false,
            },
            {
                Name:       "field_2",
                Type:       &Type{Kind: &Type_ScalarType{ScalarType: ScalarType_STRING}},
                FieldIndex: 2,
                IsRepeated: false,
            },
        },
    }
    record := &RecordType{
        Fields: []*Field{
            {
                Name:       "field_key",
                Type:       &Type{Kind: &Type_ScalarType{ScalarType: ScalarType_INT32}},
                FieldIndex: 1,
                IsRepeated: false,
            },
            {
                Name:       "field_record",
                Type:       &Type{Kind: &Type_RecordType{RecordType: subRecord}},
                FieldIndex: 2,
                IsRepeated: false,
            },
        },
    }

View File

@@ -76,7 +76,7 @@ func TestStructToSchema(t *testing.T) {
                RecordTypeBegin().
                    WithField("Field3", TypeString).
                    WithField("Field4", TypeInt32).
                    RecordTypeEnd(),
            ).
            RecordTypeEnd(),
        },
@@ -104,7 +104,7 @@ func TestStructToSchema(t *testing.T) {
                RecordTypeBegin().
                    WithField("Field6", TypeString).
                    WithField("Field7", TypeBytes).
                    RecordTypeEnd(),
            ).RecordTypeEnd(),
        ).
        RecordTypeEnd(),

View File

@@ -7,9 +7,9 @@ import (
type ParquetLevels struct {
    startColumnIndex int
    endColumnIndex   int
    definitionDepth  int
    levels           map[string]*ParquetLevels
}
func ToParquetLevels(recordType *schema_pb.RecordType) (*ParquetLevels, error) {
@@ -19,7 +19,7 @@ func ToParquetLevels(recordType *schema_pb.RecordType) (*ParquetLevels, error) {
func toFieldTypeLevels(fieldType *schema_pb.Type, startColumnIndex, definitionDepth int) (*ParquetLevels, error) {
    switch fieldType.Kind.(type) {
    case *schema_pb.Type_ScalarType:
        return toFieldTypeScalarLevels(fieldType.GetScalarType(), startColumnIndex, definitionDepth)
    case *schema_pb.Type_RecordType:
        return toRecordTypeLevels(fieldType.GetRecordType(), startColumnIndex, definitionDepth)
    case *schema_pb.Type_ListType:
@@ -35,15 +35,15 @@ func toFieldTypeListLevels(listType *schema_pb.ListType, startColumnIndex, defin
func toFieldTypeScalarLevels(scalarType schema_pb.ScalarType, startColumnIndex, definitionDepth int) (*ParquetLevels, error) {
    return &ParquetLevels{
        startColumnIndex: startColumnIndex,
        endColumnIndex:   startColumnIndex + 1,
        definitionDepth:  definitionDepth,
    }, nil
}
func toRecordTypeLevels(recordType *schema_pb.RecordType, startColumnIndex, definitionDepth int) (*ParquetLevels, error) {
    recordTypeLevels := &ParquetLevels{
        startColumnIndex: startColumnIndex,
        definitionDepth:  definitionDepth,
        levels:           make(map[string]*ParquetLevels),
    }
    for _, field := range recordType.Fields {
        fieldTypeLevels, err := toFieldTypeLevels(field.Type, startColumnIndex, definitionDepth+1)

View File

@@ -11,9 +11,9 @@ func TestToParquetLevels(t *testing.T) {
        recordType *schema_pb.RecordType
    }
    tests := []struct {
        name string
        args args
        want *ParquetLevels
    }{
        {
            name: "nested type",
@@ -25,13 +25,13 @@ func TestToParquetLevels(t *testing.T) {
                RecordTypeBegin().
                    WithField("zName", TypeString).
                    WithField("emails", ListOf(TypeString)).
                    RecordTypeEnd()).
                WithField("Company", TypeString).
                WithRecordField("Address",
                    RecordTypeBegin().
                        WithField("Street", TypeString).
                        WithField("City", TypeString).
                        RecordTypeEnd()).
                RecordTypeEnd(),
            },
            want: &ParquetLevels{

View File

@@ -31,7 +31,6 @@ func toParquetFieldType(fieldType *schema_pb.Type) (dataType parquet.Node, err e
        return nil, fmt.Errorf("unknown field type: %T", fieldType.Kind)
    }
    return dataType, err
}

View File

@@ -70,7 +70,7 @@ func doVisitValue(fieldType *schema_pb.Type, levels *ParquetLevels, fieldValue *
    return
}
func toParquetValue(value *schema_pb.Value) (parquet.Value, error) {
    switch value.Kind.(type) {
    case *schema_pb.Value_BoolValue:
        return parquet.BooleanValue(value.GetBoolValue()), nil

View File

@@ -47,7 +47,7 @@ func toRecordValue(recordType *schema_pb.RecordType, levels *ParquetLevels, valu
func toListValue(listType *schema_pb.ListType, levels *ParquetLevels, values []parquet.Value, valueIndex int) (listValue *schema_pb.Value, endValueIndex int, err error) {
    listValues := make([]*schema_pb.Value, 0)
    var value *schema_pb.Value
-   for ;valueIndex < len(values); {
+   for valueIndex < len(values) {
        if values[valueIndex].Column() != levels.startColumnIndex {
            break
        }
@@ -67,19 +67,19 @@ func toScalarValue(scalarType schema_pb.ScalarType, levels *ParquetLevels, value
    }
    switch scalarType {
    case schema_pb.ScalarType_BOOL:
-       return &schema_pb.Value{Kind: &schema_pb.Value_BoolValue{BoolValue: value.Boolean()}}, valueIndex+1, nil
+       return &schema_pb.Value{Kind: &schema_pb.Value_BoolValue{BoolValue: value.Boolean()}}, valueIndex + 1, nil
    case schema_pb.ScalarType_INT32:
-       return &schema_pb.Value{Kind: &schema_pb.Value_Int32Value{Int32Value: value.Int32()}}, valueIndex+1, nil
+       return &schema_pb.Value{Kind: &schema_pb.Value_Int32Value{Int32Value: value.Int32()}}, valueIndex + 1, nil
    case schema_pb.ScalarType_INT64:
-       return &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: value.Int64()}}, valueIndex+1, nil
+       return &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: value.Int64()}}, valueIndex + 1, nil
    case schema_pb.ScalarType_FLOAT:
-       return &schema_pb.Value{Kind: &schema_pb.Value_FloatValue{FloatValue: value.Float()}}, valueIndex+1, nil
+       return &schema_pb.Value{Kind: &schema_pb.Value_FloatValue{FloatValue: value.Float()}}, valueIndex + 1, nil
    case schema_pb.ScalarType_DOUBLE:
-       return &schema_pb.Value{Kind: &schema_pb.Value_DoubleValue{DoubleValue: value.Double()}}, valueIndex+1, nil
+       return &schema_pb.Value{Kind: &schema_pb.Value_DoubleValue{DoubleValue: value.Double()}}, valueIndex + 1, nil
    case schema_pb.ScalarType_BYTES:
-       return &schema_pb.Value{Kind: &schema_pb.Value_BytesValue{BytesValue: value.ByteArray()}}, valueIndex+1, nil
+       return &schema_pb.Value{Kind: &schema_pb.Value_BytesValue{BytesValue: value.ByteArray()}}, valueIndex + 1, nil
    case schema_pb.ScalarType_STRING:
-       return &schema_pb.Value{Kind: &schema_pb.Value_StringValue{StringValue: string(value.ByteArray())}}, valueIndex+1, nil
+       return &schema_pb.Value{Kind: &schema_pb.Value_StringValue{StringValue: string(value.ByteArray())}}, valueIndex + 1, nil
    }
    return nil, valueIndex, fmt.Errorf("unsupported scalar type: %v", scalarType)
}

View File

@@ -19,13 +19,13 @@ func TestWriteReadParquet(t *testing.T) {
        RecordTypeBegin().
            WithField("zName", TypeString).
            WithField("emails", ListOf(TypeString)).
            RecordTypeEnd()).
        WithField("Company", TypeString).
        WithRecordField("Address",
            RecordTypeBegin().
                WithField("Street", TypeString).
                WithField("City", TypeString).
                RecordTypeEnd()).
        RecordTypeEnd()
    fmt.Printf("RecordType: %v\n", recordType)
@@ -85,9 +85,9 @@ func testWritingParquetFile(t *testing.T, count int, filename string, parquetSch
            fmt.Sprintf("john_%d@c.com", i),
            fmt.Sprintf("john_%d@d.com", i),
            fmt.Sprintf("john_%d@e.com", i)).
            RecordEnd()).
        SetString("Company", fmt.Sprintf("company_%d", i)).
        RecordEnd()
    AddRecordValue(rowBuilder, recordType, parquetLevels, recordValue)
    if count < 10 {

View File

@@ -16,17 +16,17 @@ import (
)
type LocalPartition struct {
    ListenersWaits int64
    AckTsNs        int64
    // notifying clients
    ListenersLock sync.Mutex
    ListenersCond *sync.Cond
    Partition
    LogBuffer   *log_buffer.LogBuffer
    Publishers  *LocalPartitionPublishers
    Subscribers *LocalPartitionSubscribers
    followerStream         mq_pb.SeaweedMessaging_PublishFollowMeClient
    followerGrpcConnection *grpc.ClientConn
@@ -37,7 +37,7 @@ var TIME_FORMAT = "2006-01-02-15-04-05"
func NewLocalPartition(partition Partition, logFlushFn log_buffer.LogFlushFuncType, readFromDiskFn log_buffer.LogReadFromDiskFuncType) *LocalPartition {
    lp := &LocalPartition{
        Partition:   partition,
        Publishers:  NewLocalPartitionPublishers(),
        Subscribers: NewLocalPartitionSubscribers(),
    }
@@ -155,8 +155,8 @@ func (p *LocalPartition) MaybeConnectToFollowers(initMessage *mq_pb.PublishMessa
    if err = p.followerStream.Send(&mq_pb.PublishFollowMeRequest{
        Message: &mq_pb.PublishFollowMeRequest_Init{
            Init: &mq_pb.PublishFollowMeRequest_InitMessage{
                Topic:     initMessage.Topic,
                Partition: initMessage.Partition,
            },
        },
    }); err != nil {

View File

@@ -278,7 +278,7 @@ func TestDeleteEmptySelection(t *testing.T) {
    eachDataNode(topologyInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {
        for _, diskInfo := range dn.DiskInfos {
            for _, v := range diskInfo.VolumeInfos {
                if v.Size <= super_block.SuperBlockSize && v.ModifiedAtSecond > 0 {
                    fmt.Printf("empty volume %d from %s\n", v.Id, dn.Id)
                }
            }

View File

@@ -31,20 +31,20 @@ type VolumeGrowRequest struct {
}
type volumeGrowthStrategy struct {
    Copy1Count     int
    Copy2Count     int
    Copy3Count     int
    CopyOtherCount int
    Threshold      float64
}
var (
    VolumeGrowStrategy = volumeGrowthStrategy{
        Copy1Count:     7,
        Copy2Count:     6,
        Copy3Count:     3,
        CopyOtherCount: 1,
        Threshold:      0.9,
    }
)
@@ -77,7 +77,8 @@ func NewDefaultVolumeGrowth() *VolumeGrowth {
// given copyCount, how many logical volumes to create
func (vg *VolumeGrowth) findVolumeCount(copyCount int) (count int) {
    switch copyCount {
-   case 1: count = VolumeGrowStrategy.Copy1Count
+   case 1:
+       count = VolumeGrowStrategy.Copy1Count
    case 2:
        count = VolumeGrowStrategy.Copy2Count
    case 3:
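The -77 hunk only splits `case 1:` so that every case body sits on its own line, and the diff is cut off after `case 3:`. For reference, a self-contained sketch of the whole mapping under the assumption that the remaining cases mirror the strategy fields and the default branch uses CopyOtherCount:

package main

import "fmt"

// strategy mirrors the volumeGrowthStrategy defaults from the hunk above.
var strategy = struct {
	Copy1Count, Copy2Count, Copy3Count, CopyOtherCount int
}{Copy1Count: 7, Copy2Count: 6, Copy3Count: 3, CopyOtherCount: 1}

// findVolumeCount: given copyCount, how many logical volumes to create.
// The default branch is an assumption; the diff is truncated after case 3.
func findVolumeCount(copyCount int) (count int) {
	switch copyCount {
	case 1:
		count = strategy.Copy1Count
	case 2:
		count = strategy.Copy2Count
	case 3:
		count = strategy.Copy3Count
	default:
		count = strategy.CopyOtherCount
	}
	return
}

func main() {
	for _, c := range []int{1, 2, 3, 5} {
		fmt.Printf("copyCount=%d -> %d volumes\n", c, findVolumeCount(c))
	}
}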

View File

@@ -381,7 +381,7 @@ func (vl *VolumeLayout) GetActiveVolumeCount(option *VolumeGrowOption) (total, a
        }
        active++
        info, _ := dn.GetVolumesById(v)
-       if float64(info.Size) > float64(vl.volumeSizeLimit)* VolumeGrowStrategy.Threshold{
+       if float64(info.Size) > float64(vl.volumeSizeLimit)*VolumeGrowStrategy.Threshold {
            crowded++
        }
    }
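The reformatted condition counts a volume as crowded once its size exceeds Threshold times the volume size limit; with the 0.9 default and, say, a 30 GB limit, anything over 27 GB qualifies. A tiny worked check, where the 30 GB limit is a hypothetical value rather than one taken from this diff:

package main

import "fmt"

func main() {
	const volumeSizeLimit = uint64(30) << 30 // hypothetical 30 GB limit
	const threshold = 0.9                    // VolumeGrowStrategy.Threshold default
	size := uint64(28) << 30                 // a 28 GB volume
	crowded := float64(size) > float64(volumeSizeLimit)*threshold
	fmt.Printf("crowded=%v (cutoff %.1f GB)\n", crowded, float64(volumeSizeLimit)*threshold/float64(1<<30))
}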

View File

@@ -19,7 +19,7 @@ func TestNewLogBufferFirstBuffer(t *testing.T) {
    }, nil, func() {
    })
-   startTime := MessagePosition{Time:time.Now()}
+   startTime := MessagePosition{Time: time.Now()}
    messageSize := 1024
    messageCount := 5000
@@ -38,7 +38,7 @@ func TestNewLogBufferFirstBuffer(t *testing.T) {
            println("processed all messages")
            return true, io.EOF
        }
-       return false,nil
+       return false, nil
    })
    fmt.Printf("before flush: sent %d received %d\n", messageCount, receivedMessageCount)