Mirror of https://github.com/seaweedfs/seaweedfs.git
go fmt
This commit is contained in:
parent d3032d1e80
commit d218fe54fa
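This commit only re-runs Go's standard formatter over the affected files; no behavior changes. A sweep like this is typically produced with gofmt -w or go fmt ./... . As a rough illustration of the rewrites in the hunks below, gofmt normalizes the spacing around := and comparison operators, drops the spaces around operators nested inside call arguments, and column-aligns struct fields and keyed literal values. The sketch below uses hypothetical names and is not code from this commit; it simply shows the post-gofmt form.

// Illustrative only: hypothetical names, not taken from this commit.
// This is roughly the shape gofmt leaves code in.
package main

import "time"

type memExample struct {
    buf       []byte // short names are padded so the field types line up
    startTime time.Time
    stopTime  time.Time
}

func main() {
    t0 := time.Now().UnixNano()
    t1 := time.Now().UnixNano()
    if d := t1 - t0; d >= 0 { // ":=" and ">=" keep single surrounding spaces
        println("elapsed", t1-t0, "ns") // an operator nested in a call loses its spaces
    }
    _ = memExample{buf: nil, startTime: time.Now(), stopTime: time.Now()}
}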
@@ -67,7 +67,7 @@ func (mc *MemChunk) ReadDataAt(p []byte, off int64, tsNs int64) (maxStop int64)
 maxStop = max(maxStop, logicStop)

 if t.TsNs >= tsNs {
-println("read new data1", t.TsNs - tsNs, "ns")
+println("read new data1", t.TsNs-tsNs, "ns")
 }
 }
 }
@@ -137,7 +137,7 @@ func (sc *SwapFileChunk) ReadDataAt(p []byte, off int64, tsNs int64) (maxStop in
 maxStop = max(maxStop, logicStop)

 if t.TsNs >= tsNs {
-println("read new data2", t.TsNs - tsNs, "ns")
+println("read new data2", t.TsNs-tsNs, "ns")
 }
 }
 }
@@ -35,7 +35,6 @@ func (b *MessageQueueBroker) ConfigureTopic(ctx context.Context, request *mq_pb.
 }
 }

-
 t := topic.FromPbTopic(request.Topic)
 var readErr, assignErr error
 resp, readErr = b.readTopicConfFromFiler(t)
@@ -69,7 +69,7 @@ func (b *MessageQueueBroker) PublishMessage(stream mq_pb.SeaweedMessaging_Publis
 return stream.Send(response)
 }

-var receivedSequence, acknowledgedSequence int64
+var receivedSequence, acknowledgedSequence int64
 var isClosed bool

 // start sending ack to publisher
@@ -85,7 +85,7 @@ func (b *MessageQueueBroker) PublishMessage(stream mq_pb.SeaweedMessaging_Publis
 lastAckTime := time.Now()
 for !isClosed {
 receivedSequence = atomic.LoadInt64(&localTopicPartition.AckTsNs)
-if acknowledgedSequence < receivedSequence && (receivedSequence - acknowledgedSequence >= ackInterval || time.Since(lastAckTime) > 1*time.Second){
+if acknowledgedSequence < receivedSequence && (receivedSequence-acknowledgedSequence >= ackInterval || time.Since(lastAckTime) > 1*time.Second) {
 acknowledgedSequence = receivedSequence
 response := &mq_pb.PublishMessageResponse{
 AckSequence: acknowledgedSequence,
@@ -101,7 +101,6 @@ func (b *MessageQueueBroker) PublishMessage(stream mq_pb.SeaweedMessaging_Publis
 }
 }()

-
 // process each published messages
 clientName := fmt.Sprintf("%v-%4d/%s/%v", findClientAddress(stream.Context()), rand.Intn(10000), initMessage.Topic, initMessage.Partition)
 localTopicPartition.Publishers.AddPublisher(clientName, topic.NewLocalPublisher())
@@ -13,10 +13,11 @@ import (
 )

 type memBuffer struct {
-buf []byte
-startTime time.Time
-stopTime time.Time
+buf       []byte
+startTime time.Time
+stopTime  time.Time
 }

 func (b *MessageQueueBroker) PublishFollowMe(stream mq_pb.SeaweedMessaging_PublishFollowMeServer) (err error) {
 var req *mq_pb.PublishFollowMeRequest
 req, err = stream.Recv()
@@ -84,7 +85,6 @@ func (b *MessageQueueBroker) PublishFollowMe(stream mq_pb.SeaweedMessaging_Publi
 }
 }

-
 t, p := topic.FromPbTopic(initMessage.Topic), topic.FromPbPartition(initMessage.Partition)

 logBuffer.ShutdownLogBuffer()
@@ -97,7 +97,6 @@ func (b *MessageQueueBroker) PublishFollowMe(stream mq_pb.SeaweedMessaging_Publi
 partitionGeneration := time.Unix(0, p.UnixTimeNs).UTC().Format(topic.TIME_FORMAT)
 partitionDir := fmt.Sprintf("%s/%s/%04d-%04d", topicDir, partitionGeneration, p.RangeStart, p.RangeStop)

-
 // flush the remaining messages
 inMemoryBuffers.CloseInput()
 for mem, found := inMemoryBuffers.Dequeue(); found; mem, found = inMemoryBuffers.Dequeue() {
@@ -45,7 +45,7 @@ func (b *MessageQueueBroker) genLogFlushFunc(t topic.Topic, partition *mq_pb.Par
 b.accessLock.Lock()
 defer b.accessLock.Unlock()
 p := topic.FromPbPartition(partition)
-if localPartition:=b.localTopicManager.GetLocalPartition(t, p); localPartition!=nil {
+if localPartition := b.localTopicManager.GetLocalPartition(t, p); localPartition != nil {
 localPartition.NotifyLogFlushed(logBuffer.LastFlushTsNs)
 }

@@ -16,7 +16,7 @@ var (
 concurrency = flag.Int("c", 4, "concurrent publishers")
 partitionCount = flag.Int("p", 6, "partition count")

-clientName = flag.String("client", "c1", "client name")
+clientName = flag.String("client", "c1", "client name")

 namespace = flag.String("ns", "test", "namespace")
 t = flag.String("t", "test", "t")
@@ -19,7 +19,7 @@ var (
 concurrency = flag.Int("c", 4, "concurrent publishers")
 partitionCount = flag.Int("p", 6, "partition count")

-clientName = flag.String("client", "c1", "client name")
+clientName = flag.String("client", "c1", "client name")

 namespace = flag.String("ns", "test", "namespace")
 t = flag.String("t", "test", "t")
@@ -48,7 +48,7 @@ func (p *TopicPublisher) FinishPublish() error {
 if inputBuffers, found := p.partition2Buffer.AllIntersections(0, pub_balancer.MaxPartitionCount); found {
 for _, inputBuffer := range inputBuffers {
 inputBuffer.Enqueue(&mq_pb.DataMessage{
-TsNs: time.Now().UnixNano(),
+TsNs: time.Now().UnixNano(),
 Ctrl: &mq_pb.ControlMessage{
 IsClose: true,
 },
@@ -122,7 +122,7 @@ func (sub *TopicSubscriber) onEachPartition(assigned *mq_pb.BrokerPartitionAssig
 StartTsNs: sub.alreadyProcessedTsNs,
 StartType: mq_pb.PartitionOffsetStartType_EARLIEST_IN_MEMORY,
 },
-Filter: sub.ContentConfig.Filter,
+Filter:          sub.ContentConfig.Filter,
 FollowerBrokers: assigned.FollowerBrokers,
 },
 },
@@ -17,7 +17,7 @@ type BrokerStats struct {
 }
 type TopicPartitionStats struct {
 topic.TopicPartition
-PublisherCount int32
+PublisherCount  int32
 SubscriberCount int32
 }

@@ -48,7 +48,7 @@ func (bs *BrokerStats) UpdateStats(stats *mq_pb.BrokerStats) {
 UnixTimeNs: topicPartitionStats.Partition.UnixTimeNs,
 },
 },
-PublisherCount: topicPartitionStats.PublisherCount,
+PublisherCount:  topicPartitionStats.PublisherCount,
 SubscriberCount: topicPartitionStats.SubscriberCount,
 }
 publisherCount += topicPartitionStats.PublisherCount
@@ -76,7 +76,7 @@ func (bs *BrokerStats) RegisterAssignment(t *mq_pb.Topic, partition *mq_pb.Parti
 UnixTimeNs: partition.UnixTimeNs,
 },
 },
-PublisherCount: 0,
+PublisherCount:  0,
 SubscriberCount: 0,
 }
 key := tps.TopicPartition.String()
@@ -6,17 +6,17 @@ import (

 type Schema struct {
 RecordType *schema_pb.RecordType
-fieldMap map[string]*schema_pb.Field
+fieldMap   map[string]*schema_pb.Field
 }

 func NewSchema(recordType *schema_pb.RecordType) (*Schema, error) {
-fieldMap := make( map[string]*schema_pb.Field)
+fieldMap := make(map[string]*schema_pb.Field)
 for _, field := range recordType.Fields {
 fieldMap[field.Name] = field
 }
 return &Schema{
 RecordType: recordType,
-fieldMap: fieldMap,
+fieldMap:   fieldMap,
 }, nil
 }

@@ -8,9 +8,9 @@ import (
 var (
 TypeBoolean = &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{schema_pb.ScalarType_BOOL}}
 TypeInt32 = &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{schema_pb.ScalarType_INT32}}
-TypeInt64 = &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{schema_pb.ScalarType_INT64}}
-TypeFloat = &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{schema_pb.ScalarType_FLOAT}}
-TypeDouble = &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{schema_pb.ScalarType_DOUBLE}}
+TypeInt64 = &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{schema_pb.ScalarType_INT64}}
+TypeFloat = &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{schema_pb.ScalarType_FLOAT}}
+TypeDouble = &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{schema_pb.ScalarType_DOUBLE}}
 TypeBytes = &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{schema_pb.ScalarType_BYTES}}
 TypeString = &schema_pb.Type{Kind: &schema_pb.Type_ScalarType{schema_pb.ScalarType_STRING}}
 )
@@ -32,10 +32,10 @@ func TestEnumScalarType(t *testing.T) {

 func TestField(t *testing.T) {
 field := &Field{
-Name: "field_name",
-Type: &Type{Kind: &Type_ScalarType{ScalarType: ScalarType_INT32}},
-FieldIndex: 1,
-IsRepeated: false,
+Name:       "field_name",
+Type:       &Type{Kind: &Type_ScalarType{ScalarType: ScalarType_INT32}},
+FieldIndex: 1,
+IsRepeated: false,
 }
 assert.NotNil(t, field)
 }
@@ -44,32 +44,32 @@ func TestRecordType(t *testing.T) {
 subRecord := &RecordType{
 Fields: []*Field{
 {
-Name: "field_1",
-Type: &Type{Kind: &Type_ScalarType{ScalarType: ScalarType_INT32}},
-FieldIndex: 1,
-IsRepeated: false,
+Name:       "field_1",
+Type:       &Type{Kind: &Type_ScalarType{ScalarType: ScalarType_INT32}},
+FieldIndex: 1,
+IsRepeated: false,
 },
 {
-Name: "field_2",
-Type: &Type{Kind: &Type_ScalarType{ScalarType: ScalarType_STRING}},
-FieldIndex: 2,
-IsRepeated: false,
+Name:       "field_2",
+Type:       &Type{Kind: &Type_ScalarType{ScalarType: ScalarType_STRING}},
+FieldIndex: 2,
+IsRepeated: false,
 },
 },
 }
 record := &RecordType{
 Fields: []*Field{
 {
-Name: "field_key",
-Type: &Type{Kind: &Type_ScalarType{ScalarType: ScalarType_INT32}},
-FieldIndex: 1,
-IsRepeated: false,
+Name:       "field_key",
+Type:       &Type{Kind: &Type_ScalarType{ScalarType: ScalarType_INT32}},
+FieldIndex: 1,
+IsRepeated: false,
 },
 {
-Name: "field_record",
-Type: &Type{Kind: &Type_RecordType{RecordType: subRecord}},
-FieldIndex: 2,
-IsRepeated: false,
+Name:       "field_record",
+Type:       &Type{Kind: &Type_RecordType{RecordType: subRecord}},
+FieldIndex: 2,
+IsRepeated: false,
 },
 },
 }
@@ -76,7 +76,7 @@ func TestStructToSchema(t *testing.T) {
 RecordTypeBegin().
 WithField("Field3", TypeString).
 WithField("Field4", TypeInt32).
-RecordTypeEnd(),
+RecordTypeEnd(),
 ).
 RecordTypeEnd(),
 },
@@ -104,7 +104,7 @@ func TestStructToSchema(t *testing.T) {
 RecordTypeBegin().
 WithField("Field6", TypeString).
 WithField("Field7", TypeBytes).
-RecordTypeEnd(),
+RecordTypeEnd(),
 ).RecordTypeEnd(),
 ).
 RecordTypeEnd(),
@@ -7,9 +7,9 @@ import (

 type ParquetLevels struct {
 startColumnIndex int
-endColumnIndex int
-definitionDepth int
-levels map[string]*ParquetLevels
+endColumnIndex   int
+definitionDepth  int
+levels           map[string]*ParquetLevels
 }

 func ToParquetLevels(recordType *schema_pb.RecordType) (*ParquetLevels, error) {
@@ -19,7 +19,7 @@ func ToParquetLevels(recordType *schema_pb.RecordType) (*ParquetLevels, error) {
 func toFieldTypeLevels(fieldType *schema_pb.Type, startColumnIndex, definitionDepth int) (*ParquetLevels, error) {
 switch fieldType.Kind.(type) {
 case *schema_pb.Type_ScalarType:
-return toFieldTypeScalarLevels(fieldType.GetScalarType(), startColumnIndex, definitionDepth)
+return toFieldTypeScalarLevels(fieldType.GetScalarType(), startColumnIndex, definitionDepth)
 case *schema_pb.Type_RecordType:
 return toRecordTypeLevels(fieldType.GetRecordType(), startColumnIndex, definitionDepth)
 case *schema_pb.Type_ListType:
@@ -35,15 +35,15 @@ func toFieldTypeListLevels(listType *schema_pb.ListType, startColumnIndex, defin
 func toFieldTypeScalarLevels(scalarType schema_pb.ScalarType, startColumnIndex, definitionDepth int) (*ParquetLevels, error) {
 return &ParquetLevels{
 startColumnIndex: startColumnIndex,
-endColumnIndex: startColumnIndex + 1,
-definitionDepth: definitionDepth,
+endColumnIndex:   startColumnIndex + 1,
+definitionDepth:  definitionDepth,
 }, nil
 }
 func toRecordTypeLevels(recordType *schema_pb.RecordType, startColumnIndex, definitionDepth int) (*ParquetLevels, error) {
 recordTypeLevels := &ParquetLevels{
 startColumnIndex: startColumnIndex,
-definitionDepth: definitionDepth,
-levels: make(map[string]*ParquetLevels),
+definitionDepth:  definitionDepth,
+levels:           make(map[string]*ParquetLevels),
 }
 for _, field := range recordType.Fields {
 fieldTypeLevels, err := toFieldTypeLevels(field.Type, startColumnIndex, definitionDepth+1)
@@ -11,9 +11,9 @@ func TestToParquetLevels(t *testing.T) {
 recordType *schema_pb.RecordType
 }
 tests := []struct {
-name string
-args args
-want *ParquetLevels
+name string
+args args
+want *ParquetLevels
 }{
 {
 name: "nested type",
@@ -25,13 +25,13 @@ func TestToParquetLevels(t *testing.T) {
 RecordTypeBegin().
 WithField("zName", TypeString).
 WithField("emails", ListOf(TypeString)).
-RecordTypeEnd()).
+RecordTypeEnd()).
 WithField("Company", TypeString).
 WithRecordField("Address",
 RecordTypeBegin().
 WithField("Street", TypeString).
 WithField("City", TypeString).
-RecordTypeEnd()).
+RecordTypeEnd()).
 RecordTypeEnd(),
 },
 want: &ParquetLevels{
@@ -31,7 +31,6 @@ func toParquetFieldType(fieldType *schema_pb.Type) (dataType parquet.Node, err e
 return nil, fmt.Errorf("unknown field type: %T", fieldType.Kind)
 }

-
 return dataType, err
 }

@@ -70,7 +70,7 @@ func doVisitValue(fieldType *schema_pb.Type, levels *ParquetLevels, fieldValue *
 return
 }

-func toParquetValue(value *schema_pb.Value) (parquet.Value, error) {
+func toParquetValue(value *schema_pb.Value) (parquet.Value, error) {
 switch value.Kind.(type) {
 case *schema_pb.Value_BoolValue:
 return parquet.BooleanValue(value.GetBoolValue()), nil
@@ -47,7 +47,7 @@ func toRecordValue(recordType *schema_pb.RecordType, levels *ParquetLevels, valu
 func toListValue(listType *schema_pb.ListType, levels *ParquetLevels, values []parquet.Value, valueIndex int) (listValue *schema_pb.Value, endValueIndex int, err error) {
 listValues := make([]*schema_pb.Value, 0)
 var value *schema_pb.Value
-for ;valueIndex < len(values); {
+for valueIndex < len(values) {
 if values[valueIndex].Column() != levels.startColumnIndex {
 break
 }
@@ -67,19 +67,19 @@ func toScalarValue(scalarType schema_pb.ScalarType, levels *ParquetLevels, value
 }
 switch scalarType {
 case schema_pb.ScalarType_BOOL:
-return &schema_pb.Value{Kind: &schema_pb.Value_BoolValue{BoolValue: value.Boolean()}}, valueIndex+1, nil
+return &schema_pb.Value{Kind: &schema_pb.Value_BoolValue{BoolValue: value.Boolean()}}, valueIndex + 1, nil
 case schema_pb.ScalarType_INT32:
-return &schema_pb.Value{Kind: &schema_pb.Value_Int32Value{Int32Value: value.Int32()}}, valueIndex+1, nil
+return &schema_pb.Value{Kind: &schema_pb.Value_Int32Value{Int32Value: value.Int32()}}, valueIndex + 1, nil
 case schema_pb.ScalarType_INT64:
-return &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: value.Int64()}}, valueIndex+1, nil
+return &schema_pb.Value{Kind: &schema_pb.Value_Int64Value{Int64Value: value.Int64()}}, valueIndex + 1, nil
 case schema_pb.ScalarType_FLOAT:
-return &schema_pb.Value{Kind: &schema_pb.Value_FloatValue{FloatValue: value.Float()}}, valueIndex+1, nil
+return &schema_pb.Value{Kind: &schema_pb.Value_FloatValue{FloatValue: value.Float()}}, valueIndex + 1, nil
 case schema_pb.ScalarType_DOUBLE:
-return &schema_pb.Value{Kind: &schema_pb.Value_DoubleValue{DoubleValue: value.Double()}}, valueIndex+1, nil
+return &schema_pb.Value{Kind: &schema_pb.Value_DoubleValue{DoubleValue: value.Double()}}, valueIndex + 1, nil
 case schema_pb.ScalarType_BYTES:
-return &schema_pb.Value{Kind: &schema_pb.Value_BytesValue{BytesValue: value.ByteArray()}}, valueIndex+1, nil
+return &schema_pb.Value{Kind: &schema_pb.Value_BytesValue{BytesValue: value.ByteArray()}}, valueIndex + 1, nil
 case schema_pb.ScalarType_STRING:
-return &schema_pb.Value{Kind: &schema_pb.Value_StringValue{StringValue: string(value.ByteArray())}}, valueIndex+1, nil
+return &schema_pb.Value{Kind: &schema_pb.Value_StringValue{StringValue: string(value.ByteArray())}}, valueIndex + 1, nil
 }
 return nil, valueIndex, fmt.Errorf("unsupported scalar type: %v", scalarType)
 }
@@ -19,13 +19,13 @@ func TestWriteReadParquet(t *testing.T) {
 RecordTypeBegin().
 WithField("zName", TypeString).
 WithField("emails", ListOf(TypeString)).
-RecordTypeEnd()).
+RecordTypeEnd()).
 WithField("Company", TypeString).
 WithRecordField("Address",
 RecordTypeBegin().
 WithField("Street", TypeString).
 WithField("City", TypeString).
-RecordTypeEnd()).
+RecordTypeEnd()).
 RecordTypeEnd()
 fmt.Printf("RecordType: %v\n", recordType)

@@ -85,9 +85,9 @@ func testWritingParquetFile(t *testing.T, count int, filename string, parquetSch
 fmt.Sprintf("john_%d@c.com", i),
 fmt.Sprintf("john_%d@d.com", i),
 fmt.Sprintf("john_%d@e.com", i)).
-RecordEnd()).
+RecordEnd()).
 SetString("Company", fmt.Sprintf("company_%d", i)).
-RecordEnd()
+RecordEnd()
 AddRecordValue(rowBuilder, recordType, parquetLevels, recordValue)

 if count < 10 {
@@ -16,17 +16,17 @@ import (
 )

 type LocalPartition struct {
-ListenersWaits int64
-AckTsNs int64
+ListenersWaits int64
+AckTsNs        int64

 // notifying clients
 ListenersLock sync.Mutex
 ListenersCond *sync.Cond

 Partition
-LogBuffer *log_buffer.LogBuffer
-Publishers *LocalPartitionPublishers
-Subscribers *LocalPartitionSubscribers
+LogBuffer   *log_buffer.LogBuffer
+Publishers  *LocalPartitionPublishers
+Subscribers *LocalPartitionSubscribers

 followerStream mq_pb.SeaweedMessaging_PublishFollowMeClient
 followerGrpcConnection *grpc.ClientConn
@@ -37,7 +37,7 @@ var TIME_FORMAT = "2006-01-02-15-04-05"

 func NewLocalPartition(partition Partition, logFlushFn log_buffer.LogFlushFuncType, readFromDiskFn log_buffer.LogReadFromDiskFuncType) *LocalPartition {
 lp := &LocalPartition{
-Partition: partition,
+Partition:   partition,
 Publishers: NewLocalPartitionPublishers(),
 Subscribers: NewLocalPartitionSubscribers(),
 }
@@ -155,8 +155,8 @@ func (p *LocalPartition) MaybeConnectToFollowers(initMessage *mq_pb.PublishMessa
 if err = p.followerStream.Send(&mq_pb.PublishFollowMeRequest{
 Message: &mq_pb.PublishFollowMeRequest_Init{
 Init: &mq_pb.PublishFollowMeRequest_InitMessage{
-Topic: initMessage.Topic,
-Partition: initMessage.Partition,
+Topic:     initMessage.Topic,
+Partition: initMessage.Partition,
 },
 },
 }); err != nil {
@@ -278,7 +278,7 @@ func TestDeleteEmptySelection(t *testing.T) {
 eachDataNode(topologyInfo, func(dc string, rack RackId, dn *master_pb.DataNodeInfo) {
 for _, diskInfo := range dn.DiskInfos {
 for _, v := range diskInfo.VolumeInfos {
-if v.Size <= super_block.SuperBlockSize && v.ModifiedAtSecond > 0 {
+if v.Size <= super_block.SuperBlockSize && v.ModifiedAtSecond > 0 {
 fmt.Printf("empty volume %d from %s\n", v.Id, dn.Id)
 }
 }
@@ -31,20 +31,20 @@ type VolumeGrowRequest struct {
 }

 type volumeGrowthStrategy struct {
-Copy1Count int
-Copy2Count int
-Copy3Count int
+Copy1Count     int
+Copy2Count     int
+Copy3Count     int
 CopyOtherCount int
-Threshold float64
+Threshold      float64
 }

 var (
 VolumeGrowStrategy = volumeGrowthStrategy{
-Copy1Count: 7,
-Copy2Count: 6,
-Copy3Count: 3,
+Copy1Count:     7,
+Copy2Count:     6,
+Copy3Count:     3,
 CopyOtherCount: 1,
-Threshold: 0.9,
+Threshold:      0.9,
 }
 )

@@ -77,7 +77,8 @@ func NewDefaultVolumeGrowth() *VolumeGrowth {
 // given copyCount, how many logical volumes to create
 func (vg *VolumeGrowth) findVolumeCount(copyCount int) (count int) {
 switch copyCount {
-case 1: count = VolumeGrowStrategy.Copy1Count
+case 1:
+count = VolumeGrowStrategy.Copy1Count
 case 2:
 count = VolumeGrowStrategy.Copy2Count
 case 3:
@@ -381,7 +381,7 @@ func (vl *VolumeLayout) GetActiveVolumeCount(option *VolumeGrowOption) (total, a
 }
 active++
 info, _ := dn.GetVolumesById(v)
-if float64(info.Size) > float64(vl.volumeSizeLimit)* VolumeGrowStrategy.Threshold{
+if float64(info.Size) > float64(vl.volumeSizeLimit)*VolumeGrowStrategy.Threshold {
 crowded++
 }
 }
@@ -19,7 +19,7 @@ func TestNewLogBufferFirstBuffer(t *testing.T) {
 }, nil, func() {
 })

-startTime := MessagePosition{Time:time.Now()}
+startTime := MessagePosition{Time: time.Now()}

 messageSize := 1024
 messageCount := 5000
@@ -38,7 +38,7 @@ func TestNewLogBufferFirstBuffer(t *testing.T) {
 println("processed all messages")
 return true, io.EOF
 }
-return false,nil
+return false, nil
 })

 fmt.Printf("before flush: sent %d received %d\n", messageCount, receivedMessageCount)
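To check whether a file already matches the formatting this commit enforces, the standard library's go/format package applies the same normalization as gofmt. A minimal sketch, not part of this commit (the file path argument is whatever you choose to pass):

// Report whether a single Go source file is already gofmt-clean.
package main

import (
    "bytes"
    "fmt"
    "go/format"
    "os"
)

func main() {
    src, err := os.ReadFile(os.Args[1]) // path to a .go file
    if err != nil {
        fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }
    formatted, err := format.Source(src) // canonical gofmt output for the same source
    if err != nil {
        fmt.Fprintln(os.Stderr, err)
        os.Exit(1)
    }
    if bytes.Equal(src, formatted) {
        fmt.Println("already gofmt-clean")
    } else {
        fmt.Println("needs go fmt")
    }
}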