commit 9e7b2a1673
parent c0ee570422
Author: 1340691923@qq.com
Date:   2022-03-04 09:52:18 +08:00

6 changed files with 28 additions and 26 deletions

File 1 of 6

@@ -99,7 +99,7 @@ func AddMetaEvent(kafkaData model.KafkaData) (err error) {
 func AddTableColumn(kafkaData model.KafkaData, failFunc func(data consumer_data.ReportAcceptStatusData), tableName string, ReqDataObject *parser.FastjsonMetric) (err error) {
-	dims, err := sinker.GetDims(model.GlobConfig.Comm.ClickHouse.DbName, tableName, nil, db.ClickHouseSqlx)
+	dims, err := sinker.GetDims(model.GlobConfig.Comm.ClickHouse.DbName, tableName, nil, db.ClickHouseSqlx, false)
 	if err != nil {
 		logs.Logger.Error("sinker.GetDims", zap.Error(err))
 		return

File 2 of 6

@@ -106,7 +106,7 @@ func (this ReportController) ReportAction(ctx *fasthttp.RequestCtx) {
 		return
 	}
-	dims, err := sinker.GetDims(model.GlobConfig.Comm.ClickHouse.DbName, kafkaData.GetTableName(), []string{}, db.ClickHouseSqlx)
+	dims, err := sinker.GetDims(model.GlobConfig.Comm.ClickHouse.DbName, kafkaData.GetTableName(), []string{}, db.ClickHouseSqlx, true)
 	if err != nil {
 		logs.Logger.Error("sinker.GetDims", zap.Error(err))
 		this.FastError(ctx, errors.New("服务异常"))
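
Note: both call sites above gain a trailing boolean to match the new GetDims signature in file 6 of this commit. The Kafka-consumer path (AddTableColumn) passes false and keeps using the in-process dims cache, while the HTTP report path (ReportAction) passes true to bypass it, presumably so that a column added by AddTableColumn moments earlier is visible to the very next report request. A minimal runnable sketch of that bypass pattern; dimsCache, getDims, and loadDimsFromSource are hypothetical names, not the project's API:

package main

import (
	"fmt"
	"sync"
)

// dimsCache mirrors the role of sinker's dimsCacheMap: an in-process
// cache keyed by "database.table".
var dimsCache sync.Map

// getDims returns the column list for a table. When onlyRedis is true
// the in-process cache is skipped, so the caller always reads through
// to the slower source of truth and sees freshly added columns.
func getDims(key string, onlyRedis bool) []string {
	if !onlyRedis {
		if cached, ok := dimsCache.Load(key); ok {
			return cached.([]string)
		}
	}
	dims := loadDimsFromSource(key) // stand-in for Redis / ClickHouse lookup
	dimsCache.Store(key, dims)
	return dims
}

func loadDimsFromSource(key string) []string {
	return []string{"event", "create_time"} // illustrative data
}

func main() {
	fmt.Println(getDims("db.tbl", false)) // consumer path: cache allowed
	fmt.Println(getDims("db.tbl", true))  // report path: always re-read
}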

File 3 of 6

@@ -24,6 +24,7 @@ type RealTimeWarehousing struct {
 }
 func NewRealTimeWarehousing(batchSize, flushInterval int) *RealTimeWarehousing {
+	logs.Logger.Info("NewRealTimeWarehousing", zap.Int("batchSize", batchSize), zap.Int("flushInterval", flushInterval))
 	realTimeWarehousing := &RealTimeWarehousing{
 		buffer: make([]*RealTimeWarehousingData, 0, batchSize),
 		bufferMutex: new(sync.RWMutex),
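
Note: files 3 and 4 add the same one-line startup log to their constructors, using zap's typed fields rather than format strings. A minimal standalone sketch, assuming the standard go.uber.org/zap package that the surrounding zap.Int/zap.Error calls indicate:

package main

import "go.uber.org/zap"

func main() {
	logger, _ := zap.NewProduction()
	defer logger.Sync()

	batchSize, flushInterval := 1000, 5
	// Typed fields keep the entry structured (no fmt-style formatting),
	// matching the style used throughout this commit.
	logger.Info("NewRealTimeWarehousing",
		zap.Int("batchSize", batchSize),
		zap.Int("flushInterval", flushInterval))
}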

File 4 of 6

@@ -31,6 +31,7 @@ const FailStatus = 0
 const SuccessStatus = 1
 func NewReportAcceptStatus(batchSize int, flushInterval int) *ReportAcceptStatus {
+	logs.Logger.Info("NewReportAcceptStatus", zap.Int("batchSize", batchSize), zap.Int("flushInterval", flushInterval))
 	reportAcceptStatus := &ReportAcceptStatus{
 		buffer: make([]*ReportAcceptStatusData, 0, batchSize),
 		bufferMutex: new(sync.RWMutex),
@@ -85,10 +86,9 @@ func (this *ReportAcceptStatus) Flush() (err error) {
 	if err := tx.Commit(); err != nil {
 		logs.Logger.Error("入库数据状态出现错误", zap.Error(err))
 	} else {
-		lostTime := time.Now().Sub(startNow).String()
 		len := len(this.buffer)
 		if len > 0 {
-			logs.Logger.Info("入库数据状态成功", zap.String("所花时间", lostTime), zap.Int("数据长度为", len))
+			logs.Logger.Info("入库数据状态成功", zap.String("所花时间", time.Now().Sub(startNow).String()), zap.Int("数据长度为", len))
 		}
 	}
 	stmt.Close()
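
Note: this hunk and the matching one in file 5 drop the lostTime temporary and compute the elapsed time inline. time.Now().Sub(startNow) is exactly equivalent to the more idiomatic time.Since(startNow), as this small sketch shows:

package main

import (
	"fmt"
	"time"
)

func main() {
	startNow := time.Now()
	time.Sleep(10 * time.Millisecond) // stand-in for the flush work

	// The two expressions below are equivalent; time.Since is the
	// idiomatic shorthand for time.Now().Sub(t).
	fmt.Println(time.Now().Sub(startNow).String())
	fmt.Println(time.Since(startNow).String())
}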

File 5 of 6

@@ -46,18 +46,19 @@ func (this *ReportData2CK) Flush() (err error) {
 	startNow := time.Now()
 	rowsMap := map[string][][]interface{}{}
+	rowArr := []interface{}{}
+	rows := [][]interface{}{}
 	for bufferIndex := range this.buffer {
 		for tableName := range this.buffer[bufferIndex] {
-			rows := [][]interface{}{}
+			rows := rows[0:0]
 			if _, haveKey := rowsMap[tableName]; haveKey {
 				rows = rowsMap[tableName]
 			} else {
 				rowsMap[tableName] = rows
 			}
-			v, _ := TableColumnMap.Load(tableName)
-			dims := v.([]*model2.ColumnWithType)
-			var rowArr []interface{}
-			for _, dim := range dims {
+			dims, _ := TableColumnMap.Load(tableName)
+			rowArr = rowArr[0:0]
+			for _, dim := range dims.([]*model2.ColumnWithType) {
 				val := parser.GetValueByType(this.buffer[bufferIndex][tableName], dim)
 				rowArr = append(rowArr, val)
 			}
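
Note: this hunk hoists rowArr and rows out of the loop and re-slices them with [0:0], reusing their backing arrays to cut per-iteration allocations. That trick is only safe while nothing else still references the truncated slice's contents; the lines where rowArr is appended into rows are not shown in this hunk, so whether a copy is taken there is unclear. A minimal sketch of the aliasing hazard the pattern can introduce:

package main

import "fmt"

func main() {
	// Reusing a slice via s[:0] avoids reallocation, but only when the
	// previous contents are no longer referenced anywhere else.
	row := []interface{}{}
	stored := [][]interface{}{}

	for _, v := range []int{1, 2, 3} {
		row = row[:0]                // reuse the backing array
		row = append(row, v)         // overwrite index 0 in place
		stored = append(stored, row) // every entry aliases the same array!
	}
	fmt.Println(stored) // prints [[3] [3] [3]], not [[1] [2] [3]]
}

If the reused slice is stored and then truncated again, appending a copy first (e.g. append([]interface{}(nil), row...)) restores correctness at the cost of one allocation per row.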
@@ -68,11 +69,9 @@ func (this *ReportData2CK) Flush() (err error) {
 	bytesbuffer := bytes.Buffer{}
-	TableColumnMap.Range(func(key, value interface{}) bool {
-		tableName := key.(string)
-		if _, haveKey := rowsMap[tableName]; haveKey {
+	TableColumnMap.Range(func(tableName, value interface{}) bool {
+		if _, haveKey := rowsMap[tableName.(string)]; haveKey {
 			seriesDims := value.([]*model2.ColumnWithType)
 			serDimsQuoted := make([]string, len(seriesDims))
@@ -84,7 +83,7 @@ func (this *ReportData2CK) Flush() (err error) {
 			}
 			bytesbuffer.WriteString("INSERT INTO ")
-			bytesbuffer.WriteString(tableName)
+			bytesbuffer.WriteString(tableName.(string))
 			bytesbuffer.WriteString(" (")
 			bytesbuffer.WriteString(strings.Join(serDimsQuoted, ","))
 			bytesbuffer.WriteString(") ")
@@ -92,22 +91,23 @@ func (this *ReportData2CK) Flush() (err error) {
 			bytesbuffer.WriteString(strings.Join(params, ","))
 			bytesbuffer.WriteString(")")
-			insertSql := bytesbuffer.String()
-			bytesbuffer.Reset()
+			defer func() {
+				bytesbuffer.Reset()
+			}()
 			tx, err := db.ClickHouseSqlx.Begin()
 			if err != nil {
 				logs.Logger.Error("CK入库失败", zap.Error(err))
 				return false
 			}
-			stmt, err := tx.Prepare(insertSql)
+			stmt, err := tx.Prepare(bytesbuffer.String())
 			if err != nil {
 				logs.Logger.Error("CK入库失败", zap.Error(err))
 				return false
 			}
 			defer stmt.Close()
 			haveFail := false
-			for _, row := range rowsMap[tableName] {
-				logs.Logger.Sugar().Infof("insertSQL", insertSql,row)
+			for _, row := range rowsMap[tableName.(string)] {
 				if _, err := stmt.Exec(row...); err != nil {
 					logs.Logger.Error("CK入库失败", zap.Error(err))
 					haveFail = true
@@ -118,9 +118,8 @@ func (this *ReportData2CK) Flush() (err error) {
 				logs.Logger.Error("CK入库失败", zap.Error(err))
 				return false
 			} else {
-				lostTime := time.Now().Sub(startNow).String()
 				len := len(this.buffer)
-				logs.Logger.Info("CK入库成功", zap.String("所花时间", lostTime), zap.Int("数据长度为", len))
+				logs.Logger.Info("CK入库成功", zap.String("所花时间", time.Now().Sub(startNow).String()), zap.Int("数据长度为", len))
 			}
 		}
 	}
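
Note: the three hunks above tighten the Range callback: the key parameter is consumed directly via type assertions instead of an extra tableName local, the statement is prepared from bytesbuffer.String() in place of the removed insertSql temporary (which takes the Sugar().Infof debug line with it), and the Reset moves into a defer. Since the defer sits inside the callback, it runs when each callback invocation returns, so the buffer is cleared once per table, including on the early "return false" error paths. A minimal runnable sketch of the pattern, with illustrative table data:

package main

import (
	"bytes"
	"fmt"
	"strings"
	"sync"
)

func main() {
	// tableColumns stands in for the package's TableColumnMap.
	var tableColumns sync.Map
	tableColumns.Store("event_log", []string{"event", "create_time"})
	tableColumns.Store("user_log", []string{"uid", "create_time"})

	buf := bytes.Buffer{}
	tableColumns.Range(func(tableName, value interface{}) bool {
		// Runs when this callback invocation returns, i.e. per table,
		// so the buffer is reset even on early error returns.
		defer buf.Reset()

		cols := value.([]string)
		params := make([]string, len(cols))
		for i := range params {
			params[i] = "?"
		}
		buf.WriteString("INSERT INTO ")
		buf.WriteString(tableName.(string)) // sync.Map keys arrive untyped
		buf.WriteString(" (")
		buf.WriteString(strings.Join(cols, ","))
		buf.WriteString(") VALUES (")
		buf.WriteString(strings.Join(params, ","))
		buf.WriteString(")")
		fmt.Println(buf.String())
		return true
	})
}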

File 6 of 6

@@ -84,12 +84,14 @@ func ClearDimsCacheByKey(key string){
 	dimsCacheMap.Delete(key)
 }
-func GetDims(database, table string, excludedColumns []string, conn *sqlx.DB) (dims []*model2.ColumnWithType, err error) {
+func GetDims(database, table string, excludedColumns []string, conn *sqlx.DB, onlyRedis bool) (dims []*model2.ColumnWithType, err error) {
 	dimsCachekey := GetDimsCachekey(database, table)
-	cache, load := dimsCacheMap.Load(dimsCachekey)
-	if load {
-		return cache.([]*model2.ColumnWithType), nil
+	if !onlyRedis {
+		cache, load := dimsCacheMap.Load(dimsCachekey)
+		if load {
+			return cache.([]*model2.ColumnWithType), nil
+		}
 	}
 	var json = jsoniter.ConfigCompatibleWithStandardLibrary
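
Note: the trailing context line shows GetDims already relying on jsoniter for its JSON work; ConfigCompatibleWithStandardLibrary is jsoniter's drop-in replacement for encoding/json. A minimal sketch of that usage:

package main

import (
	"fmt"

	jsoniter "github.com/json-iterator/go"
)

// Drop-in replacement for encoding/json, as at the end of GetDims.
var json = jsoniter.ConfigCompatibleWithStandardLibrary

func main() {
	b, _ := json.Marshal(map[string]string{"table": "event_log"})
	fmt.Println(string(b))

	var m map[string]string
	_ = json.Unmarshal(b, &m)
	fmt.Println(m["table"])
}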