Code optimization

Author: 1340691923@qq.com
Date:   2022-03-03 15:50:32 +08:00
parent ba035974b5
commit e45788427e
7 changed files with 119 additions and 32 deletions

View File

@@ -12,6 +12,7 @@ import (
 	"github.com/1340691923/xwl_bi/engine/logs"
 	"github.com/1340691923/xwl_bi/middleware"
 	"github.com/1340691923/xwl_bi/model"
+	"github.com/1340691923/xwl_bi/platform-basic-libs/sinker"
 	_ "github.com/ClickHouse/clickhouse-go"
 	"github.com/buaazp/fasthttprouter"
 	_ "github.com/go-sql-driver/mysql"
@@ -72,6 +73,7 @@ func main() {
 			}
 		}
 	}()
+	go sinker.ClearDimsCacheByTime(time.Minute * 2)
 	router := fasthttprouter.New()
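
Both server entry points in this commit (this one and the consumer main below) now launch `sinker.ClearDimsCacheByTime`, a background sweeper defined later in the diff that periodically drops the local column-schema cache. For reference, a self-contained sketch of the same periodic-job shape written with time.Ticker instead of the commit's sleep loop; `sweepEvery` and its arguments are illustrative, not part of the repository:

    package main

    import (
    	"fmt"
    	"time"
    )

    // sweepEvery runs fn once per interval until stop is closed;
    // a time.Ticker variant of the commit's sleep-loop sweeper.
    func sweepEvery(interval time.Duration, stop <-chan struct{}, fn func()) {
    	t := time.NewTicker(interval)
    	defer t.Stop()
    	for {
    		select {
    		case <-t.C:
    			fn()
    		case <-stop:
    			return
    		}
    	}
    }

    func main() {
    	stop := make(chan struct{})
    	go sweepEvery(100*time.Millisecond, stop, func() { fmt.Println("sweep") })
    	time.Sleep(350 * time.Millisecond)
    	close(stop)
    }

A ticker also survives a panic-free refactor to multiple intervals more gracefully than nested sleeps, though the sleep loop in the commit behaves equivalently for a single fixed period.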

View File

@@ -212,10 +212,13 @@ func AddTableColumn(kafkaData model.KafkaData, failFunc func(data consumer_data.
 		redisConn := db.RedisPool.Get()
 		defer redisConn.Close()
 		dimsCachekey := sinker.GetDimsCachekey(model.GlobConfig.Comm.ClickHouse.DbName, tableName)
-		_, err = redisConn.Do("del", dimsCachekey)
+		_, err = redisConn.Do("unlink", dimsCachekey)
 		if err != nil {
+			redisConn.Do("del", dimsCachekey)
 			logs.Logger.Error("err", zap.Error(err))
 		}
+		sinker.ClearDimsCacheByKey(dimsCachekey)
 	}()
 }
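
The `del` → `unlink` switch above (the same pattern appears in ClearCacheByAppid and the new ClearDimsCacheByRedis further down) trades a potentially blocking delete for an asynchronous one: UNLINK, available since Redis 4.0, reclaims large values on a background thread, while DEL frees memory synchronously and can stall the server on big keys. A minimal sketch of the fallback pattern, assuming a redigo-style pool; the helper name is made up for illustration:

    package cache

    import "github.com/gomodule/redigo/redis"

    // deleteKeyAsync tries the non-blocking UNLINK first and falls back
    // to the blocking DEL on servers older than Redis 4.0, mirroring the
    // pattern used in this commit. (Illustrative helper, not from the repo.)
    func deleteKeyAsync(pool *redis.Pool, key string) error {
    	conn := pool.Get()
    	defer conn.Close()
    	if _, err := conn.Do("UNLINK", key); err != nil {
    		// Older servers reply "ERR unknown command"; DEL always works.
    		_, err = conn.Do("DEL", key)
    		return err
    	}
    	return nil
    }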

View File

@@ -24,6 +24,7 @@ import (
 	_ "net/http/pprof"
 	"runtime"
 	"strconv"
+	"time"
 )

 var (
@@ -108,6 +109,7 @@ func main() {
 	realTimeDataSarama := sinker.NewKafkaSarama()
 	reportData2CKSarama := realTimeDataSarama.Clone()
 	go action.MysqlConsumer()
+	go sinker.ClearDimsCacheByTime(time.Minute * 2)
 	var json = jsoniter.ConfigCompatibleWithStandardLibrary
 	pp, err := parser.NewParserPool()
 	if err != nil {

View File

@@ -19,7 +19,7 @@ func ClearCacheByAppid(key string) (err error) {
 	defer conn.Close()
 	_, err = conn.Do("unlink", key)
 	if err != nil {
-		_, err = conn.Do("del", key)
+		conn.Do("del", key)
 	}
 	return
 }

View File

@@ -7,7 +7,6 @@ import (
 	model2 "github.com/1340691923/xwl_bi/platform-basic-libs/sinker/model"
 	parser "github.com/1340691923/xwl_bi/platform-basic-libs/sinker/parse"
 	"go.uber.org/zap"
-	"log"
 	"strings"
 	"sync"
 	"time"
@@ -20,6 +19,7 @@ type ReportData2CK struct {
 	bufferMutex   *sync.RWMutex
 	batchSize     int
 	flushInterval int
+	pool          sync.Pool
 }

 func NewReportData2CK(batchSize int, flushInterval int) *ReportData2CK {
@@ -37,6 +37,19 @@ func NewReportData2CK(batchSize int, flushInterval int) *ReportData2CK {
 	return reportData2CK
 }

+func (this *ReportData2CK) GetBuffer() *bytes.Buffer {
+	v := this.pool.Get()
+	if v == nil {
+		return new(bytes.Buffer)
+	}
+	return v.(*bytes.Buffer)
+}
+
+func (this *ReportData2CK) PutBuffer(buff *bytes.Buffer) {
+	buff.Reset()
+	this.pool.Put(buff)
+}
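
GetBuffer and PutBuffer recycle bytes.Buffer instances through sync.Pool so Flush stops allocating a fresh buffer per call. Because the pool's New field is left unset here, Get can return nil, hence the explicit check. A standalone sketch of the same idea in the more common New-callback form (names are illustrative):

    package main

    import (
    	"bytes"
    	"fmt"
    	"sync"
    )

    // bufPool hands out reusable buffers; setting New means Get never
    // returns nil, which removes the nil check the commit's GetBuffer needs.
    var bufPool = sync.Pool{
    	New: func() interface{} { return new(bytes.Buffer) },
    }

    func main() {
    	buf := bufPool.Get().(*bytes.Buffer)
    	buf.WriteString("INSERT INTO t (`a`,`b`) VALUES (?,?)")
    	fmt.Println(buf.String())
    	buf.Reset() // always reset before returning to the pool
    	bufPool.Put(buf)
    }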
 func (this *ReportData2CK) Flush() (err error) {
 	this.bufferMutex.Lock()
 	if len(this.buffer) == 0 {
@@ -46,8 +59,8 @@ func (this *ReportData2CK) Flush() (err error) {
 	startNow := time.Now()
 	rowsMap := map[string][][]interface{}{}
-	for _, data := range this.buffer {
-		for tableName, metric := range data {
+	for bufferIndex := range this.buffer {
+		for tableName := range this.buffer[bufferIndex] {
 			rows := [][]interface{}{}
 			if _, haveKey := rowsMap[tableName]; haveKey {
 				rows = rowsMap[tableName]
@@ -58,7 +71,7 @@ func (this *ReportData2CK) Flush() (err error) {
 			dims := v.([]*model2.ColumnWithType)
 			var rowArr []interface{}
 			for _, dim := range dims {
-				val := parser.GetValueByType(metric, dim)
+				val := parser.GetValueByType(this.buffer[bufferIndex][tableName], dim)
 				rowArr = append(rowArr, val)
 			}
 			rows = append(rows, rowArr)
@@ -66,7 +79,8 @@ func (this *ReportData2CK) Flush() (err error) {
 		}
 	}
-	buffer := bytes.Buffer{}
+	bytesbuffer := this.GetBuffer()
+	defer this.PutBuffer(bytesbuffer)
 	TableColumnMap.Range(func(key, value interface{}) bool {
@@ -77,22 +91,27 @@ func (this *ReportData2CK) Flush() (err error) {
 		seriesDims := value.([]*model2.ColumnWithType)
 		serDimsQuoted := make([]string, len(seriesDims))
 		params := make([]string, len(seriesDims))
 		for i, serDim := range seriesDims {
-			serDimsQuoted[i] = "`" + serDim.Name + "`"
+			bytesbuffer.WriteString("`")
+			bytesbuffer.WriteString(serDim.Name)
+			bytesbuffer.WriteString("`")
+			serDimsQuoted[i] = bytesbuffer.String()
+			bytesbuffer.Reset()
 			params[i] = "?"
 		}
-		buffer.WriteString("INSERT INTO ")
-		buffer.WriteString(tableName)
-		buffer.WriteString(" (")
-		buffer.WriteString(strings.Join(serDimsQuoted, ","))
-		buffer.WriteString(") ")
-		buffer.WriteString("VALUES (")
-		buffer.WriteString(strings.Join(params, ","))
-		buffer.WriteString(")")
-		insertSql := buffer.String()
-		buffer.Reset()
+		bytesbuffer.WriteString("INSERT INTO ")
+		bytesbuffer.WriteString(tableName)
+		bytesbuffer.WriteString(" (")
+		bytesbuffer.WriteString(strings.Join(serDimsQuoted, ","))
+		bytesbuffer.WriteString(") ")
+		bytesbuffer.WriteString("VALUES (")
+		bytesbuffer.WriteString(strings.Join(params, ","))
+		bytesbuffer.WriteString(")")
+		insertSql := bytesbuffer.String()
+		bytesbuffer.Reset()
 		tx, err := db.ClickHouseSqlx.Begin()
 		if err != nil {
 			logs.Logger.Error("CK入库失败", zap.Error(err))
@@ -106,7 +125,6 @@ func (this *ReportData2CK) Flush() (err error) {
 		defer stmt.Close()
 		haveFail := false
 		for _, row := range rowsMap[tableName] {
-			log.Println("row", row)
 			if _, err := stmt.Exec(row...); err != nil {
 				logs.Logger.Error("CK入库失败", zap.Error(err))
 				haveFail = true
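
The flush path itself is the standard database/sql batch shape for ClickHouse: Begin a transaction, Prepare one parameterized INSERT, Exec it once per row, then Commit, at which point the clickhouse-go driver ships the buffered rows as a block. A condensed, runnable sketch of that flow; the DSN, table, and rows are placeholders:

    package main

    import (
    	"database/sql"
    	"log"

    	_ "github.com/ClickHouse/clickhouse-go" // same driver the repo imports
    )

    func main() {
    	db, err := sql.Open("clickhouse", "tcp://127.0.0.1:9000?database=default")
    	if err != nil {
    		log.Fatal(err)
    	}
    	tx, err := db.Begin()
    	if err != nil {
    		log.Fatal(err)
    	}
    	// One prepared statement, many Execs: the driver buffers the rows
    	// and writes them out together when the transaction commits.
    	stmt, err := tx.Prepare("INSERT INTO events (`name`,`value`) VALUES (?,?)")
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer stmt.Close()
    	for _, row := range [][]interface{}{{"a", 1}, {"b", 2}} {
    		if _, err := stmt.Exec(row...); err != nil {
    			log.Println("insert failed:", err)
    		}
    	}
    	if err := tx.Commit(); err != nil {
    		log.Fatal(err)
    	}
    }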

View File

@@ -18,6 +18,7 @@ import (
 	"regexp"
 	"strings"
 	"sync"
+	"time"
 )

 var (
@@ -38,27 +39,64 @@ func GetDimsCachekey(database, table string) string {
 	return dimsCachekey
 }

-func init() {
-}
+var dimsCacheMap sync.Map
+
+func ClearDimsCacheByTime(clearTime time.Duration) {
+	for {
+		time.Sleep(clearTime)
+		dimsCacheMap.Range(func(key, value interface{}) bool {
+			ClearDimsCacheByRedis(key.(string))
+			dimsCacheMap.Delete(key)
+			return true
+		})
+	}
+}
+
+func ClearDimsCacheByRedis(key string) {
+	redisConn := db.RedisPool.Get()
+	defer redisConn.Close()
+	_, err := redisConn.Do("unlink", key)
+	if err != nil {
+		redisConn.Do("del", key)
+		logs.Logger.Error("err", zap.Error(err))
+	}
+}
+
+func ClearDimsCacheByKey(key string) {
+	dimsCacheMap.Delete(key)
+}

 func GetDims(database, table string, excludedColumns []string, conn *sqlx.DB) (dims []*model2.ColumnWithType, err error) {
-	var json = jsoniter.ConfigCompatibleWithStandardLibrary
 	dimsCachekey := GetDimsCachekey(database, table)
+	cache, load := dimsCacheMap.Load(dimsCachekey)
+	if load {
+		return cache.([]*model2.ColumnWithType), nil
+	}
+	var json = jsoniter.ConfigCompatibleWithStandardLibrary
 	redisConn := db.RedisPool.Get()
 	defer redisConn.Close()
 	dimsBytes, redisErr := redis.Bytes(redisConn.Do("get", dimsCachekey))
 	if redisErr == nil && len(dimsBytes) != 0 {
-		jsonErr := json.Unmarshal(dimsBytes, &dims)
-		if jsonErr == nil {
-			return
-		} else {
-			logs.Logger.Error("jsonErr", zap.Error(jsonErr))
-		}
+		dimsCache, err := util.GzipUnCompressByte(dimsBytes)
+		if err == nil {
+			jsonErr := json.Unmarshal(dimsCache, &dims)
+			if jsonErr == nil {
+				dimsCacheMap.Store(dimsCachekey, dims)
+				return dims, err
+			} else {
+				logs.Logger.Error("jsonErr", zap.Error(jsonErr))
+			}
+		} else {
+			logs.Logger.Error("GzipUnCompressByte Err", zap.Error(err))
+		}
 	} else {
 		logs.Logger.Error("redisErr", zap.Error(redisErr))
 	}
@@ -66,7 +104,7 @@ func GetDims(database, table string, excludedColumns []string, conn *sqlx.DB) (dims []*model2.ColumnWithType, err error) {
 	var rs *sql.Rows
 	if rs, err = conn.Query(fmt.Sprintf(selectSQLTemplate, database, table)); err != nil {
 		err = errors.Wrapf(err, "")
-		return
+		return dims, err
 	}
 	defer rs.Close()
@@ -74,7 +112,7 @@ func GetDims(database, table string, excludedColumns []string, conn *sqlx.DB) (dims []*model2.ColumnWithType, err error) {
 	for rs.Next() {
 		if err = rs.Scan(&name, &typ, &defaultKind); err != nil {
 			err = errors.Wrapf(err, "")
-			return
+			return dims, err
 		}
 		typ = lowCardinalityRegexp.ReplaceAllString(typ, "$1")
 		if !util.InstrArr(excludedColumns, name) && defaultKind != "MATERIALIZED" {
@@ -84,14 +122,18 @@ func GetDims(database, table string, excludedColumns []string, conn *sqlx.DB) (dims []*model2.ColumnWithType, err error) {
 	}
 	if len(dims) == 0 {
 		err = errors.Wrapf(ErrTblNotExist, "%s.%s", database, table)
-		return
+		return dims, err
 	}
+	dimsCacheMap.Store(dimsCachekey, dims)
 	res, _ := json.Marshal(dims)
-	_, err = redisConn.Do("SETEX", dimsCachekey, 60*60*6, res)
-	return
+	s, err := util.GzipCompressByte(res)
+	if err != nil {
+		return dims, err
+	}
+	_, err = redisConn.Do("SETEX", dimsCachekey, 60*60*6, s)
+	return dims, err
 }

 func GetSourceName(name string) (sourcename string) {
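
With these changes GetDims reads through a two-level cache: the process-local dimsCacheMap first, then Redis (now holding gzip-compressed JSON under a 6-hour SETEX), and only on a double miss the ClickHouse system tables, writing the result back to the levels above. A stripped-down sketch of that read-through shape, with stub lookups standing in for the redigo and sqlx calls (all names here are illustrative):

    package main

    import (
    	"errors"
    	"fmt"
    	"sync"
    )

    var local sync.Map // level 1: per-process cache

    var errMiss = errors.New("cache miss")

    // Stub lookups standing in for the Redis and ClickHouse calls.
    func redisGet(key string) ([]string, error) { return nil, errMiss }
    func redisSet(key string, dims []string)    {}
    func queryDB(key string) ([]string, error)  { return []string{"distinct_id", "event"}, nil }

    // getDims resolves a table's columns through local -> Redis -> database,
    // repopulating each missed level on the way back out.
    func getDims(key string) ([]string, error) {
    	if v, ok := local.Load(key); ok {
    		return v.([]string), nil // level-1 hit
    	}
    	if dims, err := redisGet(key); err == nil {
    		local.Store(key, dims) // repopulate level 1
    		return dims, nil
    	}
    	dims, err := queryDB(key) // level 3: source of truth
    	if err != nil {
    		return nil, err
    	}
    	local.Store(key, dims)
    	redisSet(key, dims) // repopulate level 2
    	return dims, nil
    }

    func main() {
    	dims, _ := getDims("default.events")
    	fmt.Println(dims) // queried from the DB, now cached locally
    }

The periodic sweeper and ClearDimsCacheByKey keep level 1 from serving a schema that AddTableColumn has already invalidated in Redis.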

View File

@@ -6,6 +6,17 @@ import (
 	"io/ioutil"
 )

+func GzipCompressByte(data []byte) ([]byte, error) {
+	buf := bytes.NewBuffer(nil)
+	gzW := gzip.NewWriter(buf)
+	_, err := gzW.Write(data)
+	if err != nil {
+		return nil, err
+	}
+	gzW.Close()
+	return buf.Bytes(), err
+}
+
 func GzipCompress(data string) ([]byte, error) {
 	buf := bytes.NewBuffer(nil)
 	gzW := gzip.NewWriter(buf)
@@ -25,3 +36,12 @@ func GzipUnCompress(data []byte) (string, error) {
 	b, err := ioutil.ReadAll(gzR)
 	return Bytes2str(b), err
 }
+
+func GzipUnCompressByte(data []byte) ([]byte, error) {
+	gzR, err := gzip.NewReader(bytes.NewReader(data))
+	if err != nil {
+		return nil, err
+	}
+	b, err := ioutil.ReadAll(gzR)
+	return b, err
+}
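
These two []byte helpers exist so GetDims can shrink the marshalled column list before caching it in Redis. Note the ordering inside GzipCompressByte: gzip.Writer buffers internally, so Close must run before buf.Bytes() is read or the output is truncated, which the new function gets right. A small round-trip check of the pair (a plain main package for illustration):

    package main

    import (
    	"bytes"
    	"compress/gzip"
    	"fmt"
    	"io/ioutil"
    )

    func main() {
    	src := []byte(`[{"name":"distinct_id","type":"String"}]`)

    	// Compress: Close flushes gzip's internal buffer into buf,
    	// so it must happen before buf.Bytes() is read.
    	buf := bytes.NewBuffer(nil)
    	gzW := gzip.NewWriter(buf)
    	gzW.Write(src)
    	gzW.Close()

    	// Decompress and verify the round trip.
    	gzR, err := gzip.NewReader(bytes.NewReader(buf.Bytes()))
    	if err != nil {
    		panic(err)
    	}
    	out, _ := ioutil.ReadAll(gzR)
    	fmt.Println(bytes.Equal(src, out)) // true
    }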