feat: support parallel writer (#847)
* feat: support parallel writer in sync & scan mode * feat: parallel parse, filter, function Signed-off-by: OxalisCu <2127298698@qq.com> * feat: parallel entry allocation Signed-off-by: OxalisCu <2127298698@qq.com> * feat: separate ops log and status monitoring --------- Signed-off-by: OxalisCu <2127298698@qq.com>
This commit is contained in:
parent
0f42759419
commit
66ca8e71f0
|
@ -5,6 +5,7 @@ import (
|
|||
_ "net/http/pprof"
|
||||
"os"
|
||||
"os/signal"
|
||||
"sync/atomic"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
|
@ -116,42 +117,95 @@ func main() {
|
|||
default:
|
||||
log.Panicf("no writer config entry found")
|
||||
}
|
||||
|
||||
// create status
|
||||
status.Init(theReader, theWriter)
|
||||
if config.Opt.Advanced.StatusPort != 0 {
|
||||
status.Init(theReader, theWriter)
|
||||
}
|
||||
// create log entry count
|
||||
logEntryCount := status.EntryCount{
|
||||
ReadCount: 0,
|
||||
WriteCount: 0,
|
||||
}
|
||||
|
||||
log.Infof("start syncing...")
|
||||
|
||||
ch := theReader.StartRead(ctx)
|
||||
go waitShutdown(cancel)
|
||||
|
||||
chrs := theReader.StartRead(ctx)
|
||||
|
||||
theWriter.StartWrite(ctx)
|
||||
|
||||
readerDone := make(chan bool)
|
||||
|
||||
for _, chr := range chrs {
|
||||
go func(ch chan *entry.Entry) {
|
||||
for e := range ch {
|
||||
// calc arguments
|
||||
e.Parse()
|
||||
|
||||
// update reader status
|
||||
if config.Opt.Advanced.StatusPort != 0 {
|
||||
status.AddReadCount(e.CmdName)
|
||||
}
|
||||
// update log entry count
|
||||
atomic.AddUint64(&logEntryCount.ReadCount, 1)
|
||||
|
||||
// filter
|
||||
if !filter.Filter(e) {
|
||||
log.Debugf("skip command: %v", e)
|
||||
continue
|
||||
}
|
||||
|
||||
// run lua function
|
||||
log.Debugf("function before: %v", e)
|
||||
entries := luaRuntime.RunFunction(e)
|
||||
log.Debugf("function after: %v", entries)
|
||||
|
||||
// write
|
||||
for _, theEntry := range entries {
|
||||
theEntry.Parse()
|
||||
theWriter.Write(theEntry)
|
||||
|
||||
// update writer status
|
||||
if config.Opt.Advanced.StatusPort != 0 {
|
||||
status.AddWriteCount(theEntry.CmdName)
|
||||
}
|
||||
// update log entry count
|
||||
atomic.AddUint64(&logEntryCount.WriteCount, 1)
|
||||
}
|
||||
}
|
||||
readerDone <- true
|
||||
}(chr)
|
||||
}
|
||||
|
||||
// caluate ops and log to screen
|
||||
go func() {
|
||||
if config.Opt.Advanced.LogInterval <= 0 {
|
||||
log.Infof("log interval is 0, will not log to screen")
|
||||
return
|
||||
}
|
||||
ticker := time.NewTicker(time.Duration(config.Opt.Advanced.LogInterval) * time.Second)
|
||||
defer ticker.Stop()
|
||||
for range ticker.C {
|
||||
logEntryCount.UpdateOPS()
|
||||
log.Infof("%s, %s", logEntryCount.String(), theReader.StatusString())
|
||||
}
|
||||
}()
|
||||
|
||||
ticker := time.NewTicker(1 * time.Second)
|
||||
defer ticker.Stop()
|
||||
readerCnt := len(chrs)
|
||||
Loop:
|
||||
for {
|
||||
select {
|
||||
case e, ok := <-ch:
|
||||
if !ok {
|
||||
// ch has been closed, exit the loop
|
||||
case done := <-readerDone:
|
||||
if done {
|
||||
readerCnt--
|
||||
}
|
||||
if readerCnt == 0 {
|
||||
break Loop
|
||||
}
|
||||
// calc arguments
|
||||
e.Parse()
|
||||
status.AddReadCount(e.CmdName)
|
||||
|
||||
// filter
|
||||
if !filter.Filter(e) {
|
||||
log.Debugf("skip command: %v", e)
|
||||
continue
|
||||
}
|
||||
log.Debugf("function before: %v", e)
|
||||
entries := luaRuntime.RunFunction(e)
|
||||
log.Debugf("function after: %v", entries)
|
||||
|
||||
for _, theEntry := range entries {
|
||||
theEntry.Parse()
|
||||
theWriter.Write(theEntry)
|
||||
status.AddWriteCount(theEntry.CmdName)
|
||||
}
|
||||
case <-ticker.C:
|
||||
pingEntry := entry.NewEntry()
|
||||
pingEntry.DbId = 0
|
||||
|
|
|
@ -66,7 +66,7 @@ func NewAOFReader(opts *AOFReaderOptions) Reader {
|
|||
return r
|
||||
}
|
||||
|
||||
func (r *aofReader) StartRead(ctx context.Context) chan *entry.Entry {
|
||||
func (r *aofReader) StartRead(ctx context.Context) []chan *entry.Entry {
|
||||
//init entry
|
||||
r.ch = make(chan *entry.Entry, 1024)
|
||||
|
||||
|
@ -101,5 +101,5 @@ func (r *aofReader) StartRead(ctx context.Context) chan *entry.Entry {
|
|||
|
||||
}()
|
||||
|
||||
return r.ch
|
||||
return []chan *entry.Entry{r.ch}
|
||||
}
|
||||
|
|
|
@ -8,5 +8,5 @@ import (
|
|||
|
||||
type Reader interface {
|
||||
status.Statusable
|
||||
StartRead(ctx context.Context) chan *entry.Entry
|
||||
}
|
||||
StartRead(ctx context.Context) []chan *entry.Entry
|
||||
}
|
||||
|
|
|
@ -8,6 +8,7 @@ import (
|
|||
"RedisShake/internal/log"
|
||||
"RedisShake/internal/rdb"
|
||||
"RedisShake/internal/utils"
|
||||
|
||||
"github.com/dustin/go-humanize"
|
||||
)
|
||||
|
||||
|
@ -41,7 +42,7 @@ func NewRDBReader(opts *RdbReaderOptions) Reader {
|
|||
return r
|
||||
}
|
||||
|
||||
func (r *rdbReader) StartRead(ctx context.Context) chan *entry.Entry {
|
||||
func (r *rdbReader) StartRead(ctx context.Context) []chan *entry.Entry {
|
||||
log.Infof("[%s] start read", r.stat.Name)
|
||||
r.ch = make(chan *entry.Entry, 1024)
|
||||
updateFunc := func(offset int64) {
|
||||
|
@ -58,7 +59,7 @@ func (r *rdbReader) StartRead(ctx context.Context) chan *entry.Entry {
|
|||
close(r.ch)
|
||||
}()
|
||||
|
||||
return r.ch
|
||||
return []chan *entry.Entry{r.ch}
|
||||
}
|
||||
|
||||
func (r *rdbReader) Status() interface{} {
|
||||
|
|
|
@ -3,7 +3,6 @@ package reader
|
|||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"RedisShake/internal/entry"
|
||||
"RedisShake/internal/utils"
|
||||
|
@ -26,23 +25,13 @@ func NewScanClusterReader(ctx context.Context, opts *ScanReaderOptions) Reader {
|
|||
return rd
|
||||
}
|
||||
|
||||
func (rd *scanClusterReader) StartRead(ctx context.Context) chan *entry.Entry {
|
||||
ch := make(chan *entry.Entry, 1024)
|
||||
var wg sync.WaitGroup
|
||||
func (rd *scanClusterReader) StartRead(ctx context.Context) []chan *entry.Entry {
|
||||
chs := make([]chan *entry.Entry, 0)
|
||||
for _, r := range rd.readers {
|
||||
wg.Add(1)
|
||||
go func(r Reader) {
|
||||
for e := range r.StartRead(ctx) {
|
||||
ch <- e
|
||||
}
|
||||
wg.Done()
|
||||
}(r)
|
||||
ch := r.StartRead(ctx)
|
||||
chs = append(chs, ch[0])
|
||||
}
|
||||
go func() {
|
||||
wg.Wait()
|
||||
close(ch)
|
||||
}()
|
||||
return ch
|
||||
return chs
|
||||
}
|
||||
|
||||
func (rd *scanClusterReader) Status() interface{} {
|
||||
|
|
|
@ -85,7 +85,7 @@ func NewScanStandaloneReader(ctx context.Context, opts *ScanReaderOptions) Reade
|
|||
return r
|
||||
}
|
||||
|
||||
func (r *scanStandaloneReader) StartRead(ctx context.Context) chan *entry.Entry {
|
||||
func (r *scanStandaloneReader) StartRead(ctx context.Context) []chan *entry.Entry {
|
||||
r.ctx = ctx
|
||||
if r.opts.Scan {
|
||||
go r.scan()
|
||||
|
@ -95,7 +95,7 @@ func (r *scanStandaloneReader) StartRead(ctx context.Context) chan *entry.Entry
|
|||
}
|
||||
go r.dump()
|
||||
go r.restore()
|
||||
return r.ch
|
||||
return []chan *entry.Entry{r.ch}
|
||||
}
|
||||
|
||||
func (r *scanStandaloneReader) subscript() {
|
||||
|
|
|
@ -3,7 +3,6 @@ package reader
|
|||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"RedisShake/internal/entry"
|
||||
"RedisShake/internal/log"
|
||||
|
@ -30,23 +29,13 @@ func NewSyncClusterReader(ctx context.Context, opts *SyncReaderOptions) Reader {
|
|||
return rd
|
||||
}
|
||||
|
||||
func (rd *syncClusterReader) StartRead(ctx context.Context) chan *entry.Entry {
|
||||
ch := make(chan *entry.Entry, 1024)
|
||||
var wg sync.WaitGroup
|
||||
func (rd *syncClusterReader) StartRead(ctx context.Context) []chan *entry.Entry {
|
||||
chs := make([]chan *entry.Entry, 0)
|
||||
for _, r := range rd.readers {
|
||||
wg.Add(1)
|
||||
go func(r Reader) {
|
||||
defer wg.Done()
|
||||
for e := range r.StartRead(ctx) {
|
||||
ch <- e
|
||||
}
|
||||
}(r)
|
||||
ch := r.StartRead(ctx)
|
||||
chs = append(chs, ch[0])
|
||||
}
|
||||
go func() {
|
||||
wg.Wait()
|
||||
close(ch)
|
||||
}()
|
||||
return ch
|
||||
return chs
|
||||
}
|
||||
|
||||
func (rd *syncClusterReader) Status() interface{} {
|
||||
|
|
|
@ -93,7 +93,7 @@ func NewSyncStandaloneReader(ctx context.Context, opts *SyncReaderOptions) Reade
|
|||
return r
|
||||
}
|
||||
|
||||
func (r *syncStandaloneReader) StartRead(ctx context.Context) chan *entry.Entry {
|
||||
func (r *syncStandaloneReader) StartRead(ctx context.Context) []chan *entry.Entry {
|
||||
r.ctx = ctx
|
||||
r.ch = make(chan *entry.Entry, 1024)
|
||||
go func() {
|
||||
|
@ -113,7 +113,7 @@ func (r *syncStandaloneReader) StartRead(ctx context.Context) chan *entry.Entry
|
|||
close(r.ch)
|
||||
}()
|
||||
|
||||
return r.ch
|
||||
return []chan *entry.Entry{r.ch}
|
||||
}
|
||||
|
||||
func (r *syncStandaloneReader) sendReplconfListenPort() {
|
||||
|
|
|
@ -18,7 +18,7 @@ type EntryCount struct {
|
|||
}
|
||||
|
||||
// call this function every second
|
||||
func (e *EntryCount) updateOPS() {
|
||||
func (e *EntryCount) UpdateOPS() {
|
||||
nowTimestampSec := float64(time.Now().UnixNano()) / 1e9
|
||||
if e.lastUpdateTimestampSec != 0 {
|
||||
timeIntervalSec := nowTimestampSec - e.lastUpdateTimestampSec
|
||||
|
|
|
@ -2,9 +2,6 @@ package status
|
|||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"RedisShake/internal/config"
|
||||
"RedisShake/internal/log"
|
||||
)
|
||||
|
||||
type Statusable interface {
|
||||
|
@ -32,9 +29,6 @@ var theWriter Statusable
|
|||
|
||||
func AddReadCount(cmd string) {
|
||||
ch <- func() {
|
||||
if stat.PerCmdEntriesCount == nil {
|
||||
stat.PerCmdEntriesCount = make(map[string]EntryCount)
|
||||
}
|
||||
cmdEntryCount, ok := stat.PerCmdEntriesCount[cmd]
|
||||
if !ok {
|
||||
cmdEntryCount = EntryCount{}
|
||||
|
@ -48,9 +42,6 @@ func AddReadCount(cmd string) {
|
|||
|
||||
func AddWriteCount(cmd string) {
|
||||
ch <- func() {
|
||||
if stat.PerCmdEntriesCount == nil {
|
||||
stat.PerCmdEntriesCount = make(map[string]EntryCount)
|
||||
}
|
||||
cmdEntryCount, ok := stat.PerCmdEntriesCount[cmd]
|
||||
if !ok {
|
||||
cmdEntryCount = EntryCount{}
|
||||
|
@ -68,6 +59,11 @@ func Init(r Statusable, w Statusable) {
|
|||
setStatusPort()
|
||||
stat.Time = time.Now().Format("2006-01-02 15:04:05")
|
||||
|
||||
// init per cmd entries count
|
||||
if stat.PerCmdEntriesCount == nil {
|
||||
stat.PerCmdEntriesCount = make(map[string]EntryCount)
|
||||
}
|
||||
|
||||
// for update reader/writer stat
|
||||
go func() {
|
||||
ticker := time.NewTicker(1 * time.Second)
|
||||
|
@ -81,29 +77,14 @@ func Init(r Statusable, w Statusable) {
|
|||
stat.Consistent = lastConsistent && theReader.StatusConsistent() && theWriter.StatusConsistent()
|
||||
lastConsistent = stat.Consistent
|
||||
// update OPS
|
||||
stat.TotalEntriesCount.updateOPS()
|
||||
stat.TotalEntriesCount.UpdateOPS()
|
||||
for _, cmdEntryCount := range stat.PerCmdEntriesCount {
|
||||
cmdEntryCount.updateOPS()
|
||||
cmdEntryCount.UpdateOPS()
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
// for log to screen
|
||||
go func() {
|
||||
if config.Opt.Advanced.LogInterval <= 0 {
|
||||
log.Infof("log interval is 0, will not log to screen")
|
||||
return
|
||||
}
|
||||
ticker := time.NewTicker(time.Duration(config.Opt.Advanced.LogInterval) * time.Second)
|
||||
defer ticker.Stop()
|
||||
for range ticker.C {
|
||||
ch <- func() {
|
||||
log.Infof("%s, %s", stat.TotalEntriesCount.String(), theReader.StatusString())
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
// run all func in ch
|
||||
go func() {
|
||||
for f := range ch {
|
||||
|
|
|
@ -3,10 +3,12 @@ package writer
|
|||
import (
|
||||
"RedisShake/internal/entry"
|
||||
"RedisShake/internal/status"
|
||||
"context"
|
||||
)
|
||||
|
||||
type Writer interface {
|
||||
status.Statusable
|
||||
Write(entry *entry.Entry)
|
||||
StartWrite(ctx context.Context) (ch chan *entry.Entry)
|
||||
Close()
|
||||
}
|
||||
|
|
|
@ -2,6 +2,7 @@ package writer
|
|||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
|
||||
"RedisShake/internal/entry"
|
||||
"RedisShake/internal/log"
|
||||
|
@ -14,18 +15,22 @@ type RedisClusterWriter struct {
|
|||
addresses []string
|
||||
writers []Writer
|
||||
router [KeySlots]Writer
|
||||
|
||||
stat []interface{}
|
||||
ch chan *entry.Entry
|
||||
chWg sync.WaitGroup
|
||||
stat []interface{}
|
||||
}
|
||||
|
||||
func NewRedisClusterWriter(ctx context.Context, opts *RedisWriterOptions) Writer {
|
||||
rw := new(RedisClusterWriter)
|
||||
rw.loadClusterNodes(ctx, opts)
|
||||
rw.ch = make(chan *entry.Entry, 1024)
|
||||
log.Infof("redisClusterWriter connected to redis cluster successful. addresses=%v", rw.addresses)
|
||||
return rw
|
||||
}
|
||||
|
||||
func (r *RedisClusterWriter) Close() {
|
||||
r.chWg.Wait()
|
||||
close(r.ch)
|
||||
for _, writer := range r.writers {
|
||||
writer.Close()
|
||||
}
|
||||
|
@ -54,6 +59,20 @@ func (r *RedisClusterWriter) loadClusterNodes(ctx context.Context, opts *RedisWr
|
|||
}
|
||||
}
|
||||
|
||||
func (r *RedisClusterWriter) StartWrite(ctx context.Context) chan *entry.Entry {
|
||||
chs := make(map[string]chan *entry.Entry, len(r.writers))
|
||||
for _, w := range r.writers {
|
||||
stat := w.Status().(struct {
|
||||
Name string `json:"name"`
|
||||
UnansweredBytes int64 `json:"unanswered_bytes"`
|
||||
UnansweredEntries int64 `json:"unanswered_entries"`
|
||||
})
|
||||
chs[stat.Name] = w.StartWrite(ctx)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *RedisClusterWriter) Write(entry *entry.Entry) {
|
||||
if len(entry.Slots) == 0 {
|
||||
for _, writer := range r.writers {
|
||||
|
@ -61,7 +80,6 @@ func (r *RedisClusterWriter) Write(entry *entry.Entry) {
|
|||
}
|
||||
return
|
||||
}
|
||||
|
||||
lastSlot := -1
|
||||
for _, slot := range entry.Slots {
|
||||
if lastSlot == -1 {
|
||||
|
|
|
@ -34,8 +34,10 @@ type redisStandaloneWriter struct {
|
|||
DbId int
|
||||
|
||||
chWaitReply chan *entry.Entry
|
||||
chWg sync.WaitGroup
|
||||
chWaitWg sync.WaitGroup
|
||||
offReply bool
|
||||
ch chan *entry.Entry
|
||||
chWg sync.WaitGroup
|
||||
|
||||
stat struct {
|
||||
Name string `json:"name"`
|
||||
|
@ -49,13 +51,14 @@ func NewRedisStandaloneWriter(ctx context.Context, opts *RedisWriterOptions) Wri
|
|||
rw.address = opts.Address
|
||||
rw.stat.Name = "writer_" + strings.Replace(opts.Address, ":", "_", -1)
|
||||
rw.client = client.NewRedisClient(ctx, opts.Address, opts.Username, opts.Password, opts.Tls, false)
|
||||
rw.ch = make(chan *entry.Entry, 1024)
|
||||
if opts.OffReply {
|
||||
log.Infof("turn off the reply of write")
|
||||
rw.offReply = true
|
||||
rw.client.Send("CLIENT", "REPLY", "OFF")
|
||||
} else {
|
||||
rw.chWaitReply = make(chan *entry.Entry, config.Opt.Advanced.PipelineCountLimit)
|
||||
rw.chWg.Add(1)
|
||||
rw.chWaitWg.Add(1)
|
||||
go rw.processReply()
|
||||
}
|
||||
return rw
|
||||
|
@ -63,29 +66,43 @@ func NewRedisStandaloneWriter(ctx context.Context, opts *RedisWriterOptions) Wri
|
|||
|
||||
func (w *redisStandaloneWriter) Close() {
|
||||
if !w.offReply {
|
||||
close(w.chWaitReply)
|
||||
close(w.ch)
|
||||
w.chWg.Wait()
|
||||
close(w.chWaitReply)
|
||||
w.chWaitWg.Wait()
|
||||
}
|
||||
}
|
||||
|
||||
func (w *redisStandaloneWriter) Write(e *entry.Entry) {
|
||||
// switch db if we need
|
||||
if w.DbId != e.DbId {
|
||||
w.switchDbTo(e.DbId)
|
||||
}
|
||||
func (w *redisStandaloneWriter) StartWrite(ctx context.Context) chan *entry.Entry {
|
||||
w.chWg = sync.WaitGroup{}
|
||||
w.chWg.Add(1)
|
||||
go func() {
|
||||
for e := range w.ch {
|
||||
// switch db if we need
|
||||
if w.DbId != e.DbId {
|
||||
w.switchDbTo(e.DbId)
|
||||
}
|
||||
// send
|
||||
bytes := e.Serialize()
|
||||
for e.SerializedSize+atomic.LoadInt64(&w.stat.UnansweredBytes) > config.Opt.Advanced.TargetRedisClientMaxQuerybufLen {
|
||||
time.Sleep(1 * time.Nanosecond)
|
||||
}
|
||||
log.Debugf("[%s] send cmd. cmd=[%s]", w.stat.Name, e.String())
|
||||
if !w.offReply {
|
||||
w.chWaitReply <- e
|
||||
atomic.AddInt64(&w.stat.UnansweredBytes, e.SerializedSize)
|
||||
atomic.AddInt64(&w.stat.UnansweredEntries, 1)
|
||||
}
|
||||
w.client.SendBytes(bytes)
|
||||
}
|
||||
w.chWg.Done()
|
||||
}()
|
||||
|
||||
// send
|
||||
bytes := e.Serialize()
|
||||
for e.SerializedSize+atomic.LoadInt64(&w.stat.UnansweredBytes) > config.Opt.Advanced.TargetRedisClientMaxQuerybufLen {
|
||||
time.Sleep(1 * time.Nanosecond)
|
||||
}
|
||||
log.Debugf("[%s] send cmd. cmd=[%s]", w.stat.Name, e.String())
|
||||
if !w.offReply {
|
||||
w.chWaitReply <- e
|
||||
atomic.AddInt64(&w.stat.UnansweredBytes, e.SerializedSize)
|
||||
atomic.AddInt64(&w.stat.UnansweredEntries, 1)
|
||||
}
|
||||
w.client.SendBytes(bytes)
|
||||
return w.ch
|
||||
}
|
||||
|
||||
func (w *redisStandaloneWriter) Write(e *entry.Entry) {
|
||||
w.ch <- e
|
||||
}
|
||||
|
||||
func (w *redisStandaloneWriter) switchDbTo(newDbId int) {
|
||||
|
@ -124,7 +141,7 @@ func (w *redisStandaloneWriter) processReply() {
|
|||
atomic.AddInt64(&w.stat.UnansweredBytes, -e.SerializedSize)
|
||||
atomic.AddInt64(&w.stat.UnansweredEntries, -1)
|
||||
}
|
||||
w.chWg.Done()
|
||||
w.chWaitWg.Done()
|
||||
}
|
||||
|
||||
func (w *redisStandaloneWriter) Status() interface{} {
|
||||
|
|
Loading…
Reference in New Issue