diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..e050434 --- /dev/null +++ b/.gitignore @@ -0,0 +1,48 @@ +#java ignore +# Compiled class file +*.class + +# Log file +*.log +**/log +**/logs +# example yaml files dir +**/execyaml + +# result or report file +compare_* + +# BlueJ files +*.ctxt + +# Mobile Tools for Java (J2ME) +.mtj.tmp/ + +# Package Files # +*.jar +*.war +*.nar +*.ear +*.zip +*.tar.gz +*.rar +# go ignore +**/vendor +*.exe +*.exe~ +*.dll +*.so +*.dylib + + +#idea ignore +**/.idea + +# vscode +**/.vscode +# rust ignore +Cargo.lock +**/target +**/*.rs.bk + + diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..d645695 --- /dev/null +++ b/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/README.md b/README.md new file mode 100644 index 0000000..f496168 --- /dev/null +++ b/README.md @@ -0,0 +1,22 @@ +# redissyncer-scenariotest + +## 项目目的 + +本项目用于redissyncer 涉及场景的自动化测试,branch distribution用于分布式场景的自动化测试 + +## 项目架构 + +* caseyaml目录中的yaml文件用于描述场景用例,包括生成数据的批次,同步程序的位置等信息 +* tasks目录用于存放任务创建json,用来创建各种同步任务,供case调用创建对应的任务 + +# 场景描述 + +* 单实例同步 +* 单实例带映射关系的同步 +* 单实例到原生集群同步 +* 原生集群到原生集群同步 +* 单实例同步+断点续传 +* 单实例带映射关系的同步+断点续传 +* 单实例到原生集群同步+断点续传 +* 原生集群到原生集群同步+断点续传 +* 任务节点shutdown情况下的任务迁移 \ No newline at end of file diff --git a/Todo.md b/Todo.md new file mode 100644 index 0000000..eb03ec2 --- /dev/null +++ b/Todo.md @@ -0,0 +1,13 @@ +- [ ] compare 模块多线程改造 +- [X] Single2SingleWithDbMap编写 +- [x] yaml编排测试用例 +- [ ] redis conn健康检查和重试 +- [ ] 重新部署redis集群,指定master和slave +- [X] 细化redis基础命令 +- [ ] 节点宕机后任务迁移测试 + +## 重构 + +- [ ] 重构log模块 +- [ ] 重新数据生成cmd + diff --git a/cases/basecase.go b/cases/basecase.go new file mode 100644 index 0000000..e2b3401 --- /dev/null +++ b/cases/basecase.go @@ -0,0 +1,255 @@ +package cases + +import ( + "github.com/tidwall/gjson" + "go.uber.org/zap" + "gopkg.in/yaml.v2" + "io/ioutil" + "os" + "runtime" + "testcase/global" + //"testcase/globalzap" + "testcase/synctaskhandle" + "time" +) + +//var logger = global.RSPLog + +type CaseType int32 + +const ( + Case_Single2Single = iota + Case_Single2SingleWithDBMap + Case_Single2Cluster + Case_Cluster2Cluster + 
Case_ImportRdb2Single + Case_ImportAof2Single + Case_ImportRdb2Cluster + Case_ImportAof2Cluster +) + +var CaseTypeMap = map[int32]string{ + Case_Single2Single: "Single2Single", + Case_Single2SingleWithDBMap: "Single2SingleWithDBMap", + Case_Single2Cluster: "Single2Cluster", + Case_Cluster2Cluster: "Cluster2Cluster", + Case_ImportRdb2Single: "ImportRdb2Single", + Case_ImportAof2Single: "ImportAof2Single", + Case_ImportRdb2Cluster: "ImportRdb2Cluster", + Case_ImportAof2Cluster: "ImportAof2Cluster", +} + +func (ct CaseType) String() string { + switch ct { + case Case_Single2Single: + return "Single2Single" + case Case_Single2SingleWithDBMap: + return "Single2SingleWithDBMap" + case Case_Single2Cluster: + return "Single2Cluster" + case Case_Cluster2Cluster: + return "Cluster2Cluster" + case Case_ImportRdb2Single: + return "ImportRdb2Single" + case Case_ImportAof2Single: + return "ImportAof2Single" + case Case_ImportRdb2Cluster: + return "ImportRdb2Cluster" + case Case_ImportAof2Cluster: + return "ImportAof2Cluster" + default: + return "" + } +} + +type TestCase struct { + SyncServer string `yaml:"syncserver"` //redissyncer server address + SyncServerSshPort string `yaml:"syncserversshport"` + SyncServerOsUser string `yaml:"syncserverosuser"` //redissyncer server操作系统用户 + SyncServerOsUserPassword string `yaml:"syncserverosuserpassword"` //redissyncer server操作系统用户密码 + CreateTaskFile string `yaml:"createtaskfile"` //任务创建json文件路径 + GenDataDuration int `yaml:"gendataduration"` //持续产生增量数据的时间,单位为秒 + DataGenInterval int64 `yaml:"datageninterval"` //线程内数据生成间隔,单位为毫秒 + GenDataThreads int `yaml:"gendatathreads"` //持续生成数据的线程数量 + BigKV_KeySuffix_Len int `yaml:"bigkvkeysuffixlen"` //大key后缀位数,按位数生成key后缀 + BigKV_Loopstep int `yaml:"bigkvloopstep"` //大key循环次数,该参数决定大key value的长度 + BigKV_EXPIRE int `yaml:"bigkvexpire"` //大key过期时间,单位为秒 + BigKV_ValuePrefix_Len int `yaml:"bigkvvalueprefixlen"` //大key value前缀长度,按长度生成值的前缀 + Increment_KeySuffix_Len int `yaml:"incrementkeysuffixlen"` 
//增量数据key后缀位数,按位生成key后缀 + Increment_Loopstep int `yaml:"incrementloopstep"` //增量数据循环长度,影响增量数据value长度或操作次数 + Increment_EXPIRE int `yaml:"incrementexpire"` //增量数据过期时间,单位为秒 + Increment_Threads int `yaml:"incrementthreads"` //生成增量数据的线程数量 + Compare_BatchSize int64 `yaml:"comparebatchsize"` //比较List、Set、Zset类型时的每批次值的数量 + Compare_Threads int `yaml:"comparethreads"` //比较db线程数量 + Compare_TTLDiff float64 `yaml:"comparettldiff"` //TTL最小差值 + GenRdbRedis string `yaml:"genrdbredis"` //导入文件时生成rdb文件的redis服务器地址 + GenRdbRedisPassword string `yaml:"genrdbredispassword"` //导入文件案例中生成rdb文件的redis服务器密码 + GenRdbRedisOsUser string `yaml:"genrdbredisosuser"` //产生rdb文件的服务器操作系统user + GenRdbRedisOsUserPassword string `yaml:"genrdbredisosuserpassword"` //产生rdb文件的服务器操作系统user's password + DumpFilePath string `yaml:"dumpfilepath"` //rdb文件路径 + SyncOsFilePath string `yaml:"syncosfilepath"` //dump 或 aof文件或目录的操作系统路径 + CaseType CaseType `yaml:"casetype"` //案例类型编号,可以通过 listcases子命令查询对应的case编号 +} + +func NewTestCase() TestCase { + tc := TestCase{ + SyncServer: "127.0.0.1:8080", + GenDataDuration: 60, + DataGenInterval: int64(300), + GenDataThreads: runtime.NumCPU(), + BigKV_KeySuffix_Len: 4, + BigKV_Loopstep: 20, + BigKV_EXPIRE: 3600, + BigKV_ValuePrefix_Len: 512, + Increment_KeySuffix_Len: 4, + Increment_Loopstep: 20, + Increment_EXPIRE: 1800, + Increment_Threads: runtime.NumCPU(), + Compare_BatchSize: int64(50), + Compare_Threads: runtime.NumCPU(), + Compare_TTLDiff: float64(100000), + CaseType: Case_Single2Single, + } + + return tc +} + +func (tc *TestCase) Exec() { + switch tc.CaseType.String() { + case "Single2Single": + global.RSPLog.Sugar().Info("Execute " + tc.CaseType.String()) + tc.Single2Single() + case "Single2SingleWithDBMap": + global.RSPLog.Sugar().Info("Execute " + tc.CaseType.String()) + tc.Single2SingleWithDBMap() + case "Single2Cluster": + global.RSPLog.Sugar().Info("Execute " + tc.CaseType.String()) + tc.Single2Cluster() + case "Cluster2Cluster": + global.RSPLog.Sugar().Info("Execute " 
+ tc.CaseType.String()) + tc.Cluster2Cluster() + case "ImportRdb2Single": + global.RSPLog.Sugar().Info("Execute " + tc.CaseType.String()) + tc.ImportRdb2Single() + case "ImportAof2Single": + global.RSPLog.Sugar().Info("Execute " + tc.CaseType.String()) + tc.ImportAof2Single() + case "ImportRdb2Cluster": + global.RSPLog.Sugar().Info("Execute " + tc.CaseType.String()) + tc.ImportRdb2Cluster() + case "ImportAof2Cluster": + global.RSPLog.Sugar().Info("Execute " + tc.CaseType.String()) + tc.ImportAof2Cluster() + default: + global.RSPLog.Sugar().Info("Nothing to be executed") + return + } + +} + +//解析yaml文件获取testcase +func (tc *TestCase) ParseYamlFile(filepath string) error { + yamlFile, err := ioutil.ReadFile(filepath) + if err != nil { + global.RSPLog.Sugar().Error(err) + return err + } + err = yaml.Unmarshal(yamlFile, tc) + if err != nil { + global.RSPLog.Sugar().Error(err) + return err + } + return nil +} + +//解析同步任务的jsonfile +func (tc *TestCase) ParseJsonFile(casefile string) []byte { + + jsonFile, err := os.Open(casefile) + defer jsonFile.Close() + + if err != nil { + //logger.Println(err) + global.RSPLog.Info(err.Error()) + os.Exit(1) + } + + jsonbytes, err := ioutil.ReadAll(jsonFile) + if err != nil { + //logger.Println(err) + global.RSPLog.Info(err.Error()) + os.Exit(1) + } + return jsonbytes +} + +//验证任务状态是否可以关闭,并保证数据同步完成 +func (tc *TestCase) CheckSyncTaskStatus(taskids []string) { + //查看任务状态,直到COMMANDRUNING状态 + global.RSPLog.Sugar().Info("Check task status begin...") + + for { + + iscommandrunning := false + statusmap, err := synctaskhandle.GetTaskStatus(tc.SyncServer, taskids) + + if err != nil { + global.RSPLog.Sugar().Error(err) + os.Exit(1) + } + + if len(statusmap) == 0 { + global.RSPLog.Error("No status return") + os.Exit(1) + } + + for k, v := range statusmap { + + lastKeyAcross, err := synctaskhandle.GetLastKeyAcross(tc.SyncServer, k) + if err != nil { + global.RSPLog.Sugar().Error(err) + continue + } + + if v == "" { + global.RSPLog.Error("Task not 
exists ", zap.String("taskid", k)) + os.Exit(1) + } + + if lastKeyAcross.LastKeyAcross.LastKeyCommitTime == 0 { + //time.Sleep(60 * time.Second) + iscommandrunning = true + } + + global.RSPLog.Sugar().Info(v) + + if gjson.Get(v, "taskStatus.status").Int() == 7 { + if lastKeyAcross.LastKeyAcross.LastKeyCommitTime > 0 { + //如果当前时间与lastKeyCommitTime相减超过20000毫秒 iscommandrunning = true + localUnixTimestamp := time.Now().UnixNano() / 1e6 + if localUnixTimestamp-lastKeyAcross.LastKeyAcross.LastKeyCommitTime > 20000 { + iscommandrunning = true + } + } + } + + if gjson.Get(v, "taskStatus").Int() == 1 || + gjson.Get(v, "taskStatus").Int() == 2 || + gjson.Get(v, "taskStatus").Int() == 3 || + gjson.Get(v, "taskStatus").Int() == 6 { + iscommandrunning = false + } + + if gjson.Get(v, "taskStatus").Int() == 5 { + global.RSPLog.Error("sync task broken! ", zap.String("taskid", k), zap.String("task_status", v)) + os.Exit(1) + } + + } + if iscommandrunning { + return + } + time.Sleep(10 * time.Second) + + } +} diff --git a/cases/basecase_test.go b/cases/basecase_test.go new file mode 100644 index 0000000..c6fbc4a --- /dev/null +++ b/cases/basecase_test.go @@ -0,0 +1,17 @@ +package cases + +import ( + "testing" + "time" +) + +func TestBaseCase(t *testing.T) { + duration := 5 * time.Second + genreport := false + creattaskfile := "../single2single.json" + loopstep := 1 + syncserver := "http://114.67.67.7:8080" + + BaseCase(duration, genreport, creattaskfile, int64(loopstep), syncserver) + +} diff --git a/cases/clustercases.go b/cases/clustercases.go new file mode 100644 index 0000000..6117682 --- /dev/null +++ b/cases/clustercases.go @@ -0,0 +1,320 @@ +package cases + +import ( + "context" + "github.com/go-redis/redis/v7" + "github.com/panjf2000/ants/v2" + "github.com/pkg/errors" + "github.com/tidwall/gjson" + "os" + "strings" + "sync" + "testcase/commons" + "testcase/compare" + "testcase/generatedata" + "testcase/global" + "testcase/synctaskhandle" + "time" +) + 
+//基本测试案例单实例2Cluster,无映射关系 +func (tc *TestCase) Single2Cluster() { + createjson := tc.ParseJsonFile(tc.CreateTaskFile) + increment_pool, _ := ants.NewPool(tc.Increment_Threads) + defer increment_pool.Release() + + saddr := gjson.Get(string(createjson), "sourceRedisAddress").String() + taddrs := gjson.Get(string(createjson), "targetRedisAddress").String() + spasswd := gjson.Get(string(createjson), "sourcePassword").String() + tpasswd := gjson.Get(string(createjson), "targetPassword").String() + taskname := gjson.Get(string(createjson), "taskName").String() + + taddrsarray := strings.Split(taddrs, ";") + + sopt := &redis.Options{ + Addr: saddr, + DB: 0, // use default DB + } + + if spasswd != "" { + sopt.Password = spasswd + } + sclient := commons.GetGoRedisClient(sopt) + + topt := &redis.ClusterOptions{ + Addrs: taddrsarray, + } + + if tpasswd != "" { + topt.Password = tpasswd + } + + tclient := redis.NewClusterClient(topt) + + defer sclient.Close() + defer tclient.Close() + + //check redis 连通性 + if !commons.CheckRedisClientConnect(sclient) { + global.RSPLog.Sugar().Error(errors.New("Cannot connect source redis")) + os.Exit(1) + } + if !commons.CheckRedisClusterClientConnect(tclient) { + global.RSPLog.Sugar().Error(errors.New("Cannot connect target redis")) + os.Exit(1) + } + + //check redissycner-server 是否可用 + + //清理redis + sclient.FlushAll() + tclient.FlushAll() + + for _, v := range taddrsarray { + opt := &redis.Options{ + Addr: v, + } + + if tpasswd != "" { + opt.Password = tpasswd + } + + client := redis.NewClient(opt) + defer client.Close() + client.FlushAll() + + } + //清理任务 + global.RSPLog.Sugar().Info("Clean Task beging...") + synctaskhandle.RemoveTaskByName(tc.SyncServer, taskname) + global.RSPLog.Sugar().Info("Clean Task end") + + //生成垫底数据 + bgkv := generatedata.GenBigKV{ + KeySuffix: commons.RandString(tc.BigKV_KeySuffix_Len), + Length: tc.BigKV_Loopstep, + EXPIRE: time.Duration(tc.BigKV_EXPIRE) * time.Second, + ValuePrefix: 
commons.RandString(tc.BigKV_ValuePrefix_Len), + } + bgkv.GenerateBaseDataParallel(sclient) + + //创建任务 + global.RSPLog.Sugar().Info("Create Task beging...") + taskids := synctaskhandle.CreateTask(tc.SyncServer, string(createjson)) + global.RSPLog.Sugar().Info("Task Id is: ", taskids) + + //启动任务 + for _, v := range taskids { + synctaskhandle.StartTask(tc.SyncServer, v) + } + + global.RSPLog.Sugar().Info("Create Task end") + + //生成增量数据 + d := time.Now().Add(time.Duration(tc.GenDataDuration) * time.Second) + ctx, cancel := context.WithDeadline(context.Background(), d) + defer cancel() + + wg := sync.WaitGroup{} + + for i := 0; i < tc.GenDataThreads; i++ { + bo := &generatedata.OptSingle{ + RedisConn: sclient.Conn(), + KeySuffix: commons.RandString(tc.Increment_KeySuffix_Len), + Loopstep: tc.Increment_Loopstep, + EXPIRE: time.Duration(tc.Increment_EXPIRE) * time.Second, + } + wg.Add(1) + increment_pool.Submit(func() { + bo.KeepExecBasicOpt(ctx, time.Duration(tc.DataGenInterval)*time.Millisecond, true) + wg.Done() + }) + } + wg.Wait() + + //查看任务状态,直到COMMANDRUNING状态 + tc.CheckSyncTaskStatus(taskids) + global.RSPLog.Sugar().Info("Check task status end") + + //停止任务 + for _, id := range taskids { + synctaskhandle.StopTaskByIds(tc.SyncServer, id) + } + + //数据校验 + compare := &compare.CompareSingle2Cluster{ + Source: sclient, + Target: tclient, + BatchSize: tc.Compare_BatchSize, + TTLDiff: tc.Compare_TTLDiff, + CompareThreads: tc.Compare_Threads, + } + + compare.CompareDB() +} + +//基本测试案例Cluster2Cluster,无映射关系 +func (tc *TestCase) Cluster2Cluster() { + createjson := tc.ParseJsonFile(tc.CreateTaskFile) + increment_pool, _ := ants.NewPool(tc.Increment_Threads) + defer increment_pool.Release() + + saddrs := gjson.Get(string(createjson), "sourceRedisAddress").String() + taddrs := gjson.Get(string(createjson), "targetRedisAddress").String() + spasswd := gjson.Get(string(createjson), "sourcePassword").String() + tpasswd := gjson.Get(string(createjson), "targetPassword").String() + 
taskname := gjson.Get(string(createjson), "taskName").String() + + saddrsarray := strings.Split(saddrs, ";") + taddrsarray := strings.Split(taddrs, ";") + + sopt := &redis.ClusterOptions{ + Addrs: saddrsarray, + } + + if spasswd != "" { + sopt.Password = spasswd + } + + sclient := redis.NewClusterClient(sopt) + + topt := &redis.ClusterOptions{ + Addrs: taddrsarray, + } + + if tpasswd != "" { + topt.Password = tpasswd + } + + tclient := redis.NewClusterClient(topt) + + defer sclient.Close() + defer tclient.Close() + + //check redis 连通性 + if !commons.CheckRedisClusterClientConnect(sclient) { + global.RSPLog.Sugar().Error(errors.New("Cannot connect source redis")) + os.Exit(1) + } + if !commons.CheckRedisClusterClientConnect(tclient) { + global.RSPLog.Sugar().Error(errors.New("Cannot connect target redis")) + os.Exit(1) + } + + //check redissycner-server 是否可用 + + //清理redis + for _, v := range saddrsarray { + opt := &redis.Options{ + Addr: v, + } + + if tpasswd != "" { + opt.Password = tpasswd + } + + client := redis.NewClient(opt) + defer client.Close() + client.FlushAll() + + } + + for _, v := range taddrsarray { + opt := &redis.Options{ + Addr: v, + } + + if tpasswd != "" { + opt.Password = tpasswd + } + + client := redis.NewClient(opt) + defer client.Close() + client.FlushAll() + + } + + //清理任务 + global.RSPLog.Sugar().Info("Clean Task beging...") + synctaskhandle.RemoveTaskByName(tc.SyncServer, taskname) + global.RSPLog.Sugar().Info("Clean Task end") + + //生成垫底数据 + bgkv := generatedata.GenBigKVCluster{ + RedisClusterClient: sclient, + KeySuffix: commons.RandString(tc.BigKV_KeySuffix_Len), + Length: tc.BigKV_Loopstep, + EXPIRE: time.Duration(tc.BigKV_EXPIRE) * time.Second, + ValuePrefix: commons.RandString(tc.BigKV_ValuePrefix_Len), + DataGenInterval: time.Duration(tc.DataGenInterval) * time.Millisecond, + } + bgkv.GenerateBaseDataParallelCluster() + + //创建任务 + global.RSPLog.Sugar().Info("Create Task beging...") + taskids := synctaskhandle.CreateTask(tc.SyncServer, 
string(createjson)) + global.RSPLog.Sugar().Info("Task Id is: ", taskids) + + //启动任务 + for _, v := range taskids { + synctaskhandle.StartTask(tc.SyncServer, v) + } + + global.RSPLog.Sugar().Info("Create Task end") + + //生成增量数据 + d := time.Now().Add(time.Duration(tc.GenDataDuration) * time.Second) + ctx, cancel := context.WithDeadline(context.Background(), d) + defer cancel() + + wg := sync.WaitGroup{} + + for i := 0; i < tc.GenDataThreads; i++ { + bo := &generatedata.OptCluster{ + ClusterClient: sclient, + KeySuffix: commons.RandString(tc.Increment_KeySuffix_Len), + Loopstep: tc.Increment_Loopstep, + EXPIRE: time.Duration(tc.Increment_EXPIRE) * time.Second, + } + wg.Add(1) + increment_pool.Submit(func() { + bo.KeepExecBasicOptCluster(ctx, time.Duration(tc.DataGenInterval)*time.Millisecond) + wg.Done() + }) + } + wg.Wait() + + //查看任务状态,直到COMMANDRUNING状态 + tc.CheckSyncTaskStatus(taskids) + global.RSPLog.Sugar().Info("Check task status end") + + //停止任务 + for _, id := range taskids { + synctaskhandle.StopTaskByIds(tc.SyncServer, id) + } + + //数据校验 + for _, v := range saddrsarray { + opt := &redis.Options{ + Addr: v, + } + + if tpasswd != "" { + opt.Password = tpasswd + } + + client := redis.NewClient(opt) + defer client.Close() + + compare := &compare.CompareSingle2Cluster{ + Source: client, + Target: tclient, + BatchSize: tc.Compare_BatchSize, + TTLDiff: tc.Compare_TTLDiff, + CompareThreads: tc.Compare_Threads, + } + compare.CompareDB() + + } + +} diff --git a/cases/importcases.go b/cases/importcases.go new file mode 100644 index 0000000..78bd5aa --- /dev/null +++ b/cases/importcases.go @@ -0,0 +1,332 @@ +package cases + +import ( + "context" + "errors" + "fmt" + "github.com/go-redis/redis/v7" + "github.com/panjf2000/ants/v2" + "github.com/tidwall/gjson" + "os" + "strings" + "sync" + "testcase/commons" + "testcase/compare" + "testcase/generatedata" + "testcase/global" + "testcase/synctaskhandle" + "time" +) + +//rdb文件导入功能测试用例 +func (tc *TestCase) ImportRdb2Single() { 
+ createjson := tc.ParseJsonFile(tc.CreateTaskFile) + increment_pool, _ := ants.NewPool(tc.Increment_Threads) + defer increment_pool.Release() + + saddr := tc.GenRdbRedis + spasswd := tc.GenRdbRedisPassword + taddr := gjson.Get(string(createjson), "targetRedisAddress").String() + tpasswd := gjson.Get(string(createjson), "targetPassword").String() + taskname := gjson.Get(string(createjson), "taskName").String() + //fileaddr := gjson.Get(string(createjson), "fileAddress").String() + + sopt := &redis.Options{ + Addr: saddr, + DB: 0, // use default DB + } + + if spasswd != "" { + sopt.Password = spasswd + } + sclient := commons.GetGoRedisClient(sopt) + + topt := &redis.Options{ + Addr: taddr, + DB: 0, // use default DB + } + + if tpasswd != "" { + topt.Password = tpasswd + } + + tclient := commons.GetGoRedisClient(topt) + + defer sclient.Close() + defer tclient.Close() + + //check redis 连通性 + if !commons.CheckRedisClientConnect(sclient) { + global.RSPLog.Sugar().Error(errors.New("Cannot connect source redis")) + os.Exit(1) + } + if !commons.CheckRedisClientConnect(tclient) { + global.RSPLog.Sugar().Error(errors.New("Cannot connect target redis")) + os.Exit(1) + } + + //check redissycner-server 是否可用 + + //清理redis + sclient.FlushAll() + tclient.FlushAll() + + //生成垫底数据 + bgkv := generatedata.GenBigKV{ + KeySuffix: commons.RandString(tc.BigKV_KeySuffix_Len), + Length: tc.BigKV_Loopstep, + EXPIRE: time.Duration(tc.BigKV_EXPIRE) * time.Second, + ValuePrefix: commons.RandString(tc.BigKV_ValuePrefix_Len), + } + bgkv.GenerateBaseDataParallel(sclient) + + //生成增量数据 + d := time.Now().Add(time.Duration(tc.GenDataDuration) * time.Second) + ctx, cancel := context.WithDeadline(context.Background(), d) + defer cancel() + + wg := sync.WaitGroup{} + + for i := 0; i < tc.GenDataThreads; i++ { + bo := &generatedata.OptSingle{ + RedisConn: sclient.Conn(), + KeySuffix: commons.RandString(tc.Increment_KeySuffix_Len), + Loopstep: tc.Increment_Loopstep, + EXPIRE: 
time.Duration(tc.Increment_EXPIRE) * time.Second, + } + + wg.Add(1) + increment_pool.Submit(func() { + bo.KeepExecBasicOpt(ctx, time.Duration(tc.DataGenInterval)*time.Millisecond, false) + wg.Done() + }) + } + wg.Wait() + + //生成rdb文件 + sclient.Save() + time.Sleep(10 * time.Second) + + //复制rdb文件到redissyncer所在服务器指定目录 + syncserverip := strings.Split(strings.Split(tc.SyncServer, "//")[1], ":")[0] + sshclient, err := commons.GenSshClient(tc.SyncServerOsUser, tc.SyncServerOsUserPassword, syncserverip+tc.SyncServerSshPort) + if err != nil { + global.RSPLog.Sugar().Error(err) + return + } + defer sshclient.Close() + session, err := sshclient.NewSession() + if err != nil { + global.RSPLog.Sugar().Error(err) + return + } + defer session.Close() + + rdbip := strings.Split(tc.GenRdbRedis, ":")[0] + sshcmd := "ssh-keyscan " + rdbip + " >> ~/.ssh/known_hosts;" + + "sshpass -p " + tc.GenRdbRedisOsUserPassword + " scp " + tc.GenRdbRedisOsUser + "@" + rdbip + ":" + tc.DumpFilePath + " " + tc.SyncOsFilePath + ";" + fmt.Println(sshcmd) + + cprdbtosyncserver, err := session.CombinedOutput(sshcmd) + if err != nil { + global.RSPLog.Sugar().Error(err) + return + } + global.RSPLog.Sugar().Info(string(cprdbtosyncserver)) + + //清理任务 + global.RSPLog.Sugar().Info("Clean Task beging...") + synctaskhandle.RemoveTaskByName(tc.SyncServer, taskname) + global.RSPLog.Sugar().Info("Clean Task end") + + //创建任务 + global.RSPLog.Sugar().Info("Create Task beging...") + taskids := synctaskhandle.Import(tc.SyncServer, string(createjson)) + global.RSPLog.Sugar().Info("Task Id is: ", taskids) + + //启动任务 + for _, v := range taskids { + synctaskhandle.StartTask(tc.SyncServer, v) + } + + global.RSPLog.Sugar().Info("Create Task end") + + //查看任务状态,直到COMMANDRUNING状态 + tc.CheckSyncTaskStatus(taskids) + global.RSPLog.Sugar().Info("Check task status end") + + //停止任务 + for _, id := range taskids { + synctaskhandle.StopTaskByIds(tc.SyncServer, id) + } + + //数据校验 + compare := &compare.CompareSingle2Single{ + Source: 
sclient, + Target: tclient, + BatchSize: tc.Compare_BatchSize, + TTLDiff: tc.Compare_TTLDiff, + CompareThreads: tc.Compare_Threads, + } + + compare.CompareDB() +} + +//Aof文件导入功能测试用例 +func (tc *TestCase) ImportAof2Single() { + createjson := tc.ParseJsonFile(tc.CreateTaskFile) + increment_pool, _ := ants.NewPool(tc.Increment_Threads) + defer increment_pool.Release() + + saddr := tc.GenRdbRedis + spasswd := tc.GenRdbRedisPassword + taddr := gjson.Get(string(createjson), "targetRedisAddress").String() + tpasswd := gjson.Get(string(createjson), "targetPassword").String() + taskname := gjson.Get(string(createjson), "taskName").String() + //fileaddr := gjson.Get(string(createjson), "fileAddress").String() + + sopt := &redis.Options{ + Addr: saddr, + DB: 0, // use default DB + } + + if spasswd != "" { + sopt.Password = spasswd + } + sclient := commons.GetGoRedisClient(sopt) + + topt := &redis.Options{ + Addr: taddr, + DB: 0, // use default DB + } + + if tpasswd != "" { + topt.Password = tpasswd + } + + tclient := commons.GetGoRedisClient(topt) + + defer sclient.Close() + defer tclient.Close() + + //check redis 连通性 + if !commons.CheckRedisClientConnect(sclient) { + global.RSPLog.Sugar().Error(errors.New("Cannot connect source redis")) + os.Exit(1) + } + if !commons.CheckRedisClientConnect(tclient) { + global.RSPLog.Sugar().Error(errors.New("Cannot connect target redis")) + os.Exit(1) + } + + //check redissycner-server 是否可用 + + //清理redis + sclient.FlushAll() + tclient.FlushAll() + + //清理任务 + global.RSPLog.Sugar().Info("Clean Task beging...") + synctaskhandle.RemoveTaskByName(tc.SyncServer, taskname) + global.RSPLog.Sugar().Info("Clean Task end") + + //生成垫底数据 + bgkv := generatedata.GenBigKV{ + KeySuffix: commons.RandString(tc.BigKV_KeySuffix_Len), + Length: tc.BigKV_Loopstep, + EXPIRE: time.Duration(tc.BigKV_EXPIRE) * time.Second, + ValuePrefix: commons.RandString(tc.BigKV_ValuePrefix_Len), + } + bgkv.GenerateBaseDataParallel(sclient) + + //生成增量数据 + d := 
time.Now().Add(time.Duration(tc.GenDataDuration) * time.Second) + ctx, cancel := context.WithDeadline(context.Background(), d) + defer cancel() + + wg := sync.WaitGroup{} + + for i := 0; i < tc.GenDataThreads; i++ { + bo := &generatedata.OptSingle{ + RedisConn: sclient.Conn(), + KeySuffix: commons.RandString(tc.Increment_KeySuffix_Len), + Loopstep: tc.Increment_Loopstep, + EXPIRE: time.Duration(tc.Increment_EXPIRE) * time.Second, + } + + wg.Add(1) + increment_pool.Submit(func() { + bo.KeepExecBasicOpt(ctx, time.Duration(tc.DataGenInterval)*time.Millisecond, false) + wg.Done() + }) + } + wg.Wait() + time.Sleep(10 * time.Second) + + //复制aof文件到redissyncer所在服务器指定目录 + syncserverip := strings.Split(strings.Split(tc.SyncServer, "//")[1], ":")[0] + sshclient, err := commons.GenSshClient(tc.SyncServerOsUser, tc.SyncServerOsUserPassword, syncserverip+tc.SyncServerSshPort) + if err != nil { + global.RSPLog.Sugar().Error(err) + return + } + defer sshclient.Close() + session, err := sshclient.NewSession() + if err != nil { + global.RSPLog.Sugar().Error(err) + return + } + defer session.Close() + + rdbip := strings.Split(tc.GenRdbRedis, ":")[0] + sshcmd := "ssh-keyscan " + rdbip + " >> ~/.ssh/known_hosts;" + + "cd " + tc.SyncOsFilePath + ";" + "rm -fr *.aof ;" + + "sshpass -p " + tc.GenRdbRedisOsUserPassword + " scp " + tc.GenRdbRedisOsUser + "@" + rdbip + ":" + tc.DumpFilePath + " " + tc.SyncOsFilePath + ";" + fmt.Println(sshcmd) + + cprdbtosyncserver, err := session.CombinedOutput(sshcmd) + + if err != nil { + global.RSPLog.Sugar().Error(err) + return + } + global.RSPLog.Sugar().Info(string(cprdbtosyncserver)) + + //创建任务 + global.RSPLog.Sugar().Info("Create Task beging...") + taskids := synctaskhandle.Import(tc.SyncServer, string(createjson)) + global.RSPLog.Sugar().Info("Task Id is: ", taskids) + + //启动任务 + for _, v := range taskids { + synctaskhandle.StartTask(tc.SyncServer, v) + } + + global.RSPLog.Sugar().Info("Create Task end") + + //查看任务状态,直到COMMANDRUNING状态 + 
tc.CheckSyncTaskStatus(taskids) + global.RSPLog.Sugar().Info("Check task status end") + + //停止任务 + for _, id := range taskids { + synctaskhandle.StopTaskByIds(tc.SyncServer, id) + } + + //数据校验 + compare := &compare.CompareSingle2Single{ + Source: sclient, + Target: tclient, + BatchSize: tc.Compare_BatchSize, + TTLDiff: tc.Compare_TTLDiff, + CompareThreads: tc.Compare_Threads, + } + + compare.CompareDB() +} + +func (tc *TestCase) ImportRdb2Cluster() { + +} + +func (tc *TestCase) ImportAof2Cluster() {} diff --git a/cases/listcases.go b/cases/listcases.go new file mode 100644 index 0000000..c4117e5 --- /dev/null +++ b/cases/listcases.go @@ -0,0 +1,12 @@ +package cases + +import ( + "fmt" +) + +func DisplayCasesList() { + fmt.Println("All Cases:") + for k, v := range CaseTypeMap { + fmt.Println(k, v) + } +} diff --git a/cases/listcases_test.go b/cases/listcases_test.go new file mode 100644 index 0000000..4039289 --- /dev/null +++ b/cases/listcases_test.go @@ -0,0 +1,24 @@ +package cases + +import ( + "fmt" + "testing" + "time" +) + +func TestDisplayCasesList(t *testing.T) { + DisplayCasesList() + + ticker := time.NewTicker(time.Second * 5) + defer ticker.Stop() + for { + time.Sleep(3 * time.Second) + fmt.Println("ticker test") + select { + case <-ticker.C: + fmt.Println("ticker be touched") + default: + + } + } +} diff --git a/cases/singlecases.go b/cases/singlecases.go new file mode 100644 index 0000000..fc342e8 --- /dev/null +++ b/cases/singlecases.go @@ -0,0 +1,321 @@ +package cases + +import ( + "context" + "errors" + "github.com/go-redis/redis/v7" + "github.com/panjf2000/ants/v2" + "github.com/tidwall/gjson" + "os" + "strconv" + "sync" + "testcase/commons" + "testcase/compare" + "testcase/generatedata" + "testcase/global" + "testcase/synctaskhandle" + "time" +) + +//基本测试案例单实例2单实例,无映射关系 +func (tc *TestCase) Single2Single() { + + createjson := tc.ParseJsonFile(tc.CreateTaskFile) + increment_pool, _ := ants.NewPool(tc.Increment_Threads) + defer 
increment_pool.Release() + + saddr := gjson.Get(string(createjson), "sourceRedisAddress").String() + taddr := gjson.Get(string(createjson), "targetRedisAddress").String() + spasswd := gjson.Get(string(createjson), "sourcePassword").String() + tpasswd := gjson.Get(string(createjson), "targetPassword").String() + taskname := gjson.Get(string(createjson), "taskName").String() + + sopt := &redis.Options{ + Addr: saddr, + DB: 0, // use default DB + } + + if spasswd != "" { + sopt.Password = spasswd + } + sclient := commons.GetGoRedisClient(sopt) + + topt := &redis.Options{ + Addr: taddr, + DB: 0, // use default DB + } + + if tpasswd != "" { + topt.Password = tpasswd + } + + tclient := commons.GetGoRedisClient(topt) + + defer sclient.Close() + defer tclient.Close() + + //check redis 连通性 + if !commons.CheckRedisClientConnect(sclient) { + global.RSPLog.Sugar().Error(errors.New("Cannot connect source redis")) + os.Exit(1) + } + if !commons.CheckRedisClientConnect(tclient) { + global.RSPLog.Sugar().Error(errors.New("Cannot connect target redis")) + os.Exit(1) + } + + //check redissycner-server 是否可用 + + //清理redis + sclient.FlushAll() + tclient.FlushAll() + + //清理任务 + global.RSPLog.Sugar().Info("Clean Task beging...") + synctaskhandle.RemoveTaskByName(tc.SyncServer, taskname) + global.RSPLog.Sugar().Info("Clean Task end") + + //生成垫底数据 + bgkv := generatedata.GenBigKV{ + KeySuffix: commons.RandString(tc.BigKV_KeySuffix_Len), + Length: tc.BigKV_Loopstep, + EXPIRE: time.Duration(tc.BigKV_EXPIRE) * time.Second, + ValuePrefix: commons.RandString(tc.BigKV_ValuePrefix_Len), + } + bgkv.GenerateBaseDataParallel(sclient) + + //创建任务 + global.RSPLog.Sugar().Info("Create Task beging...") + taskids := synctaskhandle.CreateTask(tc.SyncServer, string(createjson)) + global.RSPLog.Sugar().Info("Task Id is: ", taskids) + + //启动任务 + for _, v := range taskids { + synctaskhandle.StartTask(tc.SyncServer, v) + } + + global.RSPLog.Sugar().Info("Create Task end") + + //生成增量数据 + d := 
time.Now().Add(time.Duration(tc.GenDataDuration) * time.Second) + ctx, cancel := context.WithDeadline(context.Background(), d) + defer cancel() + + wg := sync.WaitGroup{} + + for i := 0; i < tc.GenDataThreads; i++ { + bo := &generatedata.OptSingle{ + RedisConn: sclient.Conn(), + KeySuffix: commons.RandString(tc.Increment_KeySuffix_Len), + Loopstep: tc.Increment_Loopstep, + EXPIRE: time.Duration(tc.Increment_EXPIRE) * time.Second, + } + + wg.Add(1) + increment_pool.Submit(func() { + bo.KeepExecBasicOpt(ctx, time.Duration(tc.DataGenInterval)*time.Millisecond, false) + wg.Done() + }) + } + wg.Wait() + + //查看任务状态,验证任务状态是否可以关闭,并保证数据同步完成 + tc.CheckSyncTaskStatus(taskids) + global.RSPLog.Sugar().Info("Check task status end") + + //停止任务 + for _, id := range taskids { + synctaskhandle.StopTaskByIds(tc.SyncServer, id) + } + + //数据校验 + compare := &compare.CompareSingle2Single{ + Source: sclient, + Target: tclient, + BatchSize: tc.Compare_BatchSize, + TTLDiff: tc.Compare_TTLDiff, + CompareThreads: tc.Compare_Threads, + } + + compare.CompareDB() + +} + +//Single2SingleWithDBMap,基本测试案例单实例2单实例,有映射关系 +func (tc TestCase) Single2SingleWithDBMap() { + createjson := tc.ParseJsonFile(tc.CreateTaskFile) + + saddr := gjson.Get(string(createjson), "sourceRedisAddress").String() + taddr := gjson.Get(string(createjson), "targetRedisAddress").String() + spasswd := gjson.Get(string(createjson), "sourcePassword").String() + tpasswd := gjson.Get(string(createjson), "targetPassword").String() + taskname := gjson.Get(string(createjson), "taskName").String() + dbmap := gjson.Get(string(createjson), "dbMapper").Map() + + increment_pool, err := ants.NewPool(len(dbmap)) + if err != nil { + global.RSPLog.Sugar().Error(err) + return + } + + defer increment_pool.Release() + + sopt := &redis.Options{ + Addr: saddr, + DB: 0, // use default DB + } + + if spasswd != "" { + sopt.Password = spasswd + } + sclient := commons.GetGoRedisClient(sopt) + + topt := &redis.Options{ + Addr: taddr, + DB: 0, // use 
default DB + } + + if tpasswd != "" { + topt.Password = tpasswd + } + + tclient := commons.GetGoRedisClient(topt) + + defer sclient.Close() + defer tclient.Close() + + //check redis 连通性 + if !commons.CheckRedisClientConnect(sclient) { + global.RSPLog.Sugar().Error(errors.New("Cannot connect source redis")) + os.Exit(1) + } + if !commons.CheckRedisClientConnect(tclient) { + global.RSPLog.Sugar().Error(errors.New("Cannot connect target redis")) + os.Exit(1) + } + + //check redissycner-server 是否可用 + + //清理redis + sclient.FlushAll() + tclient.FlushAll() + + //清理任务 + global.RSPLog.Sugar().Info("Clean Task beging...") + synctaskhandle.RemoveTaskByName(tc.SyncServer, taskname) + global.RSPLog.Sugar().Info("Clean Task end") + + //生成垫底数据 + for k, _ := range dbmap { + db, err := strconv.Atoi(k) + if err != nil { + global.RSPLog.Sugar().Error(err) + return + } + sopt.DB = db + client := commons.GetGoRedisClient(sopt) + defer client.Close() + bgkv := generatedata.GenBigKV{ + KeySuffix: commons.RandString(tc.BigKV_KeySuffix_Len), + Length: tc.BigKV_Loopstep, + EXPIRE: time.Duration(tc.BigKV_EXPIRE) * time.Second, + ValuePrefix: commons.RandString(tc.BigKV_ValuePrefix_Len), + DB: db, + } + bgkv.GenerateBaseDataParallel(client) + } + + //创建任务 + global.RSPLog.Sugar().Info("Create Task beging...") + taskids := synctaskhandle.CreateTask(tc.SyncServer, string(createjson)) + global.RSPLog.Sugar().Info("Task Id is: ", taskids) + + //启动任务 + for _, v := range taskids { + synctaskhandle.StartTask(tc.SyncServer, v) + } + + global.RSPLog.Sugar().Info("Create Task end") + + //生成增量数据 + d := time.Now().Add(time.Duration(tc.GenDataDuration) * time.Second) + ctx, cancel := context.WithDeadline(context.Background(), d) + defer cancel() + + wg := sync.WaitGroup{} + + for k, _ := range dbmap { + db, err := strconv.Atoi(k) + if err != nil { + global.RSPLog.Sugar().Error(err) + return + } + sopt.DB = db + client := commons.GetGoRedisClient(sopt) + defer client.Close() + + bo := 
&generatedata.OptSingle{ + RedisConn: client.Conn(), + KeySuffix: commons.RandString(tc.Increment_KeySuffix_Len), + Loopstep: tc.Increment_Loopstep, + EXPIRE: time.Duration(tc.Increment_EXPIRE) * time.Second, + DB: db, + } + + wg.Add(1) + increment_pool.Submit(func() { + bo.KeepExecBasicOpt(ctx, time.Duration(tc.DataGenInterval)*time.Millisecond, false) + wg.Done() + }) + + } + wg.Wait() + + //查看任务状态,直到COMMANDRUNING状态 + tc.CheckSyncTaskStatus(taskids) + global.RSPLog.Sugar().Info("Check task status end") + + //停止任务 + for _, id := range taskids { + synctaskhandle.StopTaskByIds(tc.SyncServer, id) + } + + //任务停止保护时间 + time.Sleep(60 * time.Second) + + //数据校验 + for k, v := range dbmap { + sdb, err := strconv.Atoi(k) + if err != nil { + global.RSPLog.Sugar().Error(err) + return + } + + tdb, err := strconv.Atoi(v.Raw) + if err != nil { + global.RSPLog.Sugar().Error(err) + return + } + + sopt.DB = sdb + topt.DB = tdb + + sclient := commons.GetGoRedisClient(sopt) + tclient := commons.GetGoRedisClient(topt) + defer sclient.Close() + defer tclient.Close() + + compare := &compare.CompareSingle2Single{ + Source: sclient, + Target: tclient, + BatchSize: tc.Compare_BatchSize, + TTLDiff: tc.Compare_TTLDiff, + CompareThreads: tc.Compare_Threads, + SourceDB: sdb, + TargetDB: tdb, + } + + compare.CompareDB() + } + +} diff --git a/caseyamls/cluster2cluster.yml b/caseyamls/cluster2cluster.yml new file mode 100644 index 0000000..3fb32e4 --- /dev/null +++ b/caseyamls/cluster2cluster.yml @@ -0,0 +1,16 @@ +syncserver: "http://114.67.127.196:8080" +createtaskfile: "./tasks/cluster2cluster.json" +gendataduration: 60 +datageninterval: 20 +gendatathreads: 4 +bigkvkeysuffixlen: 4 +bigkvloopstep: 300 +bigkvexpire: 3600 +bigkvvalueprefixlen: 512 +incrementkeysuffixlen: 4 +incrementloopstep: 20 +incrementexpire: 1800 +comparebatchsize: 50 +comparethreads: 2 +comparettldiff: 100000 +casetype: 3 diff --git a/caseyamls/importaof2single.yml b/caseyamls/importaof2single.yml new file mode 100644 index 
0000000..8434086 --- /dev/null +++ b/caseyamls/importaof2single.yml @@ -0,0 +1,25 @@ +syncserver: "http://114.67.127.196:8080" +syncserversshport: ":22" +syncserverosuser: "root" +syncserverosuserpassword: "Git785230" +createtaskfile: "./tasks/importaof2single.json" +genrdbredis: "114.67.67.7:6379" +genrdbredispassword: "redistest0102" +genrdbredisosuser: "root" +genrdbredisosuserpassword: "Git785230" +dumpfilepath: "/root/redis-compose/data/appendonly.aof" +syncosfilepath: "/home/redissyncer/redissyncer/data/" +gendataduration: 60 +datageninterval: 20 +gendatathreads: 4 +bigkvkeysuffixlen: 4 +bigkvloopstep: 20 +bigkvexpire: 36000 +bigkvvalueprefixlen: 512 +incrementkeysuffixlen: 4 +incrementloopstep: 20 +incrementexpire: 36000 +comparebatchsize: 50 +comparethreads: 2 +comparettldiff: 100000 +casetype: 5 diff --git a/caseyamls/importrdb2single.yml b/caseyamls/importrdb2single.yml new file mode 100644 index 0000000..4c5d21b --- /dev/null +++ b/caseyamls/importrdb2single.yml @@ -0,0 +1,25 @@ +syncserver: "http://114.67.127.196:8080" +syncserversshport: ":22" +syncserverosuser: "root" +syncserverosuserpassword: "Git785230" +createtaskfile: "./tasks/importrdb2single.json" +genrdbredis: "10.0.1.101:6379" +genrdbredispassword: "redistest0102" +genrdbredisosuser: "root" +genrdbredisosuserpassword: "Git785230" +dumpfilepath: "/root/redis-compose/data/dump.rdb" +syncosfilepath: "/home/redissyncer/redissyncer/data/" +gendataduration: 60 +datageninterval: 20 +gendatathreads: 4 +bigkvkeysuffixlen: 4 +bigkvloopstep: 20 +bigkvexpire: 36000 +bigkvvalueprefixlen: 512 +incrementkeysuffixlen: 4 +incrementloopstep: 20 +incrementexpire: 36000 +comparebatchsize: 50 +comparethreads: 2 +comparettldiff: 100000 +casetype: 4 diff --git a/caseyamls/single2cluster.yml b/caseyamls/single2cluster.yml new file mode 100644 index 0000000..ab35261 --- /dev/null +++ b/caseyamls/single2cluster.yml @@ -0,0 +1,16 @@ +syncserver: "http://114.67.127.196:8080" +createtaskfile: 
"./tasks/single2cluster.json" +gendataduration: 60 +datageninterval: 30 +gendatathreads: 4 +bigkvkeysuffixlen: 4 +bigkvloopstep: 100 +bigkvexpire: 3600 +bigkvvalueprefixlen: 512 +incrementkeysuffixlen: 4 +incrementloopstep: 20 +incrementexpire: 1800 +comparebatchsize: 50 +comparethreads: 2 +comparettldiff: 100000 +casetype: 2 diff --git a/caseyamls/single2single.yml b/caseyamls/single2single.yml new file mode 100644 index 0000000..9e0d11f --- /dev/null +++ b/caseyamls/single2single.yml @@ -0,0 +1,31 @@ +# redissyncer 地址 +syncserver: "http://dev:8888" +# 任务创建json +createtaskfile: "./tasks/single2single.json" +# 生成增量数据持续多长时间,单位为秒 +gendataduration: 60 +# 生成增量数据间隔,单位毫秒,该参数用于调节怎量数据生成的频率,避免频次太高引起redis宕机 +datageninterval: 20 +# 数据生成器线程数,并发生成数据,默认为cpu核数 +gendatathreads: 8 +# 垫底数据key后缀长度,key的结构为keytype_keysuffix +bigkvkeysuffixlen: 4 +# 生成垫底数据的循环次数,循环次数决定生成垫底数据的数据量 +bigkvloopstep: 20 +# 垫底数据的过期时间,单位为秒 +bigkvexpire: 3600 +# 垫底数据value前缀的长度,根据该长度随机生成一个字符串用于value前缀 +bigkvvalueprefixlen: 512 +# 增量数据key后缀,key的结构为keytype_keysuffix +incrementkeysuffixlen: 4 +# 增量数据循环长度,影响增量数据value长度或操作次数 +incrementloopstep: 20 +# 增量数据过期时间,单位秒 +incrementexpire: 18000 +# 数据校验每批次数量 +comparebatchsize: 50 +# 数据校验线程数量 +comparethreads: 2 +# 数据校验ttl差值,单位毫秒,差值在该范围内视为通过 +comparettldiff: 100000 +casetype: 0 diff --git a/caseyamls/single2single_v3_v3.yml b/caseyamls/single2single_v3_v3.yml new file mode 100644 index 0000000..802dcc9 --- /dev/null +++ b/caseyamls/single2single_v3_v3.yml @@ -0,0 +1,16 @@ +syncserver: "http://dev:8888" +createtaskfile: "./tasks/single2single_v3_v3.json" +gendataduration: 60 +datageninterval: 20 +gendatathreads: 8 +bigkvkeysuffixlen: 4 +bigkvloopstep: 20 +bigkvexpire: 3600 +bigkvvalueprefixlen: 512 +incrementkeysuffixlen: 4 +incrementloopstep: 20 +incrementexpire: 18000 +comparebatchsize: 50 +comparethreads: 2 +comparettldiff: 100000 +casetype: 0 diff --git a/caseyamls/single2single_v3_v4.yml b/caseyamls/single2single_v3_v4.yml new file mode 100644 index 0000000..a166928 --- 
/dev/null +++ b/caseyamls/single2single_v3_v4.yml @@ -0,0 +1,16 @@ +syncserver: "http://dev:8888" +createtaskfile: "./tasks/single2single_v3_v4.json" +gendataduration: 60 +datageninterval: 20 +gendatathreads: 8 +bigkvkeysuffixlen: 4 +bigkvloopstep: 20 +bigkvexpire: 3600 +bigkvvalueprefixlen: 512 +incrementkeysuffixlen: 4 +incrementloopstep: 20 +incrementexpire: 18000 +comparebatchsize: 50 +comparethreads: 2 +comparettldiff: 100000 +casetype: 0 diff --git a/caseyamls/single2single_v3_v5.yml b/caseyamls/single2single_v3_v5.yml new file mode 100644 index 0000000..da00811 --- /dev/null +++ b/caseyamls/single2single_v3_v5.yml @@ -0,0 +1,16 @@ +syncserver: "http://dev:8888" +createtaskfile: "./tasks/single2single_v3_v5.json" +gendataduration: 60 +datageninterval: 20 +gendatathreads: 8 +bigkvkeysuffixlen: 4 +bigkvloopstep: 20 +bigkvexpire: 3600 +bigkvvalueprefixlen: 512 +incrementkeysuffixlen: 4 +incrementloopstep: 20 +incrementexpire: 18000 +comparebatchsize: 50 +comparethreads: 2 +comparettldiff: 100000 +casetype: 0 diff --git a/caseyamls/single2single_v3_v6.yml b/caseyamls/single2single_v3_v6.yml new file mode 100644 index 0000000..c4834ec --- /dev/null +++ b/caseyamls/single2single_v3_v6.yml @@ -0,0 +1,16 @@ +syncserver: "http://dev:8888" +createtaskfile: "./tasks/single2single_v3_v6.json" +gendataduration: 60 +datageninterval: 20 +gendatathreads: 8 +bigkvkeysuffixlen: 4 +bigkvloopstep: 20 +bigkvexpire: 3600 +bigkvvalueprefixlen: 512 +incrementkeysuffixlen: 4 +incrementloopstep: 20 +incrementexpire: 18000 +comparebatchsize: 50 +comparethreads: 2 +comparettldiff: 100000 +casetype: 0 diff --git a/caseyamls/single2single_v4_v3.yml b/caseyamls/single2single_v4_v3.yml new file mode 100644 index 0000000..9e148a4 --- /dev/null +++ b/caseyamls/single2single_v4_v3.yml @@ -0,0 +1,16 @@ +syncserver: "http://dev:8888" +createtaskfile: "./tasks/single2single_v4_v3.json" +gendataduration: 60 +datageninterval: 20 +gendatathreads: 8 +bigkvkeysuffixlen: 4 +bigkvloopstep: 20 
+bigkvexpire: 3600 +bigkvvalueprefixlen: 512 +incrementkeysuffixlen: 4 +incrementloopstep: 20 +incrementexpire: 18000 +comparebatchsize: 50 +comparethreads: 2 +comparettldiff: 100000 +casetype: 0 diff --git a/caseyamls/single2single_v4_v4.yml b/caseyamls/single2single_v4_v4.yml new file mode 100644 index 0000000..d8cfc0b --- /dev/null +++ b/caseyamls/single2single_v4_v4.yml @@ -0,0 +1,16 @@ +syncserver: "http://dev:8888" +createtaskfile: "./tasks/single2single_v4_v4.json" +gendataduration: 60 +datageninterval: 20 +gendatathreads: 8 +bigkvkeysuffixlen: 4 +bigkvloopstep: 20 +bigkvexpire: 3600 +bigkvvalueprefixlen: 512 +incrementkeysuffixlen: 4 +incrementloopstep: 20 +incrementexpire: 18000 +comparebatchsize: 50 +comparethreads: 2 +comparettldiff: 100000 +casetype: 0 diff --git a/caseyamls/single2single_v4_v5.yml b/caseyamls/single2single_v4_v5.yml new file mode 100644 index 0000000..c9ef9af --- /dev/null +++ b/caseyamls/single2single_v4_v5.yml @@ -0,0 +1,16 @@ +syncserver: "http://dev:8888" +createtaskfile: "./tasks/single2single_v4_v5.json" +gendataduration: 60 +datageninterval: 20 +gendatathreads: 8 +bigkvkeysuffixlen: 4 +bigkvloopstep: 20 +bigkvexpire: 3600 +bigkvvalueprefixlen: 512 +incrementkeysuffixlen: 4 +incrementloopstep: 20 +incrementexpire: 18000 +comparebatchsize: 50 +comparethreads: 2 +comparettldiff: 100000 +casetype: 0 diff --git a/caseyamls/single2single_v4_v6.yml b/caseyamls/single2single_v4_v6.yml new file mode 100644 index 0000000..fe0f455 --- /dev/null +++ b/caseyamls/single2single_v4_v6.yml @@ -0,0 +1,16 @@ +syncserver: "http://dev:8888" +createtaskfile: "./tasks/single2single_v4_v6.json" +gendataduration: 60 +datageninterval: 20 +gendatathreads: 8 +bigkvkeysuffixlen: 4 +bigkvloopstep: 20 +bigkvexpire: 3600 +bigkvvalueprefixlen: 512 +incrementkeysuffixlen: 4 +incrementloopstep: 20 +incrementexpire: 18000 +comparebatchsize: 50 +comparethreads: 2 +comparettldiff: 100000 +casetype: 0 diff --git a/caseyamls/single2single_v5_v3.yml 
b/caseyamls/single2single_v5_v3.yml new file mode 100644 index 0000000..c5f226a --- /dev/null +++ b/caseyamls/single2single_v5_v3.yml @@ -0,0 +1,16 @@ +syncserver: "http://dev:8888" +createtaskfile: "./tasks/single2single_v5_v3.json" +gendataduration: 60 +datageninterval: 20 +gendatathreads: 8 +bigkvkeysuffixlen: 4 +bigkvloopstep: 20 +bigkvexpire: 3600 +bigkvvalueprefixlen: 512 +incrementkeysuffixlen: 4 +incrementloopstep: 20 +incrementexpire: 18000 +comparebatchsize: 50 +comparethreads: 2 +comparettldiff: 100000 +casetype: 0 diff --git a/caseyamls/single2single_v5_v4.yml b/caseyamls/single2single_v5_v4.yml new file mode 100644 index 0000000..a92eb10 --- /dev/null +++ b/caseyamls/single2single_v5_v4.yml @@ -0,0 +1,16 @@ +syncserver: "http://dev:8888" +createtaskfile: "./tasks/single2single_v5_v4.json" +gendataduration: 60 +datageninterval: 20 +gendatathreads: 8 +bigkvkeysuffixlen: 4 +bigkvloopstep: 20 +bigkvexpire: 3600 +bigkvvalueprefixlen: 512 +incrementkeysuffixlen: 4 +incrementloopstep: 20 +incrementexpire: 18000 +comparebatchsize: 50 +comparethreads: 2 +comparettldiff: 100000 +casetype: 0 diff --git a/caseyamls/single2single_v5_v5.yml b/caseyamls/single2single_v5_v5.yml new file mode 100644 index 0000000..a3e5cc4 --- /dev/null +++ b/caseyamls/single2single_v5_v5.yml @@ -0,0 +1,16 @@ +syncserver: "http://dev:8888" +createtaskfile: "./tasks/single2single_v5_v5.json" +gendataduration: 10 +datageninterval: 20 +gendatathreads: 8 +bigkvkeysuffixlen: 4 +bigkvloopstep: 20 +bigkvexpire: 3600 +bigkvvalueprefixlen: 512 +incrementkeysuffixlen: 4 +incrementloopstep: 20 +incrementexpire: 18000 +comparebatchsize: 50 +comparethreads: 2 +comparettldiff: 100000 +casetype: 0 \ No newline at end of file diff --git a/caseyamls/single2single_v5_v6.yml b/caseyamls/single2single_v5_v6.yml new file mode 100644 index 0000000..bcd971e --- /dev/null +++ b/caseyamls/single2single_v5_v6.yml @@ -0,0 +1,16 @@ +syncserver: "http://dev:8888" +createtaskfile: "./tasks/single2single_v5_v6.json" 
+gendataduration: 60 +datageninterval: 20 +gendatathreads: 8 +bigkvkeysuffixlen: 4 +bigkvloopstep: 20 +bigkvexpire: 3600 +bigkvvalueprefixlen: 512 +incrementkeysuffixlen: 4 +incrementloopstep: 20 +incrementexpire: 18000 +comparebatchsize: 50 +comparethreads: 2 +comparettldiff: 100000 +casetype: 0 diff --git a/caseyamls/single2single_v6_v3.yml b/caseyamls/single2single_v6_v3.yml new file mode 100644 index 0000000..255854d --- /dev/null +++ b/caseyamls/single2single_v6_v3.yml @@ -0,0 +1,16 @@ +syncserver: "http://dev:8888" +createtaskfile: "./tasks/single2single_v6_v3.json" +gendataduration: 60 +datageninterval: 20 +gendatathreads: 8 +bigkvkeysuffixlen: 4 +bigkvloopstep: 20 +bigkvexpire: 3600 +bigkvvalueprefixlen: 512 +incrementkeysuffixlen: 4 +incrementloopstep: 20 +incrementexpire: 18000 +comparebatchsize: 50 +comparethreads: 2 +comparettldiff: 100000 +casetype: 0 diff --git a/caseyamls/single2single_v6_v4.yml b/caseyamls/single2single_v6_v4.yml new file mode 100644 index 0000000..e632c95 --- /dev/null +++ b/caseyamls/single2single_v6_v4.yml @@ -0,0 +1,16 @@ +syncserver: "http://dev:8888" +createtaskfile: "./tasks/single2single_v6_v4.json" +gendataduration: 60 +datageninterval: 20 +gendatathreads: 8 +bigkvkeysuffixlen: 4 +bigkvloopstep: 20 +bigkvexpire: 3600 +bigkvvalueprefixlen: 512 +incrementkeysuffixlen: 4 +incrementloopstep: 20 +incrementexpire: 18000 +comparebatchsize: 50 +comparethreads: 2 +comparettldiff: 100000 +casetype: 0 diff --git a/caseyamls/single2single_v6_v5.yml b/caseyamls/single2single_v6_v5.yml new file mode 100644 index 0000000..4ea37dc --- /dev/null +++ b/caseyamls/single2single_v6_v5.yml @@ -0,0 +1,16 @@ +syncserver: "http://dev:8888" +createtaskfile: "./tasks/single2single_v6_v5.json" +gendataduration: 60 +datageninterval: 20 +gendatathreads: 8 +bigkvkeysuffixlen: 4 +bigkvloopstep: 20 +bigkvexpire: 3600 +bigkvvalueprefixlen: 512 +incrementkeysuffixlen: 4 +incrementloopstep: 20 +incrementexpire: 18000 +comparebatchsize: 50 +comparethreads: 2 
+comparettldiff: 100000 +casetype: 0 diff --git a/caseyamls/single2single_v6_v6.yml b/caseyamls/single2single_v6_v6.yml new file mode 100644 index 0000000..4418d09 --- /dev/null +++ b/caseyamls/single2single_v6_v6.yml @@ -0,0 +1,16 @@ +syncserver: "http://dev:8888" +createtaskfile: "./tasks/single2single_v6_v6.json" +gendataduration: 60 +datageninterval: 20 +gendatathreads: 8 +bigkvkeysuffixlen: 4 +bigkvloopstep: 20 +bigkvexpire: 3600 +bigkvvalueprefixlen: 512 +incrementkeysuffixlen: 4 +incrementloopstep: 20 +incrementexpire: 18000 +comparebatchsize: 50 +comparethreads: 2 +comparettldiff: 100000 +casetype: 0 diff --git a/caseyamls/single2singlewithdbmap.yml b/caseyamls/single2singlewithdbmap.yml new file mode 100644 index 0000000..d565481 --- /dev/null +++ b/caseyamls/single2singlewithdbmap.yml @@ -0,0 +1,16 @@ +syncserver: "http://dev:8888" +createtaskfile: "./tasks/single2singlewithdbmap.json" +gendataduration: 60 +datageninterval: 30 +gendatathreads: 2 +bigkvkeysuffixlen: 4 +bigkvloopstep: 20 +bigkvexpire: 3600 +bigkvvalueprefixlen: 512 +incrementkeysuffixlen: 4 +incrementloopstep: 20 +incrementexpire: 1800 +comparebatchsize: 50 +comparethreads: 2 +comparettldiff: 100000 +casetype: 1 diff --git a/caseyamls/single2singlewithdbmap_v3_v3.yml b/caseyamls/single2singlewithdbmap_v3_v3.yml new file mode 100644 index 0000000..cd97b16 --- /dev/null +++ b/caseyamls/single2singlewithdbmap_v3_v3.yml @@ -0,0 +1,16 @@ +syncserver: "http://dev:8888" +createtaskfile: "./tasks/single2singlewithdbmap_v3_v3.json" +gendataduration: 60 +datageninterval: 30 +gendatathreads: 2 +bigkvkeysuffixlen: 4 +bigkvloopstep: 20 +bigkvexpire: 3600 +bigkvvalueprefixlen: 512 +incrementkeysuffixlen: 4 +incrementloopstep: 20 +incrementexpire: 1800 +comparebatchsize: 50 +comparethreads: 2 +comparettldiff: 100000 +casetype: 1 diff --git a/caseyamls/single2singlewithdbmap_v3_v4.yml b/caseyamls/single2singlewithdbmap_v3_v4.yml new file mode 100644 index 0000000..4237d5a --- /dev/null +++ 
b/caseyamls/single2singlewithdbmap_v3_v4.yml @@ -0,0 +1,16 @@ +syncserver: "http://dev:8888" +createtaskfile: "./tasks/single2singlewithdbmap_v3_v4.json" +gendataduration: 60 +datageninterval: 30 +gendatathreads: 2 +bigkvkeysuffixlen: 4 +bigkvloopstep: 20 +bigkvexpire: 3600 +bigkvvalueprefixlen: 512 +incrementkeysuffixlen: 4 +incrementloopstep: 20 +incrementexpire: 1800 +comparebatchsize: 50 +comparethreads: 2 +comparettldiff: 100000 +casetype: 1 diff --git a/caseyamls/single2singlewithdbmap_v3_v5.yml b/caseyamls/single2singlewithdbmap_v3_v5.yml new file mode 100644 index 0000000..1d2d17b --- /dev/null +++ b/caseyamls/single2singlewithdbmap_v3_v5.yml @@ -0,0 +1,16 @@ +syncserver: "http://dev:8888" +createtaskfile: "./tasks/single2singlewithdbmap_v3_v5.json" +gendataduration: 60 +datageninterval: 30 +gendatathreads: 2 +bigkvkeysuffixlen: 4 +bigkvloopstep: 20 +bigkvexpire: 3600 +bigkvvalueprefixlen: 512 +incrementkeysuffixlen: 4 +incrementloopstep: 20 +incrementexpire: 1800 +comparebatchsize: 50 +comparethreads: 2 +comparettldiff: 100000 +casetype: 1 diff --git a/caseyamls/single2singlewithdbmap_v3_v6.yml b/caseyamls/single2singlewithdbmap_v3_v6.yml new file mode 100644 index 0000000..64075e6 --- /dev/null +++ b/caseyamls/single2singlewithdbmap_v3_v6.yml @@ -0,0 +1,16 @@ +syncserver: "http://dev:8888" +createtaskfile: "./tasks/single2singlewithdbmap_v3_v6.json" +gendataduration: 60 +datageninterval: 30 +gendatathreads: 2 +bigkvkeysuffixlen: 4 +bigkvloopstep: 20 +bigkvexpire: 3600 +bigkvvalueprefixlen: 512 +incrementkeysuffixlen: 4 +incrementloopstep: 20 +incrementexpire: 1800 +comparebatchsize: 50 +comparethreads: 2 +comparettldiff: 100000 +casetype: 1 diff --git a/caseyamls/single2singlewithdbmap_v4_v3.yml b/caseyamls/single2singlewithdbmap_v4_v3.yml new file mode 100644 index 0000000..4af5155 --- /dev/null +++ b/caseyamls/single2singlewithdbmap_v4_v3.yml @@ -0,0 +1,16 @@ +syncserver: "http://dev:8888" +createtaskfile: "./tasks/single2singlewithdbmap_v4_v3.json" 
+gendataduration: 60 +datageninterval: 30 +gendatathreads: 2 +bigkvkeysuffixlen: 4 +bigkvloopstep: 20 +bigkvexpire: 3600 +bigkvvalueprefixlen: 512 +incrementkeysuffixlen: 4 +incrementloopstep: 20 +incrementexpire: 1800 +comparebatchsize: 50 +comparethreads: 2 +comparettldiff: 100000 +casetype: 1 diff --git a/caseyamls/single2singlewithdbmap_v4_v4.yml b/caseyamls/single2singlewithdbmap_v4_v4.yml new file mode 100644 index 0000000..943c552 --- /dev/null +++ b/caseyamls/single2singlewithdbmap_v4_v4.yml @@ -0,0 +1,16 @@ +syncserver: "http://dev:8888" +createtaskfile: "./tasks/single2singlewithdbmap_v4_v4.json" +gendataduration: 60 +datageninterval: 30 +gendatathreads: 2 +bigkvkeysuffixlen: 4 +bigkvloopstep: 20 +bigkvexpire: 3600 +bigkvvalueprefixlen: 512 +incrementkeysuffixlen: 4 +incrementloopstep: 20 +incrementexpire: 1800 +comparebatchsize: 50 +comparethreads: 2 +comparettldiff: 100000 +casetype: 1 diff --git a/caseyamls/single2singlewithdbmap_v4_v5.yml b/caseyamls/single2singlewithdbmap_v4_v5.yml new file mode 100644 index 0000000..c1679a9 --- /dev/null +++ b/caseyamls/single2singlewithdbmap_v4_v5.yml @@ -0,0 +1,16 @@ +syncserver: "http://dev:8888" +createtaskfile: "./tasks/single2singlewithdbmap_v4_v5.json" +gendataduration: 60 +datageninterval: 30 +gendatathreads: 2 +bigkvkeysuffixlen: 4 +bigkvloopstep: 20 +bigkvexpire: 3600 +bigkvvalueprefixlen: 512 +incrementkeysuffixlen: 4 +incrementloopstep: 20 +incrementexpire: 1800 +comparebatchsize: 50 +comparethreads: 2 +comparettldiff: 100000 +casetype: 1 diff --git a/caseyamls/single2singlewithdbmap_v4_v6.yml b/caseyamls/single2singlewithdbmap_v4_v6.yml new file mode 100644 index 0000000..853fab1 --- /dev/null +++ b/caseyamls/single2singlewithdbmap_v4_v6.yml @@ -0,0 +1,16 @@ +syncserver: "http://dev:8888" +createtaskfile: "./tasks/single2singlewithdbmap_v4_v6.json" +gendataduration: 60 +datageninterval: 30 +gendatathreads: 2 +bigkvkeysuffixlen: 4 +bigkvloopstep: 20 +bigkvexpire: 3600 +bigkvvalueprefixlen: 512 
+incrementkeysuffixlen: 4 +incrementloopstep: 20 +incrementexpire: 1800 +comparebatchsize: 50 +comparethreads: 2 +comparettldiff: 100000 +casetype: 1 diff --git a/caseyamls/single2singlewithdbmap_v5_v3.yml b/caseyamls/single2singlewithdbmap_v5_v3.yml new file mode 100644 index 0000000..c6d5cad --- /dev/null +++ b/caseyamls/single2singlewithdbmap_v5_v3.yml @@ -0,0 +1,16 @@ +syncserver: "http://dev:8888" +createtaskfile: "./tasks/single2singlewithdbmap_v5_v3.json" +gendataduration: 60 +datageninterval: 30 +gendatathreads: 2 +bigkvkeysuffixlen: 4 +bigkvloopstep: 20 +bigkvexpire: 3600 +bigkvvalueprefixlen: 512 +incrementkeysuffixlen: 4 +incrementloopstep: 20 +incrementexpire: 1800 +comparebatchsize: 50 +comparethreads: 2 +comparettldiff: 100000 +casetype: 1 diff --git a/caseyamls/single2singlewithdbmap_v5_v4.yml b/caseyamls/single2singlewithdbmap_v5_v4.yml new file mode 100644 index 0000000..aa5405d --- /dev/null +++ b/caseyamls/single2singlewithdbmap_v5_v4.yml @@ -0,0 +1,16 @@ +syncserver: "http://dev:8888" +createtaskfile: "./tasks/single2singlewithdbmap_v5_v4.json" +gendataduration: 60 +datageninterval: 30 +gendatathreads: 2 +bigkvkeysuffixlen: 4 +bigkvloopstep: 20 +bigkvexpire: 3600 +bigkvvalueprefixlen: 512 +incrementkeysuffixlen: 4 +incrementloopstep: 20 +incrementexpire: 1800 +comparebatchsize: 50 +comparethreads: 2 +comparettldiff: 100000 +casetype: 1 diff --git a/caseyamls/single2singlewithdbmap_v5_v5.yml b/caseyamls/single2singlewithdbmap_v5_v5.yml new file mode 100644 index 0000000..2c99c1f --- /dev/null +++ b/caseyamls/single2singlewithdbmap_v5_v5.yml @@ -0,0 +1,16 @@ +syncserver: "http://dev:8888" +createtaskfile: "./tasks/single2singlewithdbmap_v5_v5.json" +gendataduration: 60 +datageninterval: 300 +gendatathreads: 2 +bigkvkeysuffixlen: 4 +bigkvloopstep: 20 +bigkvexpire: 3600 +bigkvvalueprefixlen: 512 +incrementkeysuffixlen: 4 +incrementloopstep: 20 +incrementexpire: 1800 +comparebatchsize: 50 +comparethreads: 2 +comparettldiff: 100000 +casetype: 1 \ No 
newline at end of file diff --git a/caseyamls/single2singlewithdbmap_v5_v6.yml b/caseyamls/single2singlewithdbmap_v5_v6.yml new file mode 100644 index 0000000..22a358b --- /dev/null +++ b/caseyamls/single2singlewithdbmap_v5_v6.yml @@ -0,0 +1,16 @@ +syncserver: "http://dev:8888" +createtaskfile: "./tasks/single2singlewithdbmap_v5_v6.json" +gendataduration: 60 +datageninterval: 30 +gendatathreads: 2 +bigkvkeysuffixlen: 4 +bigkvloopstep: 20 +bigkvexpire: 3600 +bigkvvalueprefixlen: 512 +incrementkeysuffixlen: 4 +incrementloopstep: 20 +incrementexpire: 1800 +comparebatchsize: 50 +comparethreads: 2 +comparettldiff: 100000 +casetype: 1 diff --git a/caseyamls/single2singlewithdbmap_v6_v3.yml b/caseyamls/single2singlewithdbmap_v6_v3.yml new file mode 100644 index 0000000..03b6780 --- /dev/null +++ b/caseyamls/single2singlewithdbmap_v6_v3.yml @@ -0,0 +1,16 @@ +syncserver: "http://dev:8888" +createtaskfile: "./tasks/single2singlewithdbmap_v6_v3.json" +gendataduration: 60 +datageninterval: 30 +gendatathreads: 2 +bigkvkeysuffixlen: 4 +bigkvloopstep: 20 +bigkvexpire: 3600 +bigkvvalueprefixlen: 512 +incrementkeysuffixlen: 4 +incrementloopstep: 20 +incrementexpire: 1800 +comparebatchsize: 50 +comparethreads: 2 +comparettldiff: 100000 +casetype: 1 diff --git a/caseyamls/single2singlewithdbmap_v6_v4.yml b/caseyamls/single2singlewithdbmap_v6_v4.yml new file mode 100644 index 0000000..b4cb2ad --- /dev/null +++ b/caseyamls/single2singlewithdbmap_v6_v4.yml @@ -0,0 +1,16 @@ +syncserver: "http://dev:8888" +createtaskfile: "./tasks/single2singlewithdbmap_v6_v4.json" +gendataduration: 60 +datageninterval: 30 +gendatathreads: 2 +bigkvkeysuffixlen: 4 +bigkvloopstep: 20 +bigkvexpire: 3600 +bigkvvalueprefixlen: 512 +incrementkeysuffixlen: 4 +incrementloopstep: 20 +incrementexpire: 1800 +comparebatchsize: 50 +comparethreads: 2 +comparettldiff: 100000 +casetype: 1 diff --git a/caseyamls/single2singlewithdbmap_v6_v5.yml b/caseyamls/single2singlewithdbmap_v6_v5.yml new file mode 100644 index 
0000000..516c78e --- /dev/null +++ b/caseyamls/single2singlewithdbmap_v6_v5.yml @@ -0,0 +1,16 @@ +syncserver: "http://dev:8888" +createtaskfile: "./tasks/single2singlewithdbmap_v6_v5.json" +gendataduration: 60 +datageninterval: 30 +gendatathreads: 2 +bigkvkeysuffixlen: 4 +bigkvloopstep: 20 +bigkvexpire: 3600 +bigkvvalueprefixlen: 512 +incrementkeysuffixlen: 4 +incrementloopstep: 20 +incrementexpire: 1800 +comparebatchsize: 50 +comparethreads: 2 +comparettldiff: 100000 +casetype: 1 diff --git a/caseyamls/single2singlewithdbmap_v6_v6.yml b/caseyamls/single2singlewithdbmap_v6_v6.yml new file mode 100644 index 0000000..df292fa --- /dev/null +++ b/caseyamls/single2singlewithdbmap_v6_v6.yml @@ -0,0 +1,16 @@ +syncserver: "http://dev:8888" +createtaskfile: "./tasks/single2singlewithdbmap_v6_v6.json" +gendataduration: 60 +datageninterval: 30 +gendatathreads: 2 +bigkvkeysuffixlen: 4 +bigkvloopstep: 20 +bigkvexpire: 3600 +bigkvvalueprefixlen: 512 +incrementkeysuffixlen: 4 +incrementloopstep: 20 +incrementexpire: 1800 +comparebatchsize: 50 +comparethreads: 2 +comparettldiff: 100000 +casetype: 1 diff --git a/cmd/caselist.go b/cmd/caselist.go new file mode 100644 index 0000000..50c4e83 --- /dev/null +++ b/cmd/caselist.go @@ -0,0 +1,23 @@ +package cmd + +import ( + "github.com/spf13/cobra" + "testcase/cases" +) + +// NewConfigCommand return a config subcommand of rootCmd +func NewCaseListCommand() *cobra.Command { + cl := &cobra.Command{ + Use: "caselist ", + Short: "list all cases", + Run: caseListCommandFunc, + } + return cl +} + + + +func caseListCommandFunc(cmd *cobra.Command, args []string) { + cases.DisplayCasesList() +} + diff --git a/cmd/config.go b/cmd/config.go new file mode 100644 index 0000000..7071d02 --- /dev/null +++ b/cmd/config.go @@ -0,0 +1,65 @@ +package cmd + +import ( + "github.com/spf13/cobra" + "testcase/commons" + "testcase/global" +) + +// NewConfigCommand return a config subcommand of rootCmd +func NewConfigCommand() *cobra.Command { + conf := 
&cobra.Command{ + Use: "config ", + Short: "tune pd configs", + } + conf.AddCommand(NewShowConfigCommand()) + return conf +} + +// NewShowConfigCommand return a show subcommand of configCmd +func NewShowConfigCommand() *cobra.Command { + sc := &cobra.Command{ + Use: "show [replication|label-property|all]", + Short: "show replication and schedule config of PD", + //Run: showConfigCommandFunc, + } + sc.AddCommand(NewShowAllConfigCommand()) + return sc +} + +// NewShowAllConfigCommand return a show all subcommand of show subcommand +func NewShowAllConfigCommand() *cobra.Command { + sc := &cobra.Command{ + Use: "all", + Short: "show all config of redissyncer-cli", + Run: showAllConfigCommandFunc, + } + return sc +} + +// NewShowScheduleConfigCommand return a show all subcommand of show subcommand +func NewShowScheduleConfigCommand() *cobra.Command { + sc := &cobra.Command{ + Use: "schedule", + Short: "show schedule config of PD", + Run: showScheduleConfigCommandFunc, + } + return sc +} + +func showConfigCommandFunc(cmd *cobra.Command, args []string) { + cmd.Println(cmd.Args) +} + +func showScheduleConfigCommandFunc(cmd *cobra.Command, args []string) { + cmd.Println(cmd.Args) +} + +func showAllConfigCommandFunc(cmd *cobra.Command, args []string) { + configs, err := commons.MapToYamlString(global.RSPViper.AllSettings()) + if err != nil { + cmd.PrintErrln(err) + return + } + cmd.Println(configs) +} diff --git a/cmd/exec.go b/cmd/exec.go new file mode 100644 index 0000000..309eca0 --- /dev/null +++ b/cmd/exec.go @@ -0,0 +1,108 @@ +package cmd + +import ( + "fmt" + "github.com/spf13/cobra" + "strings" + "testcase/cases" + "testcase/commons" +) + +func NewExecCommand() *cobra.Command { + exec:= &cobra.Command{ + Use: "exec ", + Short: "Execute scenario test", + } + exec.AddCommand(NewExecFromFileCommand()) + exec.AddCommand(NewExecFromDirectoryCommand()) + return exec +} + +func NewExecFromFileCommand() *cobra.Command { + sc := &cobra.Command{ + Use: "file ", + Short: 
"execute test from yaml file", + Run: execTestCaseFromFileFunc, + } + return sc +} + +func NewExecFromDirectoryCommand() *cobra.Command { + sc := &cobra.Command{ + Use: "dir ", + Short: "execute test from dirctory include yml files", + Run: execTestCaseFromDirectoryFunc, + } + return sc +} + +func execTestCaseFromFileFunc(cmd *cobra.Command, args []string) { + if len(args) == 0 { + cmd.PrintErrln("Please input test yaml file path") + return + } + + for _, v := range args { + //判断文件是否存在 + if !commons.FileExists(v) { + cmd.PrintErrf("file %s not exists \n", v) + continue + } + + //判断文件格式 + yml := strings.HasSuffix(v, ".yml") + yaml:=strings.HasSuffix(v, ".yaml") + if !yml && !yaml{ + cmd.PrintErrf("file %s not a yml or yaml file \n", v) + continue + } + + tc := cases.NewTestCase() + if err:=tc.ParseYamlFile(v);err!=nil{ + cmd.PrintErrln(err) + } + fmt.Println(tc) + tc.Exec() + } +} + + +func execTestCaseFromDirectoryFunc(cmd *cobra.Command, args []string) { + if len(args) != 1 { + cmd.PrintErrln("Please input directory include yaml files") + return + } + + if !commons.IsDir(args[0]) { + cmd.PrintErrf(" %s not a directory \n", args[0]) + return + } + + files, err := commons.GetAllFiles(args[0]) + if err != nil { + cmd.PrintErrln(err) + return + } + + fmt.Println(files) + yamlfiles := []string{} + for _, v := range files { + //过滤指定格式 + ok := strings.HasSuffix(v, ".yml") || strings.HasSuffix(v, ".yaml") + if ok { + yamlfiles = append(yamlfiles, v) + } + } + + if len(yamlfiles) == 0 { + cmd.PrintErrln("No yaml files in the folder!") + return + } + for _, v := range yamlfiles { + tc := cases.NewTestCase() + tc.ParseYamlFile(v) + fmt.Println(tc) + tc.Exec() + } + +} \ No newline at end of file diff --git a/cmd/gendata.go b/cmd/gendata.go new file mode 100644 index 0000000..190da59 --- /dev/null +++ b/cmd/gendata.go @@ -0,0 +1,70 @@ +package cmd + +import ( + "errors" + "github.com/spf13/cobra" + "gopkg.in/yaml.v2" + "io" + "os" + "strings" + "testcase/commons" + 
"testcase/generatedata" +) + + + +func NewGenDataCommand() *cobra.Command { + gd := &cobra.Command{ + Use: "gendata ", + Short: "Generate basic data through yaml description file", + Run: genDataCommandFunc, + } + //gd.AddCommand(NewBaseDataCommand()) + return gd +} + +func genDataCommandFunc(cmd *cobra.Command, args []string) { + if len(args) == 0 { + cmd.PrintErrln("Please input test yaml file path") + return + } + + for _, v := range args { + //cmd.Println(v) + //判断文件是否存在 + if !commons.FileExists(v) { + cmd.PrintErrf("file %s not exists \n", v) + continue + } + + //判断文件格式 + isYaml := strings.HasSuffix(v, ".yml") || strings.HasSuffix(v, ".yaml") + if !isYaml { + cmd.PrintErrf("file %s not a yml or yaml file \n", v) + continue + } + + file, err := os.Open(v) + if err != nil { + cmd.PrintErrln(err) + } + dec := yaml.NewDecoder(file) + + for { + data := generatedata.GenData{} + + err := dec.Decode(&data) + if err != nil { + if !errors.Is(err, io.EOF) { + cmd.PrintErrln(err) + } + break + } + + data.Exec() + + } + + + } +} diff --git a/cmd/listcases.go.bak b/cmd/listcases.go.bak new file mode 100644 index 0000000..6c30bbf --- /dev/null +++ b/cmd/listcases.go.bak @@ -0,0 +1,44 @@ +/* +Copyright © 2020 NAME HERE + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package cmd + +import ( + "fmt" + "testcase/cases" + + "github.com/spf13/cobra" +) + +// listcasesCmd represents the listcases command +var listcasesCmd = &cobra.Command{ + Use: "listcases", + Short: "A brief description of your command", + Long: `A longer description that spans multiple lines and likely contains examples +and usage of using your command. For example: + +Cobra is a CLI library for Go that empowers applications. +This application is a tool to generate the needed files +to quickly create a Cobra application.`, + Run: func(cmd *cobra.Command, args []string) { + fmt.Println("listcases called") + cases.DisplayCasesList() + }, +} + +func init() { + rootCmd.AddCommand(listcasesCmd) + +} diff --git a/commons/fileutil.go b/commons/fileutil.go new file mode 100644 index 0000000..7a52123 --- /dev/null +++ b/commons/fileutil.go @@ -0,0 +1,117 @@ +package commons + +import ( + "bufio" + "bytes" + "fmt" + "io/ioutil" + "os" + "strings" +) + +//AppendLineToFile 向文件追加行 +func AppendLineToFile(line bytes.Buffer, filename string) { + + f, err := os.OpenFile(filename, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644) + if err != nil { + panic(err) + } + + defer f.Close() + w := bufio.NewWriter(f) + fmt.Fprintln(w, line.String()) + w.Flush() +} + +func WriteFile(content []byte) error { + err := ioutil.WriteFile("output.txt", content, 0666) + return err +} + +// Exists 用于判断所给路径文件或文件夹是否存在 +func FileExists(path string) bool { + _, err := os.Stat(path) //os.Stat获取文件信息 + if err != nil { + if os.IsExist(err) { + return true + } + return false + } + return true +} + +// IsDir 判断所给路径是否为文件夹 +func IsDir(path string) bool { + s, err := os.Stat(path) + if err != nil { + return false + } + return s.IsDir() +} + +//IsFile 判断所给路径是否为文件 +func IsFile(path string) bool { + return !IsDir(path) +} + +//获取指定目录下的所有文件和目录 +func GetFilesAndDirs(dirPth string) (files []string, dirs []string, err error) { + dir, err := ioutil.ReadDir(dirPth) + if err != nil { + return nil, nil, err + } + + PthSep 
:= string(os.PathSeparator) + //suffix = strings.ToUpper(suffix) //忽略后缀匹配的大小写 + + for _, fi := range dir { + if fi.IsDir() { // 目录, 递归遍历 + dirs = append(dirs, dirPth+PthSep+fi.Name()) + GetFilesAndDirs(dirPth + PthSep + fi.Name()) + } else { + // 过滤指定格式 + ok := strings.HasSuffix(fi.Name(), ".go") + if ok { + files = append(files, dirPth+PthSep+fi.Name()) + } + } + } + + return files, dirs, nil +} + +//获取指定目录下的所有文件,包含子目录下的文件 +func GetAllFiles(dirPth string) (files []string, err error) { + var dirs []string + dir, err := ioutil.ReadDir(dirPth) + if err != nil { + return nil, err + } + + PthSep := string(os.PathSeparator) + //suffix = strings.ToUpper(suffix) //忽略后缀匹配的大小写 + + for _, fi := range dir { + if fi.IsDir() { // 目录, 递归遍历 + dirs = append(dirs, dirPth+PthSep+fi.Name()) + GetAllFiles(dirPth + PthSep + fi.Name()) + } else { + // 过滤指定格式 + //ok := strings.HasSuffix(fi.Name(), ".go") + //if ok { + // files = append(files, dirPth+PthSep+fi.Name()) + //} + files = append(files, dirPth+PthSep+fi.Name()) + } + } + + // 读取子目录下文件 + for _, table := range dirs { + temp, _ := GetAllFiles(table) + for _, temp1 := range temp { + files = append(files, temp1) + } + } + + return files, nil +} diff --git a/commons/interfaceutil.go b/commons/interfaceutil.go new file mode 100644 index 0000000..d338e16 --- /dev/null +++ b/commons/interfaceutil.go @@ -0,0 +1,11 @@ +package commons + +import "reflect" + +func IsNil(i interface{}) bool { + vi := reflect.ValueOf(i) + if vi.Kind() == reflect.Ptr { + return vi.IsNil() + } + return false +} diff --git a/commons/randutil.go b/commons/randutil.go new file mode 100644 index 0000000..12e205d --- /dev/null +++ b/commons/randutil.go @@ -0,0 +1,38 @@ +package commons + +import ( + "math/rand" + "time" + "github.com/satori/go.uuid" +) + +const charset = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789" + +//生成随机字符串 +func StringWithCharset(length int, charset string) string { + var seededRand *rand.Rand = rand.New( + 
rand.NewSource(time.Now().UnixNano())) + b := make([]byte, length) + for i := range b { + b[i] = charset[seededRand.Intn(len(charset))] + } + return string(b) +} + +func RandString(length int) string { + //var seededRand *rand.Rand = rand.New( + // rand.NewSource(time.Now().UnixNano())) + rand.Seed(time.Now().UnixNano()) + + b := make([]byte, length) + for i := range b { + //b[i] = charset[seededRand.Intn(len(charset))] + b[i] = charset[rand.Intn(len(charset))] + + } + return string(b) +} + +func GetUUID() string { + return uuid.NewV4().String() +} diff --git a/commons/randutil_test.go b/commons/randutil_test.go new file mode 100644 index 0000000..e57ac72 --- /dev/null +++ b/commons/randutil_test.go @@ -0,0 +1,15 @@ +package commons + +import ( + "fmt" + "testing" +) + +func TestStringWithCharset(t *testing.T) { + var randing = RandString(10) + fmt.Println(randing) +} + +func TestGetUUID(t *testing.T) { + fmt.Println(GetUUID()) +} diff --git a/commons/redisutil.go b/commons/redisutil.go new file mode 100644 index 0000000..d516efb --- /dev/null +++ b/commons/redisutil.go @@ -0,0 +1,35 @@ +package commons + +import ( + redis "github.com/go-redis/redis/v7" +) + +//GetGoRedisClient 获取redis client +func GetGoRedisClient(opt *redis.Options) *redis.Client { + client := redis.NewClient(opt) + return client +} + +func GetGoRedisConn(opt *redis.Options) *redis.Conn { + client := redis.NewClient(opt) + return client.Conn() +} + +//redisserver联通性校验 +func CheckRedisClientConnect(r *redis.Client) bool { + _, err := r.Ping().Result() + if err != nil { + logger.Error(err.Error()) + return false + } + return true +} + +func CheckRedisClusterClientConnect(r *redis.ClusterClient) bool { + _, err := r.Ping().Result() + if err != nil { + logger.Error(err.Error()) + return false + } + return true +} diff --git a/commons/report.go b/commons/report.go new file mode 100644 index 0000000..8d3b672 --- /dev/null +++ b/commons/report.go @@ -0,0 +1,37 @@ +package commons + +import ( + 
"encoding/json" + "io/ioutil" + "os" + "testcase/global" + "time" +) + +var logger = global.GetInstance() + +type Report struct { + ReportContent map[string]interface{} +} + +func (r Report) Json() (jsonresult string, err error) { + bodystr, err := json.MarshalIndent(r, "", " ") + return string(bodystr), err +} + +func (r Report) JsonToFile() { + + now := time.Now().Format("20060102150405000") + filename := "report_" + now + ".json" + bodystr, err := json.MarshalIndent(r.ReportContent, "", " ") + + if err != nil { + logger.Error(err) + os.Exit(1) + } + writeerr := ioutil.WriteFile(filename, bodystr, 0666) + if writeerr != nil { + logger.Error(writeerr) + } + +} diff --git a/commons/sshutil.go b/commons/sshutil.go new file mode 100644 index 0000000..8383cfd --- /dev/null +++ b/commons/sshutil.go @@ -0,0 +1,24 @@ +package commons + +import ( + "golang.org/x/crypto/ssh" + "net" +) + +func Ssh() { + +} + +func GenSshClient(user string, password string, addr string) (*ssh.Client, error) { + config := &ssh.ClientConfig{ + User: user, + Auth: []ssh.AuthMethod{ + ssh.Password(password), + }, + HostKeyCallback: func(hostname string, remote net.Addr, key ssh.PublicKey) error { + return nil + }, + } + // Dial your ssh server. + return ssh.Dial("tcp", addr, config) +} diff --git a/commons/sshutil_test.go b/commons/sshutil_test.go new file mode 100644 index 0000000..053b1a1 --- /dev/null +++ b/commons/sshutil_test.go @@ -0,0 +1,40 @@ +package commons + +import ( + "golang.org/x/crypto/ssh" + "log" + "net" + "testing" +) + +func TestSsh(t *testing.T) { + //var hostKey ssh.PublicKey + config := &ssh.ClientConfig{ + User: "root", + Auth: []ssh.AuthMethod{ + ssh.Password("Git785230"), + }, + HostKeyCallback: func(hostname string, remote net.Addr, key ssh.PublicKey) error { + return nil + }, + //HostKeyCallback: ssh.InsecureIgnoreHostKey(), + } + // Dial your ssh server. 
+ conn, err := ssh.Dial("tcp", "114.67.83.131:22", config) + if err != nil { + log.Fatal("unable to connect: ", err) + } + defer conn.Close() + + session, err := conn.NewSession() + if err != nil { + logger.Error(err) + } + defer session.Close() + + combo, err := session.CombinedOutput("whomai;ls -al;") + if err != nil { + logger.Error(err) + } + log.Println(string(combo)) +} diff --git a/commons/yamlutile.go b/commons/yamlutile.go new file mode 100644 index 0000000..b031134 --- /dev/null +++ b/commons/yamlutile.go @@ -0,0 +1,50 @@ +package commons + +import ( + "gopkg.in/yaml.v2" + "io/ioutil" + "os" + "sync" +) + +var lock sync.Mutex + +//YamlFileToMap Convert yaml fil to map +func YamlFileToMap(configfile string) (*map[interface{}]interface{}, error) { + yamlmap := make(map[interface{}]interface{}) + yamlFile, err := ioutil.ReadFile(configfile) + if err != nil { + return nil, err + } + err = yaml.Unmarshal(yamlFile, yamlmap) + if err != nil { + return nil, err + } + return &yamlmap, nil +} + +//MapToYamlString conver map to yaml +func MapToYamlString(yamlmap map[string]interface{}) (string, error) { + lock.Lock() + defer lock.Unlock() + d, err := yaml.Marshal(&yamlmap) + if err != nil { + return "", err + } + return string(d), nil +} + +func ParseJsonFile(filepath string) ([]byte, error) { + jsonFile, err := os.Open(filepath) + defer jsonFile.Close() + + if err != nil { + return nil, err + } + + jsonbytes, err := ioutil.ReadAll(jsonFile) + if err != nil { + return nil, err + } + return jsonbytes, nil +} diff --git a/compare/compare.go b/compare/compare.go new file mode 100644 index 0000000..874d459 --- /dev/null +++ b/compare/compare.go @@ -0,0 +1,34 @@ +package compare + +import ( + "testcase/global" + //"testcase/globalzap" +) + +var zaplogger = global.RSPLog + +type CompareResult struct { + IsEqual bool + NotEqualReason map[string]interface{} + KeyDiffReason []interface{} + KeyType string + Key string + SourceDB int //源redis DB number + TargetDB int //目标redis 
DB number +} + +func NewCompareResult() CompareResult { + return CompareResult{ + IsEqual: true, + } +} + +type CompareData interface { + CompareDB() + CompareKeys(keys []string) + CompareString(key string) *CompareResult + CompareList(key string) *CompareResult + CompareHash(key string) *CompareResult + CompareSet(key string) *CompareResult + CompareZset(key string) *CompareResult +} diff --git a/compare/comparesingle2cluster.go b/compare/comparesingle2cluster.go new file mode 100644 index 0000000..747d63d --- /dev/null +++ b/compare/comparesingle2cluster.go @@ -0,0 +1,644 @@ +package compare + +import ( + "github.com/go-redis/redis/v7" + "github.com/panjf2000/ants/v2" + "go.uber.org/zap" + "math" + "runtime" + "strconv" + "sync" + "testcase/global" + "time" +) + +type CompareSingle2Cluster struct { + Source *redis.Client //源redis single + Target *redis.ClusterClient //目标redis single + LogOut bool //是否输出日志 + LogOutPath string //日志输出路径 + BatchSize int64 //比较List、Set、Zset类型时的每批次值的数量 + CompareThreads int //比较db线程数量 + TTLDiff float64 //TTL最小差值 + SourceDB int //源redis DB number + TargetDB int //目标redis DB number +} + +func (compare *CompareSingle2Cluster) CompareDB() { + + wg := sync.WaitGroup{} + threads := runtime.NumCPU() + if compare.CompareThreads > 0 { + threads = compare.CompareThreads + } + cursor := uint64(0) + global.RSPLog.Sugar().Info("CompareSingle2Cluster DB beging") + ticker := time.NewTicker(time.Second * 20) + defer ticker.Stop() + + pool, err := ants.NewPool(threads) + + if err != nil { + global.RSPLog.Sugar().Error(err) + return + } + defer pool.Release() + + for { + result, c, err := compare.Source.Scan(cursor, "*", compare.BatchSize).Result() + + if err != nil { + global.RSPLog.Sugar().Info(result, c, err) + return + } + + //当pool有活动worker时提交异步任务 + for { + if pool.Free() > 0 { + wg.Add(1) + pool.Submit(func() { + compare.CompareKeys(result) + wg.Done() + }) + break + } + } + cursor = c + + if c == 0 { + break + } + select { + case <-ticker.C: + 
global.RSPLog.Sugar().Info("Comparing...") + default: + continue + } + } + wg.Wait() + global.RSPLog.Sugar().Info("CompareSingle2Cluster End") +} + +func (compare *CompareSingle2Cluster) CompareKeys(keys []string) { + var result *CompareResult + for _, v := range keys { + keytype, err := compare.Source.Type(v).Result() + if err != nil { + global.RSPLog.Sugar().Error(err) + continue + } + result = nil + switch { + case keytype == "string": + result = compare.CompareString(v) + case keytype == "list": + result = compare.CompareList(v) + case keytype == "set": + result = compare.CompareSet(v) + case keytype == "zset": + result = compare.CompareZset(v) + case keytype == "hash": + result = compare.CompareHash(v) + default: + global.RSPLog.Info("No type find in compare list", zap.String("key", v), zap.String("type", keytype)) + } + + if result != nil && !result.IsEqual { + global.RSPLog.Info("", zap.Any("CompareResult", result)) + } + } +} + +func (compare *CompareSingle2Cluster) CompareString(key string) *CompareResult { + notequalreason := make(map[string]interface{}) + compareresult := NewCompareResult() + compareresult.NotEqualReason = notequalreason + compareresult.Key = key + compareresult.KeyType = "string" + compareresult.SourceDB = compare.SourceDB + compareresult.TargetDB = compare.TargetDB + + //比较key的存在状态是否一致 + result := compare.KeyExistsStatusEqual(key) + if !result.IsEqual { + return result + } + + //比较string value是否一致 + result = compare.CompareStringVal(key) + if !result.IsEqual { + return result + } + + //比较ttl差值是否在允许范围内 + result = compare.DiffTTLOver(key) + if !result.IsEqual { + return result + } + return &compareresult +} + +func (compare *CompareSingle2Cluster) CompareList(key string) *CompareResult { + notequalreason := make(map[string]interface{}) + compareresult := NewCompareResult() + compareresult.NotEqualReason = notequalreason + compareresult.Key = key + compareresult.KeyType = "list" + compareresult.SourceDB = compare.SourceDB + 
compareresult.TargetDB = compare.TargetDB + + result := compare.KeyExistsStatusEqual(key) + if !result.IsEqual { + return result + } + + result = compare.CompareListLen(key) + if !result.IsEqual { + return result + } + + result = compare.DiffTTLOver(key) + if !result.IsEqual { + return result + } + + result = compare.CompareListIndexVal(key) + if !result.IsEqual { + return result + } + return &compareresult + +} + +func (compare *CompareSingle2Cluster) CompareHash(key string) *CompareResult { + notequalreason := make(map[string]interface{}) + compareresult := NewCompareResult() + compareresult.NotEqualReason = notequalreason + compareresult.Key = key + compareresult.KeyType = "hash" + compareresult.SourceDB = compare.SourceDB + compareresult.TargetDB = compare.TargetDB + + result := compare.KeyExistsStatusEqual(key) + if !result.IsEqual { + return result + } + + result = compare.CompareHashLen(key) + if !result.IsEqual { + return result + } + + result = compare.CompareHashFieldVal(key) + if !result.IsEqual { + return result + } + + return &compareresult +} + +func (compare *CompareSingle2Cluster) CompareSet(key string) *CompareResult { + notequalreason := make(map[string]interface{}) + compareresult := NewCompareResult() + compareresult.NotEqualReason = notequalreason + compareresult.Key = key + compareresult.KeyType = "set" + compareresult.SourceDB = compare.SourceDB + compareresult.TargetDB = compare.TargetDB + + result := compare.KeyExistsStatusEqual(key) + if !result.IsEqual { + return result + } + + result = compare.CompareSetLen(key) + if !result.IsEqual { + return result + } + + result = compare.DiffTTLOver(key) + if !result.IsEqual { + return result + } + + result = compare.CompareSetMember(key) + if !result.IsEqual { + return result + } + return &compareresult +} + +func (compare *CompareSingle2Cluster) CompareZset(key string) *CompareResult { + notequalreason := make(map[string]interface{}) + compareresult := NewCompareResult() + 
compareresult.NotEqualReason = notequalreason + compareresult.Key = key + compareresult.KeyType = "zset" + compareresult.SourceDB = compare.SourceDB + compareresult.TargetDB = compare.TargetDB + + result := compare.KeyExistsStatusEqual(key) + if !result.IsEqual { + return result + } + + result = compare.CompareZsetLen(key) + if !result.IsEqual { + return result + } + + result = compare.DiffTTLOver(key) + if !result.IsEqual { + return result + } + + result = compare.CompareZsetMemberScore(key) + if !result.IsEqual { + return result + } + return &compareresult +} + +//判断key在source和target同时不存在 +func (compare *CompareSingle2Cluster) KeyExistsStatusEqual(key string) *CompareResult { + + compareresult := NewCompareResult() + reason := make(map[string]interface{}) + compareresult.Key = key + //compareresult.KeyType = "Zset" + compareresult.SourceDB = compare.SourceDB + compareresult.TargetDB = compare.TargetDB + + sourceexists := KeyExists(compare.Source, key) + targetexists := KeyExistsInCluster(compare.Target, key) + + if sourceexists == targetexists { + return &compareresult + } + + compareresult.IsEqual = false + reason["description"] = "Source or Target key not exists" + reason["source"] = sourceexists + reason["target"] = targetexists + compareresult.KeyDiffReason = append(compareresult.KeyDiffReason, reason) + return &compareresult +} + +//比较Zset member以及sore值是否一致 +func (compare *CompareSingle2Cluster) CompareZsetMemberScore(key string) *CompareResult { + compareresult := NewCompareResult() + reason := make(map[string]interface{}) + compareresult.Key = key + compareresult.KeyType = "Zset" + compareresult.SourceDB = compare.SourceDB + compareresult.TargetDB = compare.TargetDB + + cursor := uint64(0) + for { + sourceresult, c, err := compare.Source.ZScan(key, cursor, "*", compare.BatchSize).Result() + if err != nil { + compareresult.IsEqual = false + reason["description"] = "Source zscan error" + reason["zscanerror"] = err.Error() + compareresult.KeyDiffReason = 
append(compareresult.KeyDiffReason, reason) + return &compareresult + } + + for i := 0; i < len(sourceresult); i = i + 2 { + sourecemember := sourceresult[i] + sourcescore, err := strconv.ParseFloat(sourceresult[i+1], 64) + if err != nil { + compareresult.IsEqual = false + reason["description"] = "Convert sourcescore to float64 error" + reason["floattostringerror"] = err.Error() + compareresult.KeyDiffReason = append(compareresult.KeyDiffReason, reason) + return &compareresult + } + + intcmd := compare.Target.ZRank(key, sourecemember) + targetscore := compare.Target.ZScore(key, sourecemember).Val() + + if intcmd == nil { + compareresult.IsEqual = false + reason["description"] = "Source zset member not exists in Target" + reason["member"] = sourecemember + compareresult.KeyDiffReason = append(compareresult.KeyDiffReason, reason) + return &compareresult + } + + if targetscore != sourcescore { + compareresult.IsEqual = false + reason["description"] = "zset member score not equal" + reason["member"] = sourecemember + reason["sourcescore"] = sourcescore + reason["targetscore"] = targetscore + compareresult.KeyDiffReason = append(compareresult.KeyDiffReason, reason) + return &compareresult + } + + } + + cursor = c + if c == 0 { + break + } + } + return &compareresult +} + +//比较zset 长度是否一致 +func (compare *CompareSingle2Cluster) CompareZsetLen(key string) *CompareResult { + compareresult := NewCompareResult() + reason := make(map[string]interface{}) + compareresult.Key = key + compareresult.KeyType = "Zset" + compareresult.SourceDB = compare.SourceDB + compareresult.TargetDB = compare.TargetDB + + sourcelen := compare.Source.ZCard(key).Val() + targetlen := compare.Target.ZCard(key).Val() + if sourcelen != targetlen { + compareresult.IsEqual = false + reason["description"] = "Zset length not equal" + reason["sourcelen"] = sourcelen + reason["targetlen"] = targetlen + compareresult.KeyDiffReason = append(compareresult.KeyDiffReason, reason) + return &compareresult + } + 
return &compareresult +} + +//比较set member 是否一致 +func (compare *CompareSingle2Cluster) CompareSetMember(key string) *CompareResult { + compareresult := NewCompareResult() + reason := make(map[string]interface{}) + compareresult.Key = key + compareresult.KeyType = "set" + compareresult.SourceDB = compare.SourceDB + compareresult.TargetDB = compare.TargetDB + + cursor := uint64(0) + for { + sourceresult, c, err := compare.Source.SScan(key, cursor, "*", compare.BatchSize).Result() + if err != nil { + compareresult.IsEqual = false + reason["description"] = "Source sscan error" + reason["sscanerror"] = err.Error() + compareresult.KeyDiffReason = append(compareresult.KeyDiffReason, reason) + return &compareresult + } + + for _, v := range sourceresult { + if !compare.Target.SIsMember(key, v).Val() { + compareresult.IsEqual = false + reason["description"] = "Source set member not exists in Target" + reason["member"] = v + compareresult.KeyDiffReason = append(compareresult.KeyDiffReason, reason) + return &compareresult + } + } + + cursor = c + if c == 0 { + break + } + } + return &compareresult +} + +//比较set长度 +func (compare *CompareSingle2Cluster) CompareSetLen(key string) *CompareResult { + compareresult := NewCompareResult() + reason := make(map[string]interface{}) + compareresult.Key = key + compareresult.KeyType = "set" + compareresult.SourceDB = compare.SourceDB + compareresult.TargetDB = compare.TargetDB + + sourcelen := compare.Source.SCard(key).Val() + targetlen := compare.Target.SCard(key).Val() + if sourcelen != targetlen { + compareresult.IsEqual = false + reason["description"] = "Set length not equal" + reason["sourcelen"] = sourcelen + reason["targetlen"] = targetlen + compareresult.KeyDiffReason = append(compareresult.KeyDiffReason, reason) + return &compareresult + } + return &compareresult +} + +//比较hash field value 返回首个不相等的field +func (compare *CompareSingle2Cluster) CompareHashFieldVal(key string) *CompareResult { + compareresult := NewCompareResult() + 
reason := make(map[string]interface{}) + compareresult.Key = key + compareresult.KeyType = "hash" + compareresult.SourceDB = compare.SourceDB + compareresult.TargetDB = compare.TargetDB + + cursor := uint64(0) + for { + sourceresult, c, err := compare.Source.HScan(key, cursor, "*", compare.BatchSize).Result() + + if err != nil { + compareresult.IsEqual = false + reason["description"] = "Source hscan error" + reason["hscanerror"] = err.Error() + compareresult.KeyDiffReason = append(compareresult.KeyDiffReason, reason) + return &compareresult + } + + for i := 0; i < len(sourceresult); i = i + 2 { + targetfieldval := compare.Target.HGet(key, sourceresult[i]).Val() + if targetfieldval != sourceresult[i+1] { + compareresult.IsEqual = false + reason["description"] = "Field value not equal" + reason["field"] = sourceresult[i] + reason["sourceval"] = sourceresult[i+1] + reason["targetval"] = targetfieldval + compareresult.KeyDiffReason = append(compareresult.KeyDiffReason, reason) + return &compareresult + } + } + cursor = c + if c == uint64(0) { + break + } + } + return &compareresult +} + +//比较hash长度 +func (compare *CompareSingle2Cluster) CompareHashLen(key string) *CompareResult { + compareresult := NewCompareResult() + reason := make(map[string]interface{}) + compareresult.Key = key + compareresult.KeyType = "hash" + compareresult.SourceDB = compare.SourceDB + compareresult.TargetDB = compare.TargetDB + + sourcelen := compare.Source.HLen(key).Val() + targetlen := compare.Target.HLen(key).Val() + + if sourcelen != targetlen { + + compareresult.IsEqual = false + reason["description"] = "Hash length not equal" + reason["sourcelen"] = sourcelen + reason["targetlen"] = targetlen + compareresult.KeyDiffReason = append(compareresult.KeyDiffReason, reason) + return &compareresult + } + return &compareresult +} + +//比较list index对应值是否一致,返回第一条错误的index以及源和目标对应的值 +func (compare *CompareSingle2Cluster) CompareListIndexVal(key string) *CompareResult { + compareresult := 
NewCompareResult() + reason := make(map[string]interface{}) + compareresult.Key = key + compareresult.KeyType = "list" + compareresult.SourceDB = compare.SourceDB + compareresult.TargetDB = compare.TargetDB + + sourcelen := compare.Source.LLen(key).Val() + //targetlen := compare.Target.LLen(key).Val() + + compareresult.Key = key + quotient := sourcelen / compare.BatchSize // integer division, decimals are truncated + remainder := sourcelen % compare.BatchSize + + if quotient != 0 { + var lrangeend int64 + for i := int64(0); i < quotient; i++ { + if i == quotient-int64(1) { + lrangeend = quotient * compare.BatchSize + } else { + lrangeend = (compare.BatchSize - 1) + i*compare.BatchSize + } + sourcevalues := compare.Source.LRange(key, int64(0)+i*compare.BatchSize, lrangeend).Val() + targetvalues := compare.Target.LRange(key, int64(0)+i*compare.BatchSize, lrangeend).Val() + for k, v := range sourcevalues { + if targetvalues[k] != v { + compareresult.IsEqual = false + reason["description"] = "List index value not equal" + reason["Index"] = int64(k) + i*compare.BatchSize + reason["sourceval"] = v + reason["targetval"] = targetvalues[k] + compareresult.KeyDiffReason = append(compareresult.KeyDiffReason, reason) + return &compareresult + } + } + } + } + + if remainder != 0 { + var rangstart int64 + + if quotient == int64(0) { + rangstart = int64(0) + } else { + rangstart = quotient*compare.BatchSize + 1 + } + + sourcevalues := compare.Source.LRange(key, rangstart, remainder+quotient*compare.BatchSize).Val() + targetvalues := compare.Target.LRange(key, rangstart, remainder+quotient*compare.BatchSize).Val() + for k, v := range sourcevalues { + if targetvalues[k] != v { + compareresult.IsEqual = false + reason["description"] = "List index value not equal" + reason["Index"] = int64(k) + rangstart + reason["sourceval"] = v + reason["targetval"] = targetvalues[k] + compareresult.KeyDiffReason = append(compareresult.KeyDiffReason, reason) + return &compareresult + } + } + } + + 
return &compareresult + +} + +//比较list长度是否一致 +func (compare *CompareSingle2Cluster) CompareListLen(key string) *CompareResult { + compareresult := NewCompareResult() + reason := make(map[string]interface{}) + compareresult.Key = key + compareresult.KeyType = "list" + compareresult.SourceDB = compare.SourceDB + compareresult.TargetDB = compare.TargetDB + + sourcelen := compare.Source.LLen(key).Val() + targetlen := compare.Target.LLen(key).Val() + + compareresult.Key = key + if sourcelen != targetlen { + compareresult.IsEqual = false + reason["description"] = "List length not equal" + reason["sourcelen"] = sourcelen + reason["targetlen"] = targetlen + compareresult.KeyDiffReason = append(compareresult.KeyDiffReason, reason) + return &compareresult + } + return &compareresult +} + +//对比string类型value是否一致 +func (compare *CompareSingle2Cluster) CompareStringVal(key string) *CompareResult { + compareresult := NewCompareResult() + reason := make(map[string]interface{}) + compareresult.Key = key + compareresult.KeyType = "string" + compareresult.SourceDB = compare.SourceDB + compareresult.TargetDB = compare.TargetDB + + sourceval := compare.Source.Get(key).Val() + targetval := compare.Target.Get(key).Val() + compareresult.Key = key + if sourceval != targetval { + compareresult.IsEqual = false + reason["description"] = "String value not equal" + reason["sval"] = sourceval + reason["tval"] = targetval + compareresult.KeyDiffReason = append(compareresult.KeyDiffReason, reason) + return &compareresult + } + return &compareresult +} + +//对比key TTl差值 +func (compare *CompareSingle2Cluster) DiffTTLOver(key string) *CompareResult { + compareresult := NewCompareResult() + reason := make(map[string]interface{}) + compareresult.Key = key + compareresult.KeyType = "string" + compareresult.SourceDB = compare.SourceDB + compareresult.TargetDB = compare.TargetDB + + sourcettl := compare.Source.PTTL(key).Val().Milliseconds() + targetttl := compare.Target.PTTL(key).Val().Milliseconds() + + 
sub := targetttl - sourcettl + if math.Abs(float64(sub)) > compare.TTLDiff { + compareresult.IsEqual = false + reason["description"] = "Key ttl difference is too large" + reason["TTLDiff"] = int64(math.Abs(float64(sub))) + reason["sourcettl"] = sourcettl + reason["targetttl"] = targetttl + + compareresult.KeyDiffReason = append(compareresult.KeyDiffReason, reason) + return &compareresult + } + return &compareresult +} + +func KeyExistsInCluster(client *redis.ClusterClient, key string) bool { + exists := client.Exists(key).Val() + if exists == int64(1) { + return true + } else { + return false + } +} diff --git a/compare/comparesingle2cluster_test.go b/compare/comparesingle2cluster_test.go new file mode 100644 index 0000000..a28e525 --- /dev/null +++ b/compare/comparesingle2cluster_test.go @@ -0,0 +1,42 @@ +package compare + +import ( + "github.com/go-redis/redis/v7" + "testcase/commons" + "testing" +) + +func TestCompareSingle2Single_CompareDB(t *testing.T) { + saddr := "114.67.100.239:6379" + opt := &redis.Options{ + Addr: saddr, + DB: 0, // use default DB + } + opt.Password = "redistest0102" + sclient := commons.GetGoRedisClient(opt) + + //compare := CompareSingle2Single{ + // Source: client, Target: client, BatchSize: 10, + //} + + tclusterclient := redis.NewClusterClient(&redis.ClusterOptions{ + Addrs: []string{"114.67.67.7:16379", + " 114.67.67.7:16380", + " 114.67.83.163:16379 ", + " 114.67.83.163:16380 ", + " 114.67.112.67:16379 ", + " 114.67.112.67:16380"}, + Password: "testredis0102", + }) + + csc := &CompareSingle2Cluster{ + Source: sclient, //源redis single + Target: tclusterclient, //目标redis single + BatchSize: int64(30), //比较List、Set、Zset类型时的每批次值的数量 + CompareThreads: 4, //比较db线程数量 + TTLDiff: float64(100000), //TTL最小差值 + SourceDB: 0, //源redis DB number + TargetDB: 0, //目标redis DB number + } + csc.CompareDB() +} diff --git a/compare/comparesingle2single.go b/compare/comparesingle2single.go new file mode 100644 index 0000000..e74716b --- /dev/null +++ 
b/compare/comparesingle2single.go @@ -0,0 +1,640 @@ +package compare + +import ( + "github.com/go-redis/redis/v7" + "github.com/panjf2000/ants/v2" + "go.uber.org/zap" + "math" + "runtime" + "strconv" + "sync" + "testcase/global" + "time" +) + +type CompareSingle2Single struct { + Source *redis.Client //源redis single + Target *redis.Client //目标redis single + LogOut bool //是否输出日志 + LogOutPath string //日志输出路径 + BatchSize int64 //比较List、Set、Zset类型时的每批次值的数量 + CompareThreads int //比较db线程数量 + TTLDiff float64 //TTL最小差值 + SourceDB int //源redis DB number + TargetDB int //目标redis DB number +} + +func (compare *CompareSingle2Single) CompareDB() { + + wg := sync.WaitGroup{} + threads := runtime.NumCPU() + if compare.CompareThreads > 0 { + threads = compare.CompareThreads + } + cursor := uint64(0) + global.RSPLog.Sugar().Info("CompareSingle2single DB begin") + ticker := time.NewTicker(time.Second * 20) + defer ticker.Stop() + + pool, err := ants.NewPool(threads) + + if err != nil { + global.RSPLog.Sugar().Error(err) + return + } + defer pool.Release() + + for { + result, c, err := compare.Source.Scan(cursor, "*", compare.BatchSize).Result() + + if err != nil { + global.RSPLog.Sugar().Info(result, c, err) + return + } + + //当pool有活动worker时提交异步任务 + for { + if pool.Free() > 0 { + wg.Add(1) + pool.Submit(func() { + compare.CompareKeys(result) + wg.Done() + }) + break + } + } + cursor = c + + if c == 0 { + break + } + + select { + case <-ticker.C: + global.RSPLog.Sugar().Info("Comparing...") + default: + continue + } + } + wg.Wait() + global.RSPLog.Sugar().Info("CompareSingle2single End") +} + +func (compare *CompareSingle2Single) CompareKeys(keys []string) { + var result *CompareResult + for _, v := range keys { + keytype, err := compare.Source.Type(v).Result() + if err != nil { + global.RSPLog.Sugar().Error(err) + continue + } + result = nil + switch { + case keytype == "string": + result = compare.CompareString(v) + case keytype == "list": + result = compare.CompareList(v) + case 
keytype == "set": + result = compare.CompareSet(v) + case keytype == "zset": + result = compare.CompareZset(v) + case keytype == "hash": + result = compare.CompareHash(v) + default: + global.RSPLog.Info("No type find in compare list", zap.String("key", v), zap.String("type", keytype)) + } + + if result != nil && !result.IsEqual { + global.RSPLog.Info("", zap.Any("CompareResult", result)) + } + } +} + +func (compare *CompareSingle2Single) CompareString(key string) *CompareResult { + + //比较key的存在状态是否一致 + result := compare.KeyExistsStatusEqual(key) + if !result.IsEqual { + return result + } + + //比较string value是否一致 + result = compare.CompareStringVal(key) + if !result.IsEqual { + return result + } + + //比较ttl差值是否在允许范围内 + result = compare.DiffTTLOver(key) + if !result.IsEqual { + return result + } + + compareresult := NewCompareResult() + compareresult.Key = key + compareresult.KeyType = "string" + compareresult.SourceDB = compare.SourceDB + compareresult.TargetDB = compare.TargetDB + return &compareresult +} + +func (compare *CompareSingle2Single) CompareList(key string) *CompareResult { + + result := compare.KeyExistsStatusEqual(key) + if !result.IsEqual { + return result + } + + result = compare.CompareListLen(key) + if !result.IsEqual { + return result + } + + result = compare.DiffTTLOver(key) + if !result.IsEqual { + return result + } + + result = compare.CompareListIndexVal(key) + if !result.IsEqual { + return result + } + + compareresult := NewCompareResult() + compareresult.Key = key + compareresult.KeyType = "list" + compareresult.SourceDB = compare.SourceDB + compareresult.TargetDB = compare.TargetDB + return &compareresult + +} + +func (compare *CompareSingle2Single) CompareHash(key string) *CompareResult { + + result := compare.KeyExistsStatusEqual(key) + if !result.IsEqual { + return result + } + + result = compare.CompareHashLen(key) + if !result.IsEqual { + return result + } + + result = compare.CompareHashFieldVal(key) + if !result.IsEqual { + return 
result + } + + compareresult := NewCompareResult() + compareresult.Key = key + compareresult.KeyType = "hash" + compareresult.SourceDB = compare.SourceDB + compareresult.TargetDB = compare.TargetDB + + return &compareresult +} + +func (compare *CompareSingle2Single) CompareSet(key string) *CompareResult { + + result := compare.KeyExistsStatusEqual(key) + if !result.IsEqual { + return result + } + + result = compare.CompareSetLen(key) + if !result.IsEqual { + return result + } + + result = compare.DiffTTLOver(key) + if !result.IsEqual { + return result + } + + result = compare.CompareSetMember(key) + if !result.IsEqual { + return result + } + + compareresult := NewCompareResult() + compareresult.Key = key + compareresult.KeyType = "set" + compareresult.SourceDB = compare.SourceDB + compareresult.TargetDB = compare.TargetDB + + return &compareresult +} + +func (compare *CompareSingle2Single) CompareZset(key string) *CompareResult { + + result := compare.KeyExistsStatusEqual(key) + if !result.IsEqual { + return result + } + + result = compare.CompareZsetLen(key) + if !result.IsEqual { + return result + } + + result = compare.DiffTTLOver(key) + if !result.IsEqual { + return result + } + + result = compare.CompareZsetMemberScore(key) + if !result.IsEqual { + return result + } + + compareresult := NewCompareResult() + compareresult.Key = key + compareresult.KeyType = "zset" + compareresult.SourceDB = compare.SourceDB + compareresult.TargetDB = compare.TargetDB + return &compareresult +} + +//判断key在source和target同时不存在 +func (compare *CompareSingle2Single) KeyExistsStatusEqual(key string) *CompareResult { + compareresult := NewCompareResult() + reason := make(map[string]interface{}) + compareresult.Key = key + //compareresult.KeyType = "Zset" + compareresult.SourceDB = compare.SourceDB + compareresult.TargetDB = compare.TargetDB + + sourceexists := KeyExists(compare.Source, key) + targetexists := KeyExists(compare.Target, key) + + if sourceexists == targetexists { + return 
&compareresult + } + + compareresult.IsEqual = false + reason["description"] = "Source or Target key not exists" + reason["source"] = sourceexists + reason["target"] = targetexists + compareresult.KeyDiffReason = append(compareresult.KeyDiffReason, reason) + return &compareresult +} + +//比较Zset member以及sore值是否一致 +func (compare *CompareSingle2Single) CompareZsetMemberScore(key string) *CompareResult { + compareresult := NewCompareResult() + reason := make(map[string]interface{}) + compareresult.Key = key + compareresult.KeyType = "Zset" + compareresult.SourceDB = compare.SourceDB + compareresult.TargetDB = compare.TargetDB + + cursor := uint64(0) + for { + sourceresult, c, err := compare.Source.ZScan(key, cursor, "*", compare.BatchSize).Result() + if err != nil { + compareresult.IsEqual = false + reason["description"] = "Source zscan error" + reason["zscanerror"] = err.Error() + compareresult.KeyDiffReason = append(compareresult.KeyDiffReason, reason) + return &compareresult + } + + for i := 0; i < len(sourceresult); i = i + 2 { + sourecemember := sourceresult[i] + sourcescore, err := strconv.ParseFloat(sourceresult[i+1], 64) + if err != nil { + compareresult.IsEqual = false + reason["description"] = "Convert sourcescore to float64 error" + reason["floattostringerror"] = err.Error() + compareresult.KeyDiffReason = append(compareresult.KeyDiffReason, reason) + return &compareresult + } + + intcmd := compare.Target.ZRank(key, sourecemember) + targetscore := compare.Target.ZScore(key, sourecemember).Val() + + if intcmd == nil { + compareresult.IsEqual = false + reason["description"] = "Source zset member not exists in Target" + reason["member"] = sourecemember + compareresult.KeyDiffReason = append(compareresult.KeyDiffReason, reason) + return &compareresult + } + + if targetscore != sourcescore { + compareresult.IsEqual = false + reason["description"] = "zset member score not equal" + reason["member"] = sourecemember + reason["sourcescore"] = sourcescore + 
reason["targetscore"] = targetscore + compareresult.KeyDiffReason = append(compareresult.KeyDiffReason, reason) + return &compareresult + } + + } + + cursor = c + if c == 0 { + break + } + } + return &compareresult +} + +//比较zset 长度是否一致 +func (compare *CompareSingle2Single) CompareZsetLen(key string) *CompareResult { + compareresult := NewCompareResult() + reason := make(map[string]interface{}) + compareresult.Key = key + compareresult.KeyType = "Zset" + compareresult.SourceDB = compare.SourceDB + compareresult.TargetDB = compare.TargetDB + + sourcelen := compare.Source.ZCard(key).Val() + targetlen := compare.Target.ZCard(key).Val() + if sourcelen != targetlen { + compareresult.IsEqual = false + reason["description"] = "Zset length not equal" + reason["sourcelen"] = sourcelen + reason["targetlen"] = targetlen + compareresult.KeyDiffReason = append(compareresult.KeyDiffReason, reason) + return &compareresult + } + return &compareresult +} + +//比较set member 是否一致 +func (compare *CompareSingle2Single) CompareSetMember(key string) *CompareResult { + compareresult := NewCompareResult() + reason := make(map[string]interface{}) + compareresult.Key = key + compareresult.KeyType = "set" + compareresult.SourceDB = compare.SourceDB + compareresult.TargetDB = compare.TargetDB + + cursor := uint64(0) + for { + sourceresult, c, err := compare.Source.SScan(key, cursor, "*", compare.BatchSize).Result() + if err != nil { + compareresult.IsEqual = false + reason["description"] = "Source sscan error" + reason["sscanerror"] = err.Error() + compareresult.KeyDiffReason = append(compareresult.KeyDiffReason, reason) + return &compareresult + } + + for _, v := range sourceresult { + if !compare.Target.SIsMember(key, v).Val() { + compareresult.IsEqual = false + reason["description"] = "Source set member not exists in Target" + reason["member"] = v + compareresult.KeyDiffReason = append(compareresult.KeyDiffReason, reason) + return &compareresult + } + } + + cursor = c + if c == 0 { + break + 
} + } + return &compareresult +} + +//比较set长度 +func (compare *CompareSingle2Single) CompareSetLen(key string) *CompareResult { + compareresult := NewCompareResult() + reason := make(map[string]interface{}) + compareresult.Key = key + compareresult.KeyType = "set" + compareresult.SourceDB = compare.SourceDB + compareresult.TargetDB = compare.TargetDB + + sourcelen := compare.Source.SCard(key).Val() + targetlen := compare.Target.SCard(key).Val() + if sourcelen != targetlen { + compareresult.IsEqual = false + reason["description"] = "Set length not equal" + reason["sourcelen"] = sourcelen + reason["targetlen"] = targetlen + compareresult.KeyDiffReason = append(compareresult.KeyDiffReason, reason) + return &compareresult + } + return &compareresult +} + +//比较hash field value 返回首个不相等的field +func (compare *CompareSingle2Single) CompareHashFieldVal(key string) *CompareResult { + compareresult := NewCompareResult() + reason := make(map[string]interface{}) + compareresult.Key = key + compareresult.KeyType = "hash" + compareresult.SourceDB = compare.SourceDB + compareresult.TargetDB = compare.TargetDB + + cursor := uint64(0) + for { + sourceresult, c, err := compare.Source.HScan(key, cursor, "*", compare.BatchSize).Result() + + if err != nil { + compareresult.IsEqual = false + reason["description"] = "Source hscan error" + reason["hscanerror"] = err.Error() + compareresult.KeyDiffReason = append(compareresult.KeyDiffReason, reason) + return &compareresult + } + + for i := 0; i < len(sourceresult); i = i + 2 { + targetfieldval := compare.Target.HGet(key, sourceresult[i]).Val() + if targetfieldval != sourceresult[i+1] { + compareresult.IsEqual = false + reason["description"] = "Field value not equal" + reason["field"] = sourceresult[i] + reason["sourceval"] = sourceresult[i+1] + reason["targetval"] = targetfieldval + compareresult.KeyDiffReason = append(compareresult.KeyDiffReason, reason) + return &compareresult + } + } + cursor = c + if c == uint64(0) { + break + } + } + 
return &compareresult +} + +//比较hash长度 +func (compare *CompareSingle2Single) CompareHashLen(key string) *CompareResult { + compareresult := NewCompareResult() + reason := make(map[string]interface{}) + compareresult.Key = key + compareresult.KeyType = "hash" + compareresult.SourceDB = compare.SourceDB + compareresult.TargetDB = compare.TargetDB + + sourcelen := compare.Source.HLen(key).Val() + targetlen := compare.Target.HLen(key).Val() + + if sourcelen != targetlen { + + compareresult.IsEqual = false + reason["description"] = "Hash length not equal" + reason["sourcelen"] = sourcelen + reason["targetlen"] = targetlen + compareresult.KeyDiffReason = append(compareresult.KeyDiffReason, reason) + return &compareresult + } + return &compareresult +} + +//比较list index对应值是否一致,返回第一条错误的index以及源和目标对应的值 +func (compare *CompareSingle2Single) CompareListIndexVal(key string) *CompareResult { + compareresult := NewCompareResult() + reason := make(map[string]interface{}) + compareresult.Key = key + compareresult.KeyType = "list" + compareresult.SourceDB = compare.SourceDB + compareresult.TargetDB = compare.TargetDB + + sourcelen := compare.Source.LLen(key).Val() + //targetlen := compare.Target.LLen(key).Val() + + compareresult.Key = key + quotient := sourcelen / compare.BatchSize // integer division, decimals are truncated + remainder := sourcelen % compare.BatchSize + + if quotient != 0 { + var lrangeend int64 + for i := int64(0); i < quotient; i++ { + if i == quotient-int64(1) { + lrangeend = quotient * compare.BatchSize + } else { + lrangeend = (compare.BatchSize - 1) + i*compare.BatchSize + } + sourcevalues := compare.Source.LRange(key, int64(0)+i*compare.BatchSize, lrangeend).Val() + targetvalues := compare.Target.LRange(key, int64(0)+i*compare.BatchSize, lrangeend).Val() + for k, v := range sourcevalues { + if targetvalues[k] != v { + compareresult.IsEqual = false + reason["description"] = "List index value not equal" + reason["Index"] = int64(k) + i*compare.BatchSize + 
reason["sourceval"] = v + reason["targetval"] = targetvalues[k] + compareresult.KeyDiffReason = append(compareresult.KeyDiffReason, reason) + return &compareresult + } + } + } + } + + if remainder != 0 { + var rangstart int64 + + if quotient == int64(0) { + rangstart = int64(0) + } else { + rangstart = quotient*compare.BatchSize + 1 + } + + sourcevalues := compare.Source.LRange(key, rangstart, remainder+quotient*compare.BatchSize).Val() + targetvalues := compare.Target.LRange(key, rangstart, remainder+quotient*compare.BatchSize).Val() + for k, v := range sourcevalues { + if targetvalues[k] != v { + compareresult.IsEqual = false + reason["description"] = "List index value not equal" + reason["Index"] = int64(k) + rangstart + reason["sourceval"] = v + reason["targetval"] = targetvalues[k] + compareresult.KeyDiffReason = append(compareresult.KeyDiffReason, reason) + return &compareresult + } + } + } + + return &compareresult + +} + +//比较list长度是否一致 +func (compare *CompareSingle2Single) CompareListLen(key string) *CompareResult { + compareresult := NewCompareResult() + reason := make(map[string]interface{}) + compareresult.Key = key + compareresult.KeyType = "list" + compareresult.SourceDB = compare.SourceDB + compareresult.TargetDB = compare.TargetDB + + sourcelen := compare.Source.LLen(key).Val() + targetlen := compare.Target.LLen(key).Val() + + compareresult.Key = key + if sourcelen != targetlen { + compareresult.IsEqual = false + reason["description"] = "List length not equal" + reason["sourcelen"] = sourcelen + reason["targetlen"] = targetlen + compareresult.KeyDiffReason = append(compareresult.KeyDiffReason, reason) + return &compareresult + } + return &compareresult +} + +//对比string类型value是否一致 +func (compare *CompareSingle2Single) CompareStringVal(key string) *CompareResult { + compareresult := NewCompareResult() + reason := make(map[string]interface{}) + compareresult.Key = key + compareresult.KeyType = "string" + compareresult.SourceDB = compare.SourceDB + 
compareresult.TargetDB = compare.TargetDB + + sourceval := compare.Source.Get(key).Val() + targetval := compare.Target.Get(key).Val() + compareresult.Key = key + if sourceval != targetval { + compareresult.IsEqual = false + reason["description"] = "String value not equal" + reason["sval"] = sourceval + reason["tval"] = targetval + compareresult.KeyDiffReason = append(compareresult.KeyDiffReason, reason) + return &compareresult + } + return &compareresult +} + +//对比key TTl差值 +func (compare *CompareSingle2Single) DiffTTLOver(key string) *CompareResult { + compareresult := NewCompareResult() + reason := make(map[string]interface{}) + compareresult.Key = key + compareresult.KeyType = "string" + compareresult.SourceDB = compare.SourceDB + compareresult.TargetDB = compare.TargetDB + + sourcettl := compare.Source.PTTL(key).Val().Milliseconds() + targetttl := compare.Target.PTTL(key).Val().Milliseconds() + + sub := targetttl - sourcettl + if math.Abs(float64(sub)) > compare.TTLDiff { + compareresult.IsEqual = false + reason["description"] = "Key ttl difference is too large" + reason["TTLDiff"] = int64(math.Abs(float64(sub))) + reason["sourcettl"] = sourcettl + reason["targetttl"] = targetttl + + compareresult.KeyDiffReason = append(compareresult.KeyDiffReason, reason) + return &compareresult + } + return &compareresult +} + +func KeyExists(client *redis.Client, key string) bool { + exists := client.Exists(key).Val() + if exists == int64(1) { + return true + } else { + return false + } +} diff --git a/compare/comparesingle2single_test.go b/compare/comparesingle2single_test.go new file mode 100644 index 0000000..5054537 --- /dev/null +++ b/compare/comparesingle2single_test.go @@ -0,0 +1,98 @@ +package compare + +import ( + "fmt" + "github.com/go-redis/redis/v7" + "math/rand" + "reflect" + "testcase/commons" + "testing" +) + +type TT struct { + A string + B int + IsA bool +} + +func (t *TT) Testtt() { + var temp interface{} + + if t.IsA { + temp = t.A + } else { + temp = t.B 
+ } + + fmt.Println(reflect.TypeOf(temp)) + +} +func TestCompare_CompareDB(t *testing.T) { + + saddr := "114.67.100.239:6379" + opt := &redis.Options{ + Addr: saddr, + DB: 0, // use default DB + } + opt.Password = "redistest0102" + client := commons.GetGoRedisClient(opt) + + //compare := CompareSingle2Cluster{ + // Source: client, Target: client, BatchSize: 10, + //} + + for i := 0; i < 20; i++ { + member := &redis.Z{Score: rand.Float64() * float64(rand.Int()), Member: i} + client.ZAdd("z_aaa", member) + } + a := true + b := false + c := true + d := false + fmt.Println(client.ZRank("z_aaa", "10")) + fmt.Println(client.ZRank("z_aaa", "100")) + fmt.Println(client.ZRank("z_aa", "10")) + + fmt.Println(a && b) + fmt.Println(a && c) + fmt.Println(b && c) + fmt.Println(b && d) + //fmt.Println((a == b) || (b == d)) + + fmt.Println(a ^ b) + fmt.Println(a ^ c) + fmt.Println(b ^ c) + fmt.Println(b ^ d) + + // + //compare.CompareDB() + //tt := TT{ + // A: "abc", + // B: 1234, + // IsA: false, + //} + // + //tt.Testtt() + //rdb := redis.NewClusterClient(&redis.ClusterOptions{ + // Addrs: []string{"114.67.67.7:16379", + // " 114.67.67.7:16380", + // " 114.67.83.163:16379 ", + // " 114.67.83.163:16380 ", + // " 114.67.112.67:16379 ", + // " 114.67.112.67:16380"}, + // Password: "testredis0102", + //}) + ////rdb.ClientList() + // + //rdb.Set("123", "bbb", 0*time.Second) + //rdb.Set("456", "bbb", 0*time.Second) + //fmt.Println(rdb.Get("aaa")) + //fmt.Println(rdb.Get("123")) + //fmt.Println(rdb.Get("456")) + //fmt.Println(rdb.ClusterKeySlot("aaa")) + //fmt.Println(rdb.ClusterKeySlot("123")) + //fmt.Println(rdb.ClusterKeySlot("456")) + // + //fmt.Println(rdb.ClusterNodes()) + +} diff --git a/config.yml b/config.yml new file mode 100644 index 0000000..64b65fa --- /dev/null +++ b/config.yml @@ -0,0 +1,11 @@ +syncserver: "http://dev:8888" +zap: + level: 'debug' + format: 'console' + prefix: '[RedissyncerT]' + director: 'log' + link-name: 'latest_log' + show-line: true + encode-level: 
'LowercaseColorLevelEncoder' +# stacktrace-key: 'stacktrace' + log-in-console: true diff --git a/config/config.go b/config/config.go new file mode 100644 index 0000000..26b84b4 --- /dev/null +++ b/config/config.go @@ -0,0 +1,5 @@ +package config + +type Server struct { + Zap Zap `mapstructure:"zap" json:"zap" yaml:"zap"` +} diff --git a/config/zap.go b/config/zap.go new file mode 100644 index 0000000..f83b6fb --- /dev/null +++ b/config/zap.go @@ -0,0 +1,13 @@ +package config + +type Zap struct { + Level string `mapstructure:"level" json:"level" yaml:"level"` + Format string `mapstructure:"format" json:"format" yaml:"format"` + Prefix string `mapstructure:"prefix" json:"prefix" yaml:"prefix"` + Director string `mapstructure:"director" json:"director" yaml:"director"` + LinkName string `mapstructure:"link-name" json:"linkName" yaml:"link-name"` + ShowLine bool `mapstructure:"show-line" json:"showLine" yaml:"showLine"` + EncodeLevel string `mapstructure:"encode-level" json:"encodeLevel" yaml:"encode-level"` + StacktraceKey string `mapstructure:"stacktrace-key" json:"stacktraceKey" yaml:"stacktrace-key"` + LogInConsole bool `mapstructure:"log-in-console" json:"logInConsole" yaml:"log-in-console"` +} diff --git a/core/constant.go b/core/constant.go new file mode 100644 index 0000000..eb558f3 --- /dev/null +++ b/core/constant.go @@ -0,0 +1,6 @@ +package core +const ( + ConfigEnv = "RSP_CONFIG" + ConfigFile = "config.yml" +) + diff --git a/core/viper.go b/core/viper.go new file mode 100644 index 0000000..89dc99f --- /dev/null +++ b/core/viper.go @@ -0,0 +1,66 @@ + + +package core + +import ( + "fmt" + "github.com/fsnotify/fsnotify" + "github.com/spf13/viper" + "os" + "path/filepath" + "testcase/commons" + "testcase/global" +) + +func Viper(path ...string) *viper.Viper { + var config string + + if len(path) == 0 { + //默认config文件查找路径 ./ -> 执行文件路径 + if config == "" { // 优先级: 命令行 > 环境变量 > 默认值 + if configEnv := os.Getenv(ConfigEnv); configEnv == "" { + //获取可执行文件的绝对路径 + dir, _ 
:= filepath.Abs(filepath.Dir(os.Args[0])) + + if commons.FileExists(ConfigFile) { + config = ConfigFile + } + + if commons.FileExists(dir + "/" + ConfigFile) { + config = dir + "/" + ConfigFile + } + + //fmt.Printf("您正在使用config的默认值,config的路径为%v\n", config) + } else { + config = configEnv + //fmt.Printf("您正在使用GVA_CONFIG环境变量,config的路径为%v\n", config) + } + } else { + //fmt.Printf("您正在使用命令行的-c参数传递的值,config的路径为%v\n", config) + } + } else { + config = path[0] + //fmt.Printf("您正在使用func Viper()传递的值,config的路径为%v\n", config) + } + + v := viper.New() + v.SetConfigFile(config) + err := v.ReadInConfig() + if err != nil { + panic(fmt.Errorf("Fatal error config file: %s \n", err)) + } + v.WatchConfig() + + v.OnConfigChange(func(e fsnotify.Event) { + fmt.Println("config file changed:", e.Name) + if err := v.Unmarshal(&global.RSPConfig); err != nil { + fmt.Println(err) + } + }) + + if err := v.Unmarshal(&global.RSPConfig); err != nil { + fmt.Println(err) + } + + return v +} diff --git a/core/zap.go b/core/zap.go new file mode 100644 index 0000000..b46dd04 --- /dev/null +++ b/core/zap.go @@ -0,0 +1,129 @@ +package core + +import ( + "fmt" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + "os" + "path" + "path/filepath" + "testcase/global" + "testcase/utils" + "time" +) + +var level zapcore.Level + +func Zap() (logger *zap.Logger) { + //ToDo 增加zap 参数默认值,当globale viper == nil,使用默认值 + //zipConfig := config.Zap{ + // Level: "info", + // Format: "console", + // Prefix: "[RedissyncerT]", + // Director: "log", + // LinkName: "latest_log", + // ShowLine: true, + // EncodeLevel: "LowercaseColorLevelEncoder", + // StacktraceKey: "stacktrace", + // LogInConsole: true, + //} + // + //if global.RSPConfig != nil { + // zipConfig = global.RSPConfig.Zap + //} + + if ok, _ := utils.PathExists(global.RSPConfig.Zap.Director); !ok { // 判断是否有Director文件夹 + if !path.IsAbs(global.RSPConfig.Zap.Director) { + dir, err := filepath.Abs(filepath.Dir(os.Args[0])) + if err != nil { + panic(err) + } + 
global.RSPConfig.Zap.Director = dir + "/" + global.RSPConfig.Zap.Director + global.RSPConfig.Zap.LinkName = dir + "/" + global.RSPConfig.Zap.LinkName + } + fmt.Printf("create %v directory\n", global.RSPConfig.Zap.Director) + _ = os.Mkdir(global.RSPConfig.Zap.Director, os.ModePerm) + } + + switch global.RSPConfig.Zap.Level { // 初始化配置文件的Level + case "debug": + level = zap.DebugLevel + case "info": + level = zap.InfoLevel + case "warn": + level = zap.WarnLevel + case "error": + level = zap.ErrorLevel + case "dpanic": + level = zap.DPanicLevel + case "panic": + level = zap.PanicLevel + case "fatal": + level = zap.FatalLevel + default: + level = zap.InfoLevel + } + + if level == zap.DebugLevel || level == zap.ErrorLevel { + logger = zap.New(getEncoderCore(), zap.AddStacktrace(level)) + } else { + logger = zap.New(getEncoderCore()) + } + if global.RSPConfig.Zap.ShowLine { + logger = logger.WithOptions(zap.AddCaller()) + } + return logger +} + +// getEncoderConfig 获取zapcore.EncoderConfig +func getEncoderConfig() (config zapcore.EncoderConfig) { + config = zapcore.EncoderConfig{ + MessageKey: "message", + LevelKey: "level", + TimeKey: "time", + NameKey: "logger", + CallerKey: "caller", + StacktraceKey: global.RSPConfig.Zap.StacktraceKey, + LineEnding: zapcore.DefaultLineEnding, + EncodeLevel: zapcore.LowercaseLevelEncoder, + EncodeTime: CustomTimeEncoder, + EncodeDuration: zapcore.SecondsDurationEncoder, + EncodeCaller: zapcore.FullCallerEncoder, + } + switch { + case global.RSPConfig.Zap.EncodeLevel == "LowercaseLevelEncoder": // 小写编码器(默认) + config.EncodeLevel = zapcore.LowercaseLevelEncoder + case global.RSPConfig.Zap.EncodeLevel == "LowercaseColorLevelEncoder": // 小写编码器带颜色 + config.EncodeLevel = zapcore.LowercaseColorLevelEncoder + case global.RSPConfig.Zap.EncodeLevel == "CapitalLevelEncoder": // 大写编码器 + config.EncodeLevel = zapcore.CapitalLevelEncoder + case global.RSPConfig.Zap.EncodeLevel == "CapitalColorLevelEncoder": // 大写编码器带颜色 + config.EncodeLevel = 
zapcore.CapitalColorLevelEncoder + default: + config.EncodeLevel = zapcore.LowercaseLevelEncoder + } + return config +} + +// getEncoder 获取zapcore.Encoder +func getEncoder() zapcore.Encoder { + if global.RSPConfig.Zap.Format == "json" { + return zapcore.NewJSONEncoder(getEncoderConfig()) + } + return zapcore.NewConsoleEncoder(getEncoderConfig()) +} + +// getEncoderCore 获取Encoder的zapcore.Core +func getEncoderCore() (core zapcore.Core) { + writer, err := utils.GetWriteSyncer() // 使用file-rotatelogs进行日志分割 + if err != nil { + fmt.Printf("Get Write Syncer Failed err:%v", err.Error()) + return + } + return zapcore.NewCore(getEncoder(), writer, level) +} + +// 自定义日志输出时间格式 +func CustomTimeEncoder(t time.Time, enc zapcore.PrimitiveArrayEncoder) { + enc.AppendString(t.Format(global.RSPConfig.Zap.Prefix + "2006/01/02 - 15:04:05.000")) +} diff --git a/createtask.json b/createtask.json new file mode 100644 index 0000000..8d96be3 --- /dev/null +++ b/createtask.json @@ -0,0 +1,11 @@ +{ + "sourcePassword": "redistest0102", + "sourceRedisAddress": "114.67.100.239:6379", + "targetRedisAddress": "114.67.100.240:6379", + "targetPassword": "redistest0102", + "taskName": "迁移任务1", + "targetRedisVersion": 4.0, + "autostart": true, + "afresh": true, + "batchSize": 1000 +} \ No newline at end of file diff --git a/gendatayamls/gen.yml b/gendatayamls/gen.yml new file mode 100644 index 0000000..a0ae64d --- /dev/null +++ b/gendatayamls/gen.yml @@ -0,0 +1,79 @@ +--- +# 目标类型 0 单实例 1 集群 默认为0 +type: 0 +# 目标地址 数组 单实例取第一个地址 +addr: + - '127.0.0.1:6379' +password: "123456" +db: 0 +# 大key生成,用于生成set、hash、zset等集合类key以及string +bigkey: + # key名后缀长度 + keysuffixlen: 4 + # set、hash等集合类型包含元素的的数量 + length: 1024 + # value的字节数 + valuesize: 512 + # key过期时间,单位为秒 + expire: 3600 + # 持续时间之内循环生成大key,数据产生时间超过持续时间时至少生成一次 + duaration: 60 + # 数据生成间隔,单位为毫秒 + datageninterval: 20 +# 生成随机key配置描述,按照指定规则生成key,尽量覆盖redis中所有类型的key和操作 +randkey: + # key名后缀长度 + keysuffixlen: 4 + # value的字节数 + valuesize: 512 + # key过期时间,单位为秒 + expire: 
3600 + # 持续时间之内循环生成大key,数据产生时间超过持续时间时至少生成一次 + duaration: 60 + # 数据生成间隔,单位为毫秒 + datageninterval: 20 + # 生成数据的并发进程数,默认为cpu核数 + threads: 2 + +... + +--- +# 目标类型 0 单实例 1 集群 默认为0 +type: 1 +# 目标地址 数组 单实例取第一个地址 +addr: + - '127.0.0.1:6379' + - '10.0.0.1:6379' + - '120.0.0.1:6379' +password: "123456" +db: 0 +# 大key生成,用于生成set、hash、zset等集合类key以及string +bigkey: + # key名后缀长度 + keysuffixlen: 4 + # set、hash等集合类型包含元素的的数量 + length: 1024 + # value的字节数 + valuesize: 512 + # key过期时间,单位为秒 + expire: 3600 + # 持续时间之内循环生成大key,数据产生时间超过持续时间时至少生成一次 + duaration: 60 + # 数据生成间隔,单位为毫秒 + datageninterval: 20 +# 生成随机key配置描述,按照指定规则生成key,尽量覆盖redis中所有类型的key和操作 +randkey: + # key名后缀长度 + keysuffixlen: 4 + # value的字节数 + valuesize: 512 + # key过期时间,单位为秒 + expire: 3600 + # 持续时间之内循环生成大key,数据产生时间超过持续时间时至少生成一次 + duaration: 60 + # 数据生成间隔,单位为毫秒 + datageninterval: 20 + # 生成数据的并发进程数,默认为cpu核数 + threads: 2 + +... \ No newline at end of file diff --git a/gendatayamls/gensingle.yml b/gendatayamls/gensingle.yml new file mode 100644 index 0000000..2889351 --- /dev/null +++ b/gendatayamls/gensingle.yml @@ -0,0 +1,39 @@ +--- +# 目标类型 0 单实例 1 集群 默认为0 +type: 0 +# 目标地址 数组 单实例取第一个地址 +addr: + - '114.67.76.82:16375' +password: "redistest0102" +db: 2 +# 大key生成,用于生成set、hash、zset等集合类key以及string +bigkey: + # key名后缀长度 + keysuffixlen: 4 + # set、hash等集合类型包含元素的的数量 + length: 99 + # value的字节数 + valuesize: 128 + # key过期时间,单位为秒 + expire: 3600 + # 持续时间之内循环生成大key,数据产生时间超过持续时间时至少生成一次 + duaration: 10 + # 数据生成间隔,单位为毫秒 + datageninterval: 2 +# 生成随机key配置描述,按照指定规则生成key,尽量覆盖redis中所有类型的key和操作,无需定义value,value与key一致 +randkey: + # key名后缀长度 + keysuffixlen: 4 + # 对于append、lpush这样需要多次执行的命令执行的次数 + loopstep: 10 + # key过期时间,单位为秒 + expire: 3600 + # 持续时间之内循环生成大key,数据产生时间超过持续时间时至少生成一次 + duaration: 120 + # 数据生成间隔,单位为毫秒 + datageninterval: 200 + # 生成数据的并发进程数,默认为cpu核数 + threads: 2 + +... 
// OptType enumerates the groups of Redis base operations exercised by the
// data generators. The numeric values follow declaration order (iota).
type OptType int32

const (
	BO_APPEND OptType = iota
	BO_BITOP
	BO_DECR_DECRBY
	BO_INCR_INCRBY_INCRBYFLOAT
	BO_MSET_MSETNX
	//BO_PSETEX_SETEX
	BO_PFADD
	BO_PFMERGE
	BO_SET_SETNX
	BO_SETBIT
	BO_SETRANGE
	BO_HINCRBY_HINCRBYFLOAT
	BO_HSET_HMSET_HSETNX
	BO_LPUSH_LPOP_LPUSHX
	BO_LREM_LTRIM_LINSERT
	BO_RPUSH_RPUSHX_RPOP_RPOPLPUSH
	BO_BLPOP_BRPOP_BRPOPLPUSH
	BO_SADD_SMOVE_SPOP_SREM
	BO_SDIFFSTORE_SINTERSTORE_SUNIONSTORE
	BO_ZADD_ZINCRBY_ZPOPMAX_ZPOPMIN_ZREM
	BO_ZPOPMAX_ZPOPMIN
	BO_ZREMRANGEBYLEX_ZREMRANGEBYRANK_ZREMRANGEBYSCORE
	BO_ZUNIONSTORE_ZINTERSTORE
)

// BaseOptArray lists the operation groups the generators pick from at random.
// BO_APPEND and BO_BITOP are not included in this list.
var BaseOptArray = []OptType{
	BO_DECR_DECRBY,
	BO_INCR_INCRBY_INCRBYFLOAT,
	BO_MSET_MSETNX,
	//BO_PSETEX_SETEX,
	BO_PFADD,
	BO_PFMERGE,
	BO_SET_SETNX,
	BO_SETBIT,
	BO_SETRANGE,
	BO_HINCRBY_HINCRBYFLOAT,
	BO_HSET_HMSET_HSETNX,
	BO_LPUSH_LPOP_LPUSHX,
	BO_LREM_LTRIM_LINSERT,
	BO_RPUSH_RPUSHX_RPOP_RPOPLPUSH,
	BO_BLPOP_BRPOP_BRPOPLPUSH,
	BO_SADD_SMOVE_SPOP_SREM,
	BO_SDIFFSTORE_SINTERSTORE_SUNIONSTORE,
	BO_ZADD_ZINCRBY_ZPOPMAX_ZPOPMIN_ZREM,
	BO_ZPOPMAX_ZPOPMIN,
	BO_ZREMRANGEBYLEX_ZREMRANGEBYRANK_ZREMRANGEBYSCORE,
	BO_ZUNIONSTORE_ZINTERSTORE,
}

// optTypeNames maps each OptType value to its constant name. Indexed literal
// keys keep the table in lock-step with the const block above.
var optTypeNames = [...]string{
	BO_APPEND:                  "BO_APPEND",
	BO_BITOP:                   "BO_BITOP",
	BO_DECR_DECRBY:             "BO_DECR_DECRBY",
	BO_INCR_INCRBY_INCRBYFLOAT: "BO_INCR_INCRBY_INCRBYFLOAT",
	BO_MSET_MSETNX:             "BO_MSET_MSETNX",
	BO_PFADD:                   "BO_PFADD",
	BO_PFMERGE:                 "BO_PFMERGE",
	BO_SET_SETNX:               "BO_SET_SETNX",
	BO_SETBIT:                  "BO_SETBIT",
	BO_SETRANGE:                "BO_SETRANGE",
	BO_HINCRBY_HINCRBYFLOAT:    "BO_HINCRBY_HINCRBYFLOAT",
	BO_HSET_HMSET_HSETNX:       "BO_HSET_HMSET_HSETNX",
	BO_LPUSH_LPOP_LPUSHX:       "BO_LPUSH_LPOP_LPUSHX",
	BO_LREM_LTRIM_LINSERT:      "BO_LREM_LTRIM_LINSERT",
	BO_RPUSH_RPUSHX_RPOP_RPOPLPUSH:        "BO_RPUSH_RPUSHX_RPOP_RPOPLPUSH",
	BO_BLPOP_BRPOP_BRPOPLPUSH:             "BO_BLPOP_BRPOP_BRPOPLPUSH",
	BO_SADD_SMOVE_SPOP_SREM:               "BO_SADD_SMOVE_SPOP_SREM",
	BO_SDIFFSTORE_SINTERSTORE_SUNIONSTORE: "BO_SDIFFSTORE_SINTERSTORE_SUNIONSTORE",
	BO_ZADD_ZINCRBY_ZPOPMAX_ZPOPMIN_ZREM:  "BO_ZADD_ZINCRBY_ZPOPMAX_ZPOPMIN_ZREM",
	BO_ZPOPMAX_ZPOPMIN:                    "BO_ZPOPMAX_ZPOPMIN",
	BO_ZREMRANGEBYLEX_ZREMRANGEBYRANK_ZREMRANGEBYSCORE: "BO_ZREMRANGEBYLEX_ZREMRANGEBYRANK_ZREMRANGEBYSCORE",
	BO_ZUNIONSTORE_ZINTERSTORE:                         "BO_ZUNIONSTORE_ZINTERSTORE",
}

// String returns the constant's name, or "" for a value outside the table
// (matching the original switch's default arm).
func (ot OptType) String() string {
	if ot < 0 || int(ot) >= len(optTypeNames) {
		return ""
	}
	return optTypeNames[ot]
}

// BaseOpt is the contract implemented by the single-instance and cluster
// operation drivers: one method per operation group.
type BaseOpt interface {
	BO_APPEND()
	BO_BITOP()
	BO_DECR_DECRBY()
	BO_INCR_INCRBY_INCRBYFLOAT()
	BO_MSET_MSETNX()
	BO_SET_SETNX()
	BO_SETBIT()
	BO_SETRANGE()
	BO_HINCRBY_HINCRBYFLOAT()
	BO_PFADD()
	//BO_PFMERGE()
	BO_HSET_HMSET_HSETNX()
	BO_LPUSH_LPOP_LPUSHX()
	BO_LREM_LTRIM_LINSERT()
	BO_RPUSH_RPUSHX_RPOP_RPOPLPUSH()
	BO_BLPOP_BRPOP_BRPOPLPUSH()
	BO_SADD_SMOVE_SPOP_SREM()
	BO_SDIFFSTORE_SINTERSTORE_SUNIONSTORE()
	BO_ZADD_ZINCRBY_ZPOPMAX_ZPOPMIN_ZREM()
	BO_ZPOPMAX_ZPOPMIN()
	BO_ZREMRANGEBYLEX_ZREMRANGEBYRANK_ZREMRANGEBYSCORE()
	BO_ZUNIONSTORE_ZINTERSTORE()
}
EXPIRE time.Duration + DB int +} + +func (optcluster *OptCluster) ExecOpt() { + + switch optcluster.OptType.String() { + case "BO_APPEND": + optcluster.BO_APPEND() + case "BO_BITOP": + optcluster.BO_BITOP() + case "BO_DECR_DECRBY": + optcluster.BO_DECR_DECRBY() + case "BO_INCR_INCRBY_INCRBYFLOAT": + optcluster.BO_INCR_INCRBY_INCRBYFLOAT() + case "BO_MSET_MSETNX": + optcluster.BO_MSET_MSETNX() + //case "BO_PSETEX_SETEX": + // optcluster.BO_PSETEX_SETEX() + case "BO_PFADD": + optcluster.BO_PFADD() + case "BO_SET_SETNX": + optcluster.BO_SET_SETNX() + case "BO_SETBIT": + optcluster.BO_SETBIT() + case "BO_SETRANGE": + optcluster.BO_SETRANGE() + case "BO_HINCRBY_HINCRBYFLOAT": + optcluster.BO_HINCRBY_HINCRBYFLOAT() + case "BO_HSET_HMSET_HSETNX": + optcluster.BO_HSET_HMSET_HSETNX() + case "BO_LPUSH_LPOP_LPUSHX": + optcluster.BO_LPUSH_LPOP_LPUSHX() + case "BO_LREM_LTRIM_LINSERT": + optcluster.BO_LREM_LTRIM_LINSERT() + case "BO_RPUSH_RPUSHX_RPOP_RPOPLPUSH": + optcluster.BO_RPUSH_RPUSHX_RPOP_RPOPLPUSH() + case "BO_BLPOP_BRPOP_BRPOPLPUSH": + optcluster.BO_BLPOP_BRPOP_BRPOPLPUSH() + case "BO_SADD_SMOVE_SPOP_SREM": + optcluster.BO_SADD_SMOVE_SPOP_SREM() + case "BO_SDIFFSTORE_SINTERSTORE_SUNIONSTORE": + optcluster.BO_SDIFFSTORE_SINTERSTORE_SUNIONSTORE() + case "BO_ZADD_ZINCRBY_ZPOPMAX_ZPOPMIN_ZREM": + optcluster.BO_ZADD_ZINCRBY_ZPOPMAX_ZPOPMIN_ZREM() + case "BO_ZPOPMAX_ZPOPMIN": + optcluster.BO_ZPOPMAX_ZPOPMIN() + case "BO_ZREMRANGEBYLEX_ZREMRANGEBYRANK_ZREMRANGEBYSCORE": + optcluster.BO_ZREMRANGEBYLEX_ZREMRANGEBYRANK_ZREMRANGEBYSCORE() + case "BO__ZUNIONSTORE_ZINTERSTORE": + optcluster.BO_ZUNIONSTORE_ZINTERSTORE() + default: + return + } + +} + +// 比较目标库版本是否小于要求版本 +func (optcluster OptCluster) VersionLessThan(version string) bool { + + boverarray := strings.Split(optcluster.RedisVersion, ".") + versionarry := strings.Split(version, ".") + + bover := "" + ver := "" + for i := 0; i < 3; i++ { + if i < len(boverarray) { + bover = bover + boverarray[i] + } else { + bover = bover + 
"0" + } + + if i < len(ver) { + ver = ver + versionarry[i] + } else { + ver = ver + "0" + } + } + + intbover, _ := strconv.Atoi(bover) + intver, _ := strconv.Atoi(ver) + + if intbover < intver { + return true + } + + return false + +} + +//SELECT命令 +//func (optcluster *OptCluster) BO_SELECT(db int) { +// +// _, err := optcluster.ClusterClient.Select(db).Result() +// if err != nil { +// zaplogger.Sugar().Error(err) +// return +// } +// optcluster.DB = db +//} + +//APPEND 命令基本操作 +//start version:2.0.0 +func (optcluster *OptCluster) BO_APPEND() { + t1 := time.Now() + appended := "append_" + optcluster.KeySuffix + for i := 0; i < optcluster.Loopstep; i++ { + optcluster.ClusterClient.Append(appended, strconv.Itoa(i)) + } + optcluster.ClusterClient.Expire(appended, optcluster.EXPIRE) + t2 := time.Now() + global.RSPLog.Info("ExecCMD", zap.Int("db", optcluster.DB), zap.String("command", "APPEND"), zap.String("key", appended), zap.Int64("time", t2.Sub(t1).Milliseconds())) + +} + +//BITOP +//start version:2.6.0 +func (optcluster *OptCluster) BO_BITOP() { + t1 := time.Now() + strarry := []string{} + opandkey := "opand_" + optcluster.KeySuffix + oporkey := "opor_" + optcluster.KeySuffix + opxorkey := "opxor_" + optcluster.KeySuffix + opnotkey := "opnot_" + optcluster.KeySuffix + for i := 0; i < optcluster.Loopstep; i++ { + bitopkey := "bitop_" + optcluster.KeySuffix + strconv.Itoa(i) + optcluster.ClusterClient.Set(bitopkey, bitopkey, optcluster.EXPIRE) + strarry = append(strarry, bitopkey) + } + + optcluster.ClusterClient.BitOpAnd(opandkey, strarry...) + optcluster.ClusterClient.BitOpOr(oporkey, strarry...) + optcluster.ClusterClient.BitOpXor(opxorkey, strarry...) 
+ optcluster.ClusterClient.BitOpNot(opnotkey, strarry[0]) + optcluster.ClusterClient.Expire(opandkey, optcluster.EXPIRE) + optcluster.ClusterClient.Expire(oporkey, optcluster.EXPIRE) + optcluster.ClusterClient.Expire(opxorkey, optcluster.EXPIRE) + optcluster.ClusterClient.Expire(opnotkey, optcluster.EXPIRE) + t2 := time.Now() + global.RSPLog.Info("ExecCMD", zap.Int("db", optcluster.DB), zap.String("command", "BITOP"), zap.Any("keys", []string{opandkey, oporkey, opxorkey, opnotkey}), zap.Duration("time", t2.Sub(t1))) +} + +//DECR and DECRBY +func (optcluster *OptCluster) BO_DECR_DECRBY() { + t1 := time.Now() + desckey := "desc_" + optcluster.KeySuffix + optcluster.ClusterClient.Set(desckey, optcluster.Loopstep, optcluster.EXPIRE) + optcluster.ClusterClient.Decr(desckey) + optcluster.ClusterClient.DecrBy(desckey, rand.Int63n(int64(optcluster.Loopstep))) + t2 := time.Now() + + global.RSPLog.Info("ExecCMD", zap.Int("db", optcluster.DB), zap.String("command", "DECR_DECRBY"), zap.String("key", desckey), zap.Duration("time", t2.Sub(t1))) +} + +//INCR and INCRBY and INCRBYFLOAT +func (optcluster *OptCluster) BO_INCR_INCRBY_INCRBYFLOAT() { + t1 := time.Now() + incrkey := "incr_" + optcluster.KeySuffix + optcluster.ClusterClient.Set(incrkey, rand.Intn(optcluster.Loopstep), optcluster.EXPIRE) + optcluster.ClusterClient.Incr(incrkey) + optcluster.ClusterClient.IncrBy(incrkey, rand.Int63n(int64(optcluster.Loopstep))) + optcluster.ClusterClient.IncrByFloat(incrkey, rand.Float64()*float64(rand.Intn(optcluster.Loopstep))) + t2 := time.Now() + global.RSPLog.Info("ExecCMD", zap.Int("db", optcluster.DB), zap.String("command", "INCR_INCRBY_INCRBYFLOAT"), zap.String("key", incrkey), zap.Duration("time", t2.Sub(t1))) +} + +//MSET and MSETNX +func (optcluster *OptCluster) BO_MSET_MSETNX() { + t1 := time.Now() + msetarry := []string{} + msetnxarry := []string{} + + msetkey := "mset_" + optcluster.KeySuffix + msetnxkey := "msetnx_" + optcluster.KeySuffix + + for i := 0; i < 
optcluster.Loopstep; i++ { + msetarry = append(msetarry, msetkey+strconv.Itoa(i)) + msetarry = append(msetarry, msetkey+strconv.Itoa(i)) + msetnxarry = append(msetnxarry, msetnxkey+strconv.Itoa(i)) + msetnxarry = append(msetnxarry, msetnxkey+strconv.Itoa(i)) + } + + optcluster.ClusterClient.MSetNX(msetnxarry) + optcluster.ClusterClient.MSet(msetarry) + optcluster.ClusterClient.MSetNX(msetnxarry) + + for i := 0; i < optcluster.Loopstep; i++ { + optcluster.ClusterClient.Expire(msetkey+strconv.Itoa(i), optcluster.EXPIRE) + optcluster.ClusterClient.Expire(msetnxkey+strconv.Itoa(i), optcluster.EXPIRE) + } + + t2 := time.Now() + global.RSPLog.Info("ExecCMD", zap.Int("db", optcluster.DB), zap.String("command", "MGET_MSETNX"), zap.String("KeySuffix", optcluster.KeySuffix), zap.Duration("time", t2.Sub(t1))) +} + +////PSETEX and SETEX +//func (bo *OptCluster) BO_PSETEX_SETEX() { +// t1 := time.Now() +// psetexkey := "psetex_" + bo.KeySuffix +// setexkey := "setex_" + bo.KeySuffix +// bo.ClusterClient.SetNX(setexkey, setexkey, bo.EXPIRE) +// bo.ClusterClient. 
+// bo.ClusterClient.Do("SETEX", setexkey, bo.EXPIRE.Seconds(), setexkey) +// bo.ClusterClient.Do("PSETEX", psetexkey, bo.EXPIRE.Milliseconds(), psetexkey) +// t2 := time.Now() +// zaplogger.Info("ExecCMD", zap.String("command", "MGET_MSETNX"), zap.String("KeySuffix", bo.KeySuffix), zap.Duration("time", t2.Sub(t1))) +// +//} + +//PFADD +func (optcluster *OptCluster) BO_PFADD() { + t1 := time.Now() + pfaddkey := "pfadd_" + optcluster.KeySuffix + rand.Seed(time.Now().UnixNano()) + for i := 0; i < optcluster.Loopstep; i++ { + optcluster.ClusterClient.PFAdd(pfaddkey, rand.Float64()*float64(rand.Int())) + } + t2 := time.Now() + global.RSPLog.Info("ExecCMD", zap.Int("db", optcluster.DB), zap.String("command", "BO_PFADD"), zap.String("KeySuffix", optcluster.KeySuffix), zap.Duration("time", t2.Sub(t1))) +} + +//SET and SETNX +func (optcluster *OptCluster) BO_SET_SETNX() { + t1 := time.Now() + nodeprefix := rand.Int() + setkey := "{" + strconv.Itoa(nodeprefix) + ":}" + "set_" + optcluster.KeySuffix + setnxkey := "{" + strconv.Itoa(nodeprefix) + ":}" + "setnx_" + optcluster.KeySuffix + optcluster.ClusterClient.Set(setkey, setkey, optcluster.EXPIRE) + optcluster.ClusterClient.SetNX(setnxkey, setkey, optcluster.EXPIRE) + t2 := time.Now() + global.RSPLog.Info("ExecCMD", zap.Int("db", optcluster.DB), zap.String("command", "SET_SETNX"), zap.String("KeySuffix", optcluster.KeySuffix), zap.Duration("time", t2.Sub(t1))) +} + +//SETBIT +func (optcluster *OptCluster) BO_SETBIT() { + t1 := time.Now() + setbitkey := "setbit_" + optcluster.KeySuffix + optcluster.ClusterClient.SetBit(setbitkey, rand.Int63n(int64(optcluster.Loopstep)), rand.Intn(optcluster.Loopstep)) + optcluster.ClusterClient.Expire(setbitkey, optcluster.EXPIRE) + + t2 := time.Now() + global.RSPLog.Info("ExecCMD", zap.Int("db", optcluster.DB), zap.String("command", "SETBIT"), zap.String("KeySuffix", optcluster.KeySuffix), zap.Duration("time", t2.Sub(t1))) +} + +//SETRANGE +func (optcluster *OptCluster) BO_SETRANGE() { + t1 
:= time.Now() + setrangekey := "setrange_" + optcluster.KeySuffix + optcluster.ClusterClient.Set(setrangekey, setrangekey, optcluster.EXPIRE) + optcluster.ClusterClient.SetRange(setrangekey, rand.Int63n(int64(optcluster.Loopstep)), strconv.Itoa(rand.Intn(optcluster.Loopstep))) + t2 := time.Now() + global.RSPLog.Info("ExecCMD", zap.Int("db", optcluster.DB), zap.String("command", "SETRANGE"), zap.String("KeySuffix", optcluster.KeySuffix), zap.Duration("time", t2.Sub(t1))) +} + +//HINCRBY and HINCRBYFLOAT +func (optcluster *OptCluster) BO_HINCRBY_HINCRBYFLOAT() { + t1 := time.Now() + hincrbykey := "hincrby_" + optcluster.KeySuffix + hincrbyfloatkey := "hincrbyfloat_" + optcluster.KeySuffix + for i := 0; i < optcluster.Loopstep; i++ { + optcluster.ClusterClient.HIncrBy(hincrbykey, hincrbykey+strconv.Itoa(rand.Intn(optcluster.Loopstep)), int64(rand.Intn(optcluster.Loopstep))) + optcluster.ClusterClient.HIncrByFloat(hincrbyfloatkey, hincrbyfloatkey+strconv.Itoa(rand.Intn(optcluster.Loopstep)), rand.Float64()*10) + } + t2 := time.Now() + global.RSPLog.Info("ExecCMD", zap.Int("db", optcluster.DB), zap.String("command", "HINCRBY_HINCRBYFLOAT"), zap.String("KeySuffix", optcluster.KeySuffix), zap.Duration("time", t2.Sub(t1))) +} + +//HSET HMSET HSETNX HDEL +func (optcluster *OptCluster) BO_HSET_HMSET_HSETNX() { + t1 := time.Now() + nodeprefix := rand.Int() + hsetkey := "{" + strconv.Itoa(nodeprefix) + ":}" + "hset_" + optcluster.KeySuffix + hmsetkey := "{" + strconv.Itoa(nodeprefix) + ":}" + "hmset_" + optcluster.KeySuffix + fieldmap := make(map[string]interface{}) + + for i := 0; i < optcluster.Loopstep; i++ { + field := hmsetkey + strconv.Itoa(i) + fieldmap[field] = field + } + + for i := 0; i < optcluster.Loopstep; i++ { + optcluster.ClusterClient.HSet(hsetkey, hsetkey+strconv.Itoa(i), hsetkey+strconv.Itoa(i)) + } + + optcluster.ClusterClient.HMSet(hmsetkey, fieldmap) + + //HSETNX + for i := 0; i < optcluster.Loopstep; i++ { + optcluster.ClusterClient.HSetNX(hmsetkey, 
hmsetkey+strconv.Itoa(rand.Intn(optcluster.Loopstep*2)), hmsetkey+strconv.Itoa(i)) + } + + //HDEL + for i := 0; i < optcluster.Loopstep; i++ { + if rand.Intn(optcluster.Loopstep)%2 == 0 { + optcluster.ClusterClient.HDel(hmsetkey, hmsetkey+strconv.Itoa(rand.Intn(optcluster.Loopstep))) + } + } + + optcluster.ClusterClient.Expire(hsetkey, optcluster.EXPIRE) + optcluster.ClusterClient.Expire(hmsetkey, optcluster.EXPIRE) + t2 := time.Now() + global.RSPLog.Info("ExecCMD", zap.Int("db", optcluster.DB), zap.String("command", "HSET_HMSET_HSETNX_HDEL"), zap.String("KeySuffix", optcluster.KeySuffix), zap.Duration("time", t2.Sub(t1))) +} + +//LPUSH and LPOP and LPUSHX and LSET +func (optcluster *OptCluster) BO_LPUSH_LPOP_LPUSHX() { + t1 := time.Now() + nodeprefix := rand.Int() + lpushkey := "{" + strconv.Itoa(nodeprefix) + ":}" + "lpush_" + optcluster.KeySuffix + lpushxkey := "{" + strconv.Itoa(nodeprefix) + ":}" + "lpushx_" + optcluster.KeySuffix + values := make([]interface{}, optcluster.Loopstep) + for i := 0; i < len(values); i++ { + values[i] = lpushkey + strconv.Itoa(i) + } + + optcluster.ClusterClient.LPush(lpushkey, values...) 
+ for i := 0; i < optcluster.Loopstep; i++ { + if rand.Intn(optcluster.Loopstep)%2 != 0 { + optcluster.ClusterClient.LSet(lpushkey, int64(rand.Intn(2*optcluster.Loopstep)-optcluster.Loopstep), lpushkey+strconv.Itoa(i)) + } + } + + for i := 0; i < optcluster.Loopstep; i++ { + if rand.Intn(optcluster.Loopstep)%2 != 0 { + optcluster.ClusterClient.LPop(lpushkey) + } + } + optcluster.ClusterClient.LPushX(lpushxkey, values) + optcluster.ClusterClient.LPushX(lpushkey, values) + + optcluster.ClusterClient.Expire(lpushkey, optcluster.EXPIRE) + t2 := time.Now() + global.RSPLog.Info("ExecCMD", zap.Int("db", optcluster.DB), zap.String("command", "LPUSH_LPOP_LPUSHX_LSET"), zap.String("KeySuffix", optcluster.KeySuffix), zap.Duration("time", t2.Sub(t1))) + +} + +//LREM and LTRIM and LINSERT +func (optcluster *OptCluster) BO_LREM_LTRIM_LINSERT() { + t1 := time.Now() + nodeprefix := rand.Int() + lremkey := "{" + strconv.Itoa(nodeprefix) + ":}" + "lrem_" + optcluster.KeySuffix + ltrimkey := "{" + strconv.Itoa(nodeprefix) + ":}" + "ltrim_" + optcluster.KeySuffix + values := make([]interface{}, optcluster.Loopstep) + for i := 0; i < len(values); i++ { + values[i] = lremkey + strconv.Itoa(i) + } + optcluster.ClusterClient.LPush(lremkey, values...) + optcluster.ClusterClient.LPush(ltrimkey, values...) 
+ + for i := 0; i < optcluster.Loopstep; i++ { + op := "BEFORE" + if i%2 == 0 { + op = "AFTER" + } else { + op = "BEFORE" + } + optcluster.ClusterClient.LInsert(lremkey, op, lremkey+strconv.Itoa(rand.Intn(optcluster.Loopstep)), lremkey+strconv.Itoa(rand.Intn(optcluster.Loopstep))) + } + + optcluster.ClusterClient.LRem(lremkey, int64(rand.Intn(2*optcluster.Loopstep)-optcluster.Loopstep), lremkey+strconv.Itoa(rand.Intn(optcluster.Loopstep))) + optcluster.ClusterClient.LTrim(ltrimkey, int64(rand.Intn(2*optcluster.Loopstep)-optcluster.Loopstep), int64(rand.Intn(2*optcluster.Loopstep)-optcluster.Loopstep)) + optcluster.ClusterClient.Expire(lremkey, optcluster.EXPIRE) + optcluster.ClusterClient.Expire(ltrimkey, optcluster.EXPIRE) + t2 := time.Now() + global.RSPLog.Info("ExecCMD", zap.Int("db", optcluster.DB), zap.String("command", "LREM_TRIM_LINSERT"), zap.String("KeySuffix", optcluster.KeySuffix), zap.Duration("time", t2.Sub(t1))) +} + +//RPUSH RPUSHX RPOP RPOPLPUSH +func (optcluster *OptCluster) BO_RPUSH_RPUSHX_RPOP_RPOPLPUSH() { + t1 := time.Now() + rpushkey := "rpush_" + optcluster.KeySuffix + rpushxkey := "rpushx_" + optcluster.KeySuffix + values := make([]interface{}, optcluster.Loopstep) + for i := 0; i < len(values); i++ { + values[i] = rpushkey + strconv.Itoa(i) + } + optcluster.ClusterClient.RPush(rpushkey, values...) + optcluster.ClusterClient.RPushX(rpushxkey, values...) + optcluster.ClusterClient.RPushX(rpushkey, values...) 
+ + //rpoplpush 操作同一个key相当于将列表逆转 + for i := 0; i < optcluster.Loopstep; i++ { + if rand.Intn(optcluster.Loopstep)%2 == 0 { + optcluster.ClusterClient.RPopLPush(rpushkey, rpushkey) + } + } + for i := 0; i < optcluster.Loopstep; i++ { + if rand.Intn(optcluster.Loopstep)%2 == 0 { + optcluster.ClusterClient.RPop(rpushkey) + } + } + optcluster.ClusterClient.Expire(rpushkey, optcluster.EXPIRE) + optcluster.ClusterClient.Expire(rpushxkey, optcluster.EXPIRE) + + t2 := time.Now() + global.RSPLog.Info("ExecCMD", zap.Int("db", optcluster.DB), zap.String("command", "BO_RPUSH_RPUSHX_RPOP_RPOPLPUSH"), zap.String("KeySuffix", optcluster.KeySuffix), zap.Duration("time", t2.Sub(t1))) +} + +//BLPOP BRPOP BRPOPLPUSH BRPOPLPUSH +//BRPOPLPUSH 命令在集群模式下key分布在不同节点会报错(error) CROSSSLOT Keys in request don't hash to the same slot +func (optcluster *OptCluster) BO_BLPOP_BRPOP_BRPOPLPUSH() { + t1 := time.Now() + nodeprefix := rand.Int() + blpopkey := "{" + strconv.Itoa(nodeprefix) + ":}" + "blpop_" + optcluster.KeySuffix + brpopkey := "{" + strconv.Itoa(nodeprefix) + ":}" + "brpop_" + optcluster.KeySuffix + + values := make([]interface{}, optcluster.Loopstep) + for i := 0; i < len(values); i++ { + values[i] = blpopkey + strconv.Itoa(i) + } + + optcluster.ClusterClient.RPush(blpopkey, values...) + optcluster.ClusterClient.RPush(brpopkey, values...) 
+ + for i := 0; i < optcluster.Loopstep; i++ { + if rand.Intn(optcluster.Loopstep)%2 == 0 { + optcluster.ClusterClient.RPop(blpopkey) + optcluster.ClusterClient.RPop(brpopkey) + } + } + + for i := 0; i < optcluster.Loopstep; i++ { + if rand.Intn(optcluster.Loopstep)%2 == 0 { + optcluster.ClusterClient.BRPopLPush(blpopkey, blpopkey, optcluster.EXPIRE) + } + } + + optcluster.ClusterClient.Expire(blpopkey, optcluster.EXPIRE) + optcluster.ClusterClient.Expire(brpopkey, optcluster.EXPIRE) + t2 := time.Now() + global.RSPLog.Info("ExecCMD", zap.Int("db", optcluster.DB), zap.String("command", "BO_BLPOP_BRPOP_BRPOPLPUSH"), zap.String("KeySuffix", optcluster.KeySuffix), zap.Duration("time", t2.Sub(t1))) +} + +//SADD SMOVE SPOP SREM +func (optcluster *OptCluster) BO_SADD_SMOVE_SPOP_SREM() { + t1 := time.Now() + + saddkey := "sadd_" + optcluster.KeySuffix + smovekey := "smove_" + optcluster.KeySuffix + spopkey := optcluster.KeySuffix + sremkey := optcluster.KeySuffix + + for i := 0; i < optcluster.Loopstep; i++ { + optcluster.ClusterClient.SAdd(saddkey, saddkey+strconv.Itoa(i)) + optcluster.ClusterClient.SAdd(smovekey, smovekey+strconv.Itoa(i)) + optcluster.ClusterClient.SAdd(spopkey, spopkey+strconv.Itoa(i)) + optcluster.ClusterClient.SAdd(sremkey, sremkey+strconv.Itoa(i)) + } + + for i := 0; i < optcluster.Loopstep; i++ { + if rand.Intn(optcluster.Loopstep)%2 == 0 { + optcluster.ClusterClient.SPop(spopkey) + } + } + + for i := 0; i < optcluster.Loopstep; i++ { + if rand.Intn(optcluster.Loopstep)%2 == 0 { + optcluster.ClusterClient.SRem(sremkey, sremkey+strconv.Itoa(rand.Intn(optcluster.Loopstep))) + } + } + + for i := 0; i < optcluster.Loopstep; i++ { + if rand.Intn(optcluster.Loopstep)%2 == 0 { + optcluster.ClusterClient.SMove(smovekey, smovekey, saddkey+strconv.Itoa(i)) + } + } + + optcluster.ClusterClient.Del(saddkey) + optcluster.ClusterClient.Expire(smovekey, optcluster.EXPIRE) + optcluster.ClusterClient.Expire(spopkey, optcluster.EXPIRE) + 
optcluster.ClusterClient.Expire(sremkey, optcluster.EXPIRE) + + t2 := time.Now() + global.RSPLog.Info("ExecCMD", zap.Int("db", optcluster.DB), zap.String("command", "BO_RPUSH_RPUSHX_RPOP_RPOPLPUSH"), zap.String("KeySuffix", optcluster.KeySuffix), zap.Duration("time", t2.Sub(t1))) + +} + +//SDIFFSTORE SINTERSTORE SUNIONSTORE +func (optcluster *OptCluster) BO_SDIFFSTORE_SINTERSTORE_SUNIONSTORE() { + t1 := time.Now() + nodeprefix := rand.Int() + sdiff1 := "{" + strconv.Itoa(nodeprefix) + ":}" + "sdiff1_" + optcluster.KeySuffix + sdiff2 := "{" + strconv.Itoa(nodeprefix) + ":}" + "sdiff2_" + optcluster.KeySuffix + sdiffstore := "{" + strconv.Itoa(nodeprefix) + ":}" + "sdiffsotre_" + optcluster.KeySuffix + sinterstore := "{" + strconv.Itoa(nodeprefix) + ":}" + "sintersotre_" + optcluster.KeySuffix + sunionstore := "{" + strconv.Itoa(nodeprefix) + ":}" + "sunionstore_" + optcluster.KeySuffix + + for i := 0; i < optcluster.Loopstep; i++ { + optcluster.ClusterClient.SAdd(sdiff1, optcluster.KeySuffix+strconv.Itoa(rand.Intn(2*optcluster.Loopstep))) + optcluster.ClusterClient.SAdd(sdiff2, optcluster.KeySuffix+strconv.Itoa(rand.Intn(2*optcluster.Loopstep))) + } + + optcluster.ClusterClient.SDiffStore(sdiffstore, sdiff1, sdiff2) + optcluster.ClusterClient.SInterStore(sinterstore, sdiff1, sdiff2) + optcluster.ClusterClient.SUnionStore(sunionstore, sdiff1, sdiff2) + + optcluster.ClusterClient.Expire(sdiffstore, optcluster.EXPIRE) + optcluster.ClusterClient.Expire(sinterstore, optcluster.EXPIRE) + optcluster.ClusterClient.Expire(sunionstore, optcluster.EXPIRE) + + optcluster.ClusterClient.Del(sdiff1, sdiff2) + + t2 := time.Now() + global.RSPLog.Info("ExecCMD", zap.Int("db", optcluster.DB), zap.String("command", "BO_SDIFFSTORE_SINTERSTORE_SUNIONSTORE"), zap.String("KeySuffix", optcluster.KeySuffix), zap.Duration("time", t2.Sub(t1))) +} + +//ZADD ZINCRBY ZREM +//start version:1.2.0 +func (optcluster OptCluster) BO_ZADD_ZINCRBY_ZPOPMAX_ZPOPMIN_ZREM() { + t1 := time.Now() + + zaddkey 
:= "zadd_" + optcluster.KeySuffix + zincrby := "zincrby_" + optcluster.KeySuffix + zrem := optcluster.KeySuffix + + for i := 0; i < optcluster.Loopstep; i++ { + z := redis.Z{ + Score: float64(i), + Member: zaddkey + strconv.Itoa(i), + } + optcluster.ClusterClient.ZAdd(zaddkey, &z) + optcluster.ClusterClient.ZAdd(zincrby, &z) + optcluster.ClusterClient.ZAdd(zrem, &z) + + } + + for i := 0; i < optcluster.Loopstep; i++ { + if rand.Intn(optcluster.Loopstep)%2 == 0 { + optcluster.ClusterClient.ZIncrBy(zincrby, float64(rand.Intn(2*optcluster.Loopstep)-optcluster.Loopstep), zaddkey+strconv.Itoa(rand.Intn(optcluster.Loopstep))) + optcluster.ClusterClient.ZRem(zrem, zaddkey+strconv.Itoa(rand.Intn(optcluster.Loopstep))) + } + } + + optcluster.ClusterClient.Expire(zincrby, optcluster.EXPIRE) + optcluster.ClusterClient.Expire(zrem, optcluster.EXPIRE) + t2 := time.Now() + + global.RSPLog.Info("ExecCMD", zap.Int("db", optcluster.DB), zap.String("command", "BO_ZADD_ZINCRBY_ZPOPMAX_ZPOPMIN_ZREM"), zap.String("KeySuffix", optcluster.KeySuffix), zap.Duration("time", t2.Sub(t1))) +} + +//ZPOPMAX ZPOPMIN +//start version:5.0 +func (optcluster OptCluster) BO_ZPOPMAX_ZPOPMIN() { + t1 := time.Now() + + zpopmax := "zpopmax_" + optcluster.KeySuffix + zpopmin := "zpopmin_" + optcluster.KeySuffix + + for i := 0; i < optcluster.Loopstep; i++ { + z := redis.Z{ + Score: float64(i), + Member: zpopmax + strconv.Itoa(i), + } + optcluster.ClusterClient.ZAdd(zpopmax, &z) + optcluster.ClusterClient.ZAdd(zpopmin, &z) + } + + for i := 0; i < optcluster.Loopstep; i++ { + if rand.Intn(optcluster.Loopstep)%2 == 0 { + optcluster.ClusterClient.ZPopMax(zpopmax, 1) + optcluster.ClusterClient.ZPopMin(zpopmin, 1) + } + } + + optcluster.ClusterClient.Expire(zpopmax, optcluster.EXPIRE) + optcluster.ClusterClient.Expire(zpopmin, optcluster.EXPIRE) + t2 := time.Now() + + global.RSPLog.Info("ExecCMD", zap.Int("db", optcluster.DB), zap.String("command", "BO_ZPOPMAX_ZPOPMIN"), zap.String("KeySuffix", 
optcluster.KeySuffix), zap.Duration("time", t2.Sub(t1))) +} + +//ZREMRANGEBYLEX ZREMRANGEBYRANK ZREMRANGEBYSCORE +//start version:2.8.9 +func (optcluster *OptCluster) BO_ZREMRANGEBYLEX_ZREMRANGEBYRANK_ZREMRANGEBYSCORE() { + t1 := time.Now() + nodeprefix := rand.Int() + zremrangebylex := "{" + strconv.Itoa(nodeprefix) + ":}" + "zremrangebylex_" + optcluster.KeySuffix + zremrangebyrank := "{" + strconv.Itoa(nodeprefix) + ":}" + "zremrangebyrank_" + optcluster.KeySuffix + zremrangebyscore := "{" + strconv.Itoa(nodeprefix) + ":}" + "zremrangebyscore_" + optcluster.KeySuffix + + for i := 0; i < optcluster.Loopstep; i++ { + z := redis.Z{ + Score: float64(i), + Member: zremrangebylex + strconv.Itoa(i), + } + optcluster.ClusterClient.ZAdd(zremrangebylex, &z) + optcluster.ClusterClient.ZAdd(zremrangebyrank, &z) + optcluster.ClusterClient.ZAdd(zremrangebyscore, &z) + } + + optcluster.ClusterClient.ZRemRangeByLex(zremrangebylex, zremrangebylex+strconv.Itoa(0), zremrangebylex+strconv.Itoa(rand.Intn(optcluster.Loopstep-1))) + optcluster.ClusterClient.ZRemRangeByRank(zremrangebyrank, int64(rand.Intn(2*optcluster.Loopstep)-optcluster.Loopstep), int64(rand.Intn(2*optcluster.Loopstep)-optcluster.Loopstep)) + optcluster.ClusterClient.ZRemRangeByScore(zremrangebyscore, strconv.Itoa(rand.Intn(optcluster.Loopstep)), strconv.Itoa(rand.Intn(optcluster.Loopstep))) + + optcluster.ClusterClient.Expire(zremrangebylex, optcluster.EXPIRE) + optcluster.ClusterClient.Expire(zremrangebyrank, optcluster.EXPIRE) + optcluster.ClusterClient.Expire(zremrangebyscore, optcluster.EXPIRE) + + t2 := time.Now() + global.RSPLog.Info("ExecCMD", zap.Int("db", optcluster.DB), zap.String("command", "BO_ZREMRANGEBYLEX_ZREMRANGEBYRANK_ZREMRANGEBYSCORE"), zap.String("KeySuffix", optcluster.KeySuffix), zap.Duration("time", t2.Sub(t1))) + +} + +//ZUNIONSTORE ZINTERSTORE +func (optcluster *OptCluster) BO_ZUNIONSTORE_ZINTERSTORE() { + t1 := time.Now() + nodeprefix := rand.Int() + zset1 := "{" + strconv.Itoa(nodeprefix) 
+ ":}" + "zset1_" + optcluster.KeySuffix + zset2 := "{" + strconv.Itoa(nodeprefix) + ":}" + "zset2_" + optcluster.KeySuffix + zset3 := "{" + strconv.Itoa(nodeprefix) + ":}" + "zset3_" + optcluster.KeySuffix + zinterstore := "{" + strconv.Itoa(nodeprefix) + ":}" + "zinterstore_" + optcluster.KeySuffix + zunionstore := "{" + strconv.Itoa(nodeprefix) + ":}" + "zunionstore_" + optcluster.KeySuffix + + for i := 0; i < optcluster.Loopstep; i++ { + z := redis.Z{ + Score: float64(i), + Member: zset1 + strconv.Itoa(i), + } + optcluster.ClusterClient.ZAdd(zset1, &z) + optcluster.ClusterClient.ZAdd(zset2, &z) + optcluster.ClusterClient.ZAdd(zset3, &z) + } + + zstore := redis.ZStore{ + Keys: []string{zset1, zset2, zset3}, + Weights: []float64{float64(rand.Intn(optcluster.Loopstep))}, + } + + optcluster.ClusterClient.ZInterStore(zinterstore, &zstore) + optcluster.ClusterClient.ZUnionStore(zunionstore, &zstore) + + optcluster.ClusterClient.Del(zset1, zset2, zset3) + optcluster.ClusterClient.Expire(zinterstore, optcluster.EXPIRE) + optcluster.ClusterClient.Expire(zunionstore, optcluster.EXPIRE) + t2 := time.Now() + global.RSPLog.Info("ExecCMD", zap.Int("db", optcluster.DB), zap.String("command", "BO_ZUNIONSTORE_ZINTERSTORE"), zap.String("KeySuffix", optcluster.KeySuffix), zap.Duration("time", t2.Sub(t1))) +} + +//随机执行一个基础操作 +func (optcluster *OptCluster) ExecRandOptCluster() { + optcluster.OptType = BaseOptArray[rand.Intn(len(BaseOptArray))] + optcluster.ExecOpt() +} + +//随机执行一个基础操作半数概率为Set操作 +func (optcluster *OptCluster) ExecRandOptHalfIsSetCluster() { + if rand.Int()%2 == 0 { + optcluster.OptType = BO_SET_SETNX + optcluster.ExecOpt() + } else { + optcluster.ExecRandOptCluster() + } +} + +//遍历执行所有基本操作 +func (optcluster *OptCluster) ExecAllBasicOptCluster() { + for _, v := range BaseOptArray { + optcluster.OptType = v + optcluster.ExecOpt() + } +} + +//持续随机执行基础操作 +func (optcluster *OptCluster) KeepExecBasicOptCluster(ctx context.Context, sleeptime time.Duration) { + i := 
int64(0) + keysuffix := optcluster.KeySuffix + + //会引起CROSSSLOT Keys in request don't hash to the same slot错误的命令列表 + tocluster_skip_array := map[OptType]string{ + BO_MSET_MSETNX: "BO_MSET_MSETNX", + BO_SDIFFSTORE_SINTERSTORE_SUNIONSTORE: "BO_SDIFFSTORE_SINTERSTORE_SUNIONSTORE", + BO_ZUNIONSTORE_ZINTERSTORE: "BO_ZUNIONSTORE_ZINTERSTORE", + } + + for { + randi := rand.Intn(2 * len(BaseOptArray)) + optcluster.KeySuffix = keysuffix + strconv.FormatInt(i, 10) + if randi < len(BaseOptArray) { + optcluster.OptType = BaseOptArray[randi] + } else { + optcluster.OptType = BO_SET_SETNX + } + + if _, ok := tocluster_skip_array[optcluster.OptType]; ok { + continue + } + + optcluster.ExecOpt() + + i++ + time.Sleep(sleeptime) + select { + case <-ctx.Done(): + return + default: + continue + } + } + +} diff --git a/generatedata/baseoptcluster_test.go b/generatedata/baseoptcluster_test.go new file mode 100644 index 0000000..2d21711 --- /dev/null +++ b/generatedata/baseoptcluster_test.go @@ -0,0 +1,32 @@ +package generatedata + +import ( + "github.com/go-redis/redis/v7" + "testing" + "time" +) + +func TestOptCluster_ExecOpt(t *testing.T) { + rdb := redis.NewClusterClient(&redis.ClusterOptions{ + Addrs: []string{"114.67.67.7:16379", + " 114.67.67.7:16380", + " 114.67.83.163:16379 ", + " 114.67.83.163:16380 ", + " 114.67.112.67:16379 ", + " 114.67.112.67:16380"}, + Password: "testredis0102", + }) + oc := &OptCluster{ + ClusterClient: rdb, + OptType: BO_APPEND, + KeySuffix: "clustertest", + Loopstep: 20, + EXPIRE: 600 * time.Second, + } + + for _, v := range BaseOptArray { + oc.OptType = v + oc.ExecOpt() + + } +} diff --git a/generatedata/baseoptsingle.go b/generatedata/baseoptsingle.go new file mode 100644 index 0000000..5e1a184 --- /dev/null +++ b/generatedata/baseoptsingle.go @@ -0,0 +1,746 @@ +//Package generatedata 用于生成测试过程中的数据 + +package generatedata + +import ( + "context" + "github.com/go-redis/redis/v7" + "go.uber.org/zap" + "math/rand" + "strconv" + "strings" + 
"testcase/global" + //"testcase/globalzap" + "time" +) + +var zaplogger = global.RSPLog + +type OptSingle struct { + RedisConn *redis.Conn + RedisVersion string + OptType OptType + KeySuffix string + Loopstep int + EXPIRE time.Duration + DB int +} + +func (optsingle *OptSingle) ExecOpt() { + + switch optsingle.OptType.String() { + case "BO_APPEND": + optsingle.BO_APPEND() + case "BO_BITOP": + optsingle.BO_BITOP() + case "BO_DECR_DECRBY": + optsingle.BO_DECR_DECRBY() + case "BO_INCR_INCRBY_INCRBYFLOAT": + optsingle.BO_INCR_INCRBY_INCRBYFLOAT() + case "BO_MSET_MSETNX": + optsingle.BO_MSET_MSETNX() + //case "BO_PSETEX_SETEX": + // optsingle.BO_PSETEX_SETEX() + case "BO_PFADD": + optsingle.BO_PFADD() + case "BO_PFMERGE": + optsingle.BO_PFMERGE() + case "BO_SET_SETNX": + optsingle.BO_SET_SETNX() + case "BO_SETBIT": + optsingle.BO_SETBIT() + case "BO_SETRANGE": + optsingle.BO_SETRANGE() + case "BO_HINCRBY_HINCRBYFLOAT": + optsingle.BO_HINCRBY_HINCRBYFLOAT() + case "BO_HSET_HMSET_HSETNX": + optsingle.BO_HSET_HMSET_HSETNX() + case "BO_LPUSH_LPOP_LPUSHX": + optsingle.BO_LPUSH_LPOP_LPUSHX() + case "BO_LREM_LTRIM_LINSERT": + optsingle.BO_LREM_LTRIM_LINSERT() + case "BO_RPUSH_RPUSHX_RPOP_RPOPLPUSH": + optsingle.BO_RPUSH_RPUSHX_RPOP_RPOPLPUSH() + case "BO_BLPOP_BRPOP_BRPOPLPUSH": + optsingle.BO_BLPOP_BRPOP_BRPOPLPUSH() + case "BO_SADD_SMOVE_SPOP_SREM": + optsingle.BO_SADD_SMOVE_SPOP_SREM() + case "BO_SDIFFSTORE_SINTERSTORE_SUNIONSTORE": + optsingle.BO_SDIFFSTORE_SINTERSTORE_SUNIONSTORE() + case "BO_ZADD_ZINCRBY_ZPOPMAX_ZPOPMIN_ZREM": + optsingle.BO_ZADD_ZINCRBY_ZPOPMAX_ZPOPMIN_ZREM() + case "BO_ZPOPMAX_ZPOPMIN": + optsingle.BO_ZPOPMAX_ZPOPMIN() + case "BO_ZREMRANGEBYLEX_ZREMRANGEBYRANK_ZREMRANGEBYSCORE": + optsingle.BO_ZREMRANGEBYLEX_ZREMRANGEBYRANK_ZREMRANGEBYSCORE() + case "BO_UNIONSTORE_ZINTERSTORE": + optsingle.BO_ZUNIONSTORE_ZINTERSTORE() + default: + return + } + +} + +// 比较目标库版本是否小于要求版本 +func (optsingle OptSingle) VersionLessThan(version string) bool { + + boverarray := 
strings.Split(optsingle.RedisVersion, ".") + versionarry := strings.Split(version, ".") + + bover := "" + ver := "" + for i := 0; i < 3; i++ { + if i < len(boverarray) { + bover = bover + boverarray[i] + } else { + bover = bover + "0" + } + + if i < len(ver) { + ver = ver + versionarry[i] + } else { + ver = ver + "0" + } + } + + intbover, _ := strconv.Atoi(bover) + intver, _ := strconv.Atoi(ver) + + if intbover < intver { + return true + } + + return false + +} + +//SELECT命令 +func (optsingle *OptSingle) BO_SELECT(db int) { + + _, err := optsingle.RedisConn.Select(db).Result() + if err != nil { + global.RSPLog.Sugar().Error(err) + return + } + optsingle.DB = db +} + +//APPEND 命令基本操作 +//start version:2.0.0 +func (optsingle *OptSingle) BO_APPEND() { + t1 := time.Now() + appended := "append_" + optsingle.KeySuffix + for i := 0; i < optsingle.Loopstep; i++ { + optsingle.RedisConn.Append(appended, strconv.Itoa(i)) + } + optsingle.RedisConn.Expire(appended, optsingle.EXPIRE) + t2 := time.Now() + global.RSPLog.Info("ExecCMD", zap.Int("db", optsingle.DB), zap.String("command", "APPEND"), zap.String("key", appended), zap.Int64("time", t2.Sub(t1).Milliseconds())) + +} + +//BITOP +//start version:2.6.0 +func (optsingle *OptSingle) BO_BITOP() { + t1 := time.Now() + strarry := []string{} + opandkey := "opand_" + optsingle.KeySuffix + oporkey := "opor_" + optsingle.KeySuffix + opxorkey := "opxor_" + optsingle.KeySuffix + opnotkey := "opnot_" + optsingle.KeySuffix + for i := 0; i < optsingle.Loopstep; i++ { + bitopkey := "bitop_" + optsingle.KeySuffix + strconv.Itoa(i) + optsingle.RedisConn.Set(bitopkey, bitopkey, optsingle.EXPIRE) + strarry = append(strarry, bitopkey) + } + + optsingle.RedisConn.BitOpAnd(opandkey, strarry...) + optsingle.RedisConn.BitOpOr(oporkey, strarry...) + optsingle.RedisConn.BitOpXor(opxorkey, strarry...) 
+ optsingle.RedisConn.BitOpNot(opnotkey, strarry[0]) + optsingle.RedisConn.Expire(opandkey, optsingle.EXPIRE) + optsingle.RedisConn.Expire(oporkey, optsingle.EXPIRE) + optsingle.RedisConn.Expire(opxorkey, optsingle.EXPIRE) + optsingle.RedisConn.Expire(opnotkey, optsingle.EXPIRE) + t2 := time.Now() + global.RSPLog.Info("ExecCMD", zap.Int("db", optsingle.DB), zap.String("command", "BITOP"), zap.Any("keys", []string{opandkey, oporkey, opxorkey, opnotkey}), zap.Duration("time", t2.Sub(t1))) +} + +//DECR and DECRBY +func (optsingle *OptSingle) BO_DECR_DECRBY() { + t1 := time.Now() + desckey := "desc_" + optsingle.KeySuffix + optsingle.RedisConn.Set(desckey, optsingle.Loopstep, optsingle.EXPIRE) + optsingle.RedisConn.Decr(desckey) + optsingle.RedisConn.DecrBy(desckey, rand.Int63n(int64(optsingle.Loopstep))) + t2 := time.Now() + + global.RSPLog.Info("ExecCMD", zap.Int("db", optsingle.DB), zap.String("command", "DECR_DECRBY"), zap.String("key", desckey), zap.Duration("time", t2.Sub(t1))) +} + +//INCR and INCRBY and INCRBYFLOAT +func (optsingle *OptSingle) BO_INCR_INCRBY_INCRBYFLOAT() { + t1 := time.Now() + incrkey := "incr_" + optsingle.KeySuffix + optsingle.RedisConn.Set(incrkey, rand.Intn(optsingle.Loopstep), optsingle.EXPIRE) + optsingle.RedisConn.Incr(incrkey) + optsingle.RedisConn.IncrBy(incrkey, rand.Int63n(int64(optsingle.Loopstep))) + optsingle.RedisConn.IncrByFloat(incrkey, rand.Float64()*float64(rand.Intn(optsingle.Loopstep))) + t2 := time.Now() + global.RSPLog.Info("ExecCMD", zap.Int("db", optsingle.DB), zap.String("command", "INCR_INCRBY_INCRBYFLOAT"), zap.String("key", incrkey), zap.Duration("time", t2.Sub(t1))) +} + +//MSET and MSETNX +func (optsingle *OptSingle) BO_MSET_MSETNX() { + t1 := time.Now() + msetarry := []string{} + msetnxarry := []string{} + + msetkey := "mset_" + optsingle.KeySuffix + msetnxkey := "msetnx_" + optsingle.KeySuffix + + for i := 0; i < optsingle.Loopstep; i++ { + msetarry = append(msetarry, msetkey+strconv.Itoa(i)) + msetarry = 
append(msetarry, msetkey+strconv.Itoa(i)) + msetnxarry = append(msetnxarry, msetnxkey+strconv.Itoa(i)) + msetnxarry = append(msetnxarry, msetnxkey+strconv.Itoa(i)) + } + + optsingle.RedisConn.MSetNX(msetnxarry) + optsingle.RedisConn.MSet(msetarry) + optsingle.RedisConn.MSetNX(msetnxarry) + + for i := 0; i < optsingle.Loopstep; i++ { + optsingle.RedisConn.Expire(msetkey+strconv.Itoa(i), optsingle.EXPIRE) + optsingle.RedisConn.Expire(msetnxkey+strconv.Itoa(i), optsingle.EXPIRE) + } + + t2 := time.Now() + global.RSPLog.Info("ExecCMD", zap.Int("db", optsingle.DB), zap.String("command", "MGET_MSETNX"), zap.String("KeySuffix", optsingle.KeySuffix), zap.Duration("time", t2.Sub(t1))) +} + +////PSETEX and SETEX +//func (bo *OptSingle) BO_PSETEX_SETEX() { +// t1 := time.Now() +// psetexkey := "psetex_" + bo.KeySuffix +// setexkey := "setex_" + bo.KeySuffix +// bo.RedisConn.SetNX(setexkey, setexkey, bo.EXPIRE) +// bo.RedisConn. +// bo.RedisConn.Do("SETEX", setexkey, bo.EXPIRE.Seconds(), setexkey) +// bo.RedisConn.Do("PSETEX", psetexkey, bo.EXPIRE.Milliseconds(), psetexkey) +// t2 := time.Now() +// zaplogger.Info("ExecCMD", zap.String("command", "MGET_MSETNX"), zap.String("KeySuffix", bo.KeySuffix), zap.Duration("time", t2.Sub(t1))) +// +//} + +//PFADD +func (optsingle *OptSingle) BO_PFADD() { + t1 := time.Now() + pfaddkey := "pfadd_" + optsingle.KeySuffix + rand.Seed(time.Now().UnixNano()) + for i := 0; i < optsingle.Loopstep; i++ { + optsingle.RedisConn.PFAdd(pfaddkey, rand.Float64()*float64(rand.Int())) + } + t2 := time.Now() + global.RSPLog.Info("ExecCMD", zap.Int("db", optsingle.DB), zap.String("command", "BO_PFADD"), zap.String("KeySuffix", optsingle.KeySuffix), zap.Duration("time", t2.Sub(t1))) +} + +//PFMERGE +func (optsingle *OptSingle) BO_PFMERGE() { + t1 := time.Now() + pfaddkey := "pfadd_" + optsingle.KeySuffix + pfmergekey := "pfmerge_" + optsingle.KeySuffix + pfaddkeyarray := []string{} + rand.Seed(time.Now().UnixNano()) + for i := 0; i < optsingle.Loopstep; i++ 
{ + key := pfaddkey + strconv.Itoa(i) + optsingle.RedisConn.PFAdd(key, rand.Float64()*float64(rand.Int())) + pfaddkeyarray = append(pfaddkeyarray, key) + } + optsingle.RedisConn.PFMerge(pfmergekey, pfaddkeyarray...) + + for _, v := range pfaddkeyarray { + optsingle.RedisConn.Del(v) + } + t2 := time.Now() + global.RSPLog.Info("ExecCMD", zap.Int("db", optsingle.DB), zap.String("command", "BO_PFMERGE"), zap.String("KeySuffix", optsingle.KeySuffix), zap.Duration("time", t2.Sub(t1))) +} + +//SET and SETNX +func (optsingle *OptSingle) BO_SET_SETNX() { + t1 := time.Now() + setkey := "set_" + optsingle.KeySuffix + setnxkey := "setnx_" + optsingle.KeySuffix + optsingle.RedisConn.Set(setkey, setkey, optsingle.EXPIRE) + optsingle.RedisConn.SetNX(setnxkey, setnxkey, optsingle.EXPIRE) + optsingle.RedisConn.SetNX(setnxkey, setkey, optsingle.EXPIRE) + t2 := time.Now() + global.RSPLog.Info("ExecCMD", zap.Int("db", optsingle.DB), zap.String("command", "SET_SETNX"), zap.String("KeySuffix", optsingle.KeySuffix), zap.Duration("time", t2.Sub(t1))) +} + +//SETBIT +func (optsingle *OptSingle) BO_SETBIT() { + t1 := time.Now() + setbitkey := "setbit_" + optsingle.KeySuffix + optsingle.RedisConn.SetBit(setbitkey, rand.Int63n(int64(optsingle.Loopstep)), rand.Intn(optsingle.Loopstep)) + optsingle.RedisConn.Expire(setbitkey, optsingle.EXPIRE) + + t2 := time.Now() + global.RSPLog.Info("ExecCMD", zap.Int("db", optsingle.DB), zap.String("command", "SETBIT"), zap.String("KeySuffix", optsingle.KeySuffix), zap.Duration("time", t2.Sub(t1))) +} + +//SETRANGE +func (optsingle *OptSingle) BO_SETRANGE() { + t1 := time.Now() + setrangekey := "setrange_" + optsingle.KeySuffix + optsingle.RedisConn.Set(setrangekey, setrangekey, optsingle.EXPIRE) + optsingle.RedisConn.SetRange(setrangekey, rand.Int63n(int64(optsingle.Loopstep)), strconv.Itoa(rand.Intn(optsingle.Loopstep))) + t2 := time.Now() + global.RSPLog.Info("ExecCMD", zap.Int("db", optsingle.DB), zap.String("command", "SETRANGE"), 
zap.String("KeySuffix", optsingle.KeySuffix), zap.Duration("time", t2.Sub(t1))) +} + +//HINCRBY and HINCRBYFLOAT +func (optsingle *OptSingle) BO_HINCRBY_HINCRBYFLOAT() { + t1 := time.Now() + hincrbykey := "hincrby_" + optsingle.KeySuffix + hincrbyfloatkey := "hincrbyfloat_" + optsingle.KeySuffix + for i := 0; i < optsingle.Loopstep; i++ { + optsingle.RedisConn.HIncrBy(hincrbykey, hincrbykey+strconv.Itoa(rand.Intn(optsingle.Loopstep)), int64(rand.Intn(optsingle.Loopstep))) + optsingle.RedisConn.HIncrByFloat(hincrbyfloatkey, hincrbyfloatkey+strconv.Itoa(rand.Intn(optsingle.Loopstep)), rand.Float64()*10) + } + t2 := time.Now() + global.RSPLog.Info("ExecCMD", zap.Int("db", optsingle.DB), zap.String("command", "HINCRBY_HINCRBYFLOAT"), zap.String("KeySuffix", optsingle.KeySuffix), zap.Duration("time", t2.Sub(t1))) +} + +//HSET HMSET HSETNX HDEL +func (optsingle *OptSingle) BO_HSET_HMSET_HSETNX() { + t1 := time.Now() + hsetkey := "hset_" + optsingle.KeySuffix + hmsetkey := "hmset_" + optsingle.KeySuffix + fieldmap := make(map[string]interface{}) + + for i := 0; i < optsingle.Loopstep; i++ { + field := hmsetkey + strconv.Itoa(i) + fieldmap[field] = field + } + + for i := 0; i < optsingle.Loopstep; i++ { + optsingle.RedisConn.HSet(hsetkey, hsetkey+strconv.Itoa(i), hsetkey+strconv.Itoa(i)) + } + + optsingle.RedisConn.HMSet(hmsetkey, fieldmap) + + //HSETNX + for i := 0; i < optsingle.Loopstep; i++ { + optsingle.RedisConn.HSetNX(hmsetkey, hmsetkey+strconv.Itoa(rand.Intn(optsingle.Loopstep*2)), hmsetkey+strconv.Itoa(i)) + } + + //HDEL + for i := 0; i < optsingle.Loopstep; i++ { + if rand.Intn(optsingle.Loopstep)%2 == 0 { + optsingle.RedisConn.HDel(hmsetkey, hmsetkey+strconv.Itoa(rand.Intn(optsingle.Loopstep))) + } + } + + optsingle.RedisConn.Expire(hsetkey, optsingle.EXPIRE) + optsingle.RedisConn.Expire(hmsetkey, optsingle.EXPIRE) + t2 := time.Now() + global.RSPLog.Info("ExecCMD", zap.Int("db", optsingle.DB), zap.String("command", "HSET_HMSET_HSETNX_HDEL"), 
zap.String("KeySuffix", optsingle.KeySuffix), zap.Duration("time", t2.Sub(t1))) +} + +//LPUSH and LPOP and LPUSHX and LSET +func (optsingle *OptSingle) BO_LPUSH_LPOP_LPUSHX() { + t1 := time.Now() + lpushkey := "lpush_" + optsingle.KeySuffix + lpushxkey := "lpushx_" + optsingle.KeySuffix + values := make([]interface{}, optsingle.Loopstep) + for i := 0; i < len(values); i++ { + values[i] = lpushkey + strconv.Itoa(i) + } + + optsingle.RedisConn.LPush(lpushkey, values...) + for i := 0; i < optsingle.Loopstep; i++ { + if rand.Intn(optsingle.Loopstep)%2 != 0 { + optsingle.RedisConn.LSet(lpushkey, int64(rand.Intn(2*optsingle.Loopstep)-optsingle.Loopstep), lpushkey+strconv.Itoa(i)) + } + } + + for i := 0; i < optsingle.Loopstep; i++ { + if rand.Intn(optsingle.Loopstep)%2 != 0 { + optsingle.RedisConn.LPop(lpushkey) + } + } + optsingle.RedisConn.LPushX(lpushxkey, values) + optsingle.RedisConn.LPushX(lpushkey, values) + + optsingle.RedisConn.Expire(lpushkey, optsingle.EXPIRE) + t2 := time.Now() + global.RSPLog.Info("ExecCMD", zap.Int("db", optsingle.DB), zap.String("command", "LPUSH_LPOP_LPUSHX_LSET"), zap.String("KeySuffix", optsingle.KeySuffix), zap.Duration("time", t2.Sub(t1))) + +} + +//LREM and LTRIM and LINSERT +func (optsingle *OptSingle) BO_LREM_LTRIM_LINSERT() { + t1 := time.Now() + lremkey := "lrem_" + optsingle.KeySuffix + ltrimkey := "ltrim_" + optsingle.KeySuffix + values := make([]interface{}, optsingle.Loopstep) + for i := 0; i < len(values); i++ { + values[i] = lremkey + strconv.Itoa(i) + } + optsingle.RedisConn.LPush(lremkey, values...) + optsingle.RedisConn.LPush(ltrimkey, values...) 
+ + for i := 0; i < optsingle.Loopstep; i++ { + op := "BEFORE" + if i%2 == 0 { + op = "AFTER" + } else { + op = "BEFORE" + } + optsingle.RedisConn.LInsert(lremkey, op, lremkey+strconv.Itoa(rand.Intn(optsingle.Loopstep)), lremkey+strconv.Itoa(rand.Intn(optsingle.Loopstep))) + } + + optsingle.RedisConn.LRem(lremkey, int64(rand.Intn(2*optsingle.Loopstep)-optsingle.Loopstep), lremkey+strconv.Itoa(rand.Intn(optsingle.Loopstep))) + optsingle.RedisConn.LTrim(ltrimkey, int64(rand.Intn(2*optsingle.Loopstep)-optsingle.Loopstep), int64(rand.Intn(2*optsingle.Loopstep)-optsingle.Loopstep)) + optsingle.RedisConn.Expire(lremkey, optsingle.EXPIRE) + optsingle.RedisConn.Expire(ltrimkey, optsingle.EXPIRE) + t2 := time.Now() + global.RSPLog.Info("ExecCMD", zap.Int("db", optsingle.DB), zap.String("command", "LREM_TRIM_LINSERT"), zap.String("KeySuffix", optsingle.KeySuffix), zap.Duration("time", t2.Sub(t1))) +} + +//RPUSH RPUSHX RPOP RPOPLPUSH +func (optsingle *OptSingle) BO_RPUSH_RPUSHX_RPOP_RPOPLPUSH() { + t1 := time.Now() + rpushkey := "rpush_" + optsingle.KeySuffix + rpushxkey := "rpushx_" + optsingle.KeySuffix + values := make([]interface{}, optsingle.Loopstep) + for i := 0; i < len(values); i++ { + values[i] = rpushkey + strconv.Itoa(i) + } + optsingle.RedisConn.RPush(rpushkey, values...) + optsingle.RedisConn.RPushX(rpushxkey, values...) + optsingle.RedisConn.RPushX(rpushkey, values...) 
+ + //rpoplpush 操作同一个key相当于将列表逆转 + for i := 0; i < optsingle.Loopstep; i++ { + if rand.Intn(optsingle.Loopstep)%2 == 0 { + optsingle.RedisConn.RPopLPush(rpushkey, rpushkey) + } + } + + for i := 0; i < optsingle.Loopstep; i++ { + if rand.Intn(optsingle.Loopstep)%2 == 0 { + optsingle.RedisConn.RPop(rpushkey) + } + } + + optsingle.RedisConn.Expire(rpushkey, optsingle.EXPIRE) + optsingle.RedisConn.Expire(rpushxkey, optsingle.EXPIRE) + + t2 := time.Now() + global.RSPLog.Info("ExecCMD", zap.Int("db", optsingle.DB), zap.String("command", "BO_RPUSH_RPUSHX_RPOP_RPOPLPUSH"), zap.String("KeySuffix", optsingle.KeySuffix), zap.Duration("time", t2.Sub(t1))) +} + +//BLPOP BRPOP BRPOPLPUSH +//BRPOPLPUSH 集群模式下key分布在不同节点会报错(error) CROSSSLOT Keys in request don't hash to the same slot +func (optsingle *OptSingle) BO_BLPOP_BRPOP_BRPOPLPUSH() { + t1 := time.Now() + blpopkey := "blpop_" + optsingle.KeySuffix + brpopkey := "brpop_" + optsingle.KeySuffix + + values := make([]interface{}, optsingle.Loopstep) + for i := 0; i < len(values); i++ { + values[i] = blpopkey + strconv.Itoa(i) + } + + optsingle.RedisConn.RPush(blpopkey, values...) + optsingle.RedisConn.RPush(brpopkey, values...) 
+ + for i := 0; i < optsingle.Loopstep; i++ { + if rand.Intn(optsingle.Loopstep)%2 == 0 { + optsingle.RedisConn.BRPopLPush(blpopkey, blpopkey, optsingle.EXPIRE) + } + } + + for i := 0; i < optsingle.Loopstep; i++ { + if rand.Intn(optsingle.Loopstep)%2 == 0 { + optsingle.RedisConn.RPop(blpopkey) + optsingle.RedisConn.RPop(brpopkey) + } + } + + optsingle.RedisConn.Expire(blpopkey, optsingle.EXPIRE) + optsingle.RedisConn.Expire(brpopkey, optsingle.EXPIRE) + t2 := time.Now() + global.RSPLog.Info("ExecCMD", zap.Int("db", optsingle.DB), zap.String("command", "BO_BLPOP_BRPOP"), zap.String("KeySuffix", optsingle.KeySuffix), zap.Duration("time", t2.Sub(t1))) +} + +//SADD SMOVE SPOP SREM +func (optsingle *OptSingle) BO_SADD_SMOVE_SPOP_SREM() { + t1 := time.Now() + saddkey := "sadd_" + optsingle.KeySuffix + smovekey := "smove_" + optsingle.KeySuffix + spopkey := "spop_" + optsingle.KeySuffix + sremkey := "srem_" + optsingle.KeySuffix + + for i := 0; i < optsingle.Loopstep; i++ { + optsingle.RedisConn.SAdd(saddkey, saddkey+strconv.Itoa(i)) + optsingle.RedisConn.SAdd(smovekey, smovekey+strconv.Itoa(i)) + optsingle.RedisConn.SAdd(spopkey, spopkey+strconv.Itoa(i)) + optsingle.RedisConn.SAdd(sremkey, sremkey+strconv.Itoa(i)) + } + + for i := 0; i < optsingle.Loopstep; i++ { + if rand.Intn(optsingle.Loopstep)%2 == 0 { + optsingle.RedisConn.SPop(spopkey) + } + } + + for i := 0; i < optsingle.Loopstep; i++ { + if rand.Intn(optsingle.Loopstep)%2 == 0 { + optsingle.RedisConn.SRem(sremkey, sremkey+strconv.Itoa(rand.Intn(optsingle.Loopstep))) + } + } + + for i := 0; i < optsingle.Loopstep; i++ { + if rand.Intn(optsingle.Loopstep)%2 == 0 { + optsingle.RedisConn.SMove(smovekey, smovekey, saddkey+strconv.Itoa(i)) + } + } + + optsingle.RedisConn.Del(saddkey) + optsingle.RedisConn.Expire(smovekey, optsingle.EXPIRE) + optsingle.RedisConn.Expire(spopkey, optsingle.EXPIRE) + optsingle.RedisConn.Expire(sremkey, optsingle.EXPIRE) + + t2 := time.Now() + global.RSPLog.Info("ExecCMD", zap.Int("db", 
optsingle.DB), zap.String("command", "BO_RPUSH_RPUSHX_RPOP_RPOPLPUSH"), zap.String("KeySuffix", optsingle.KeySuffix), zap.Duration("time", t2.Sub(t1))) + +} + +//SDIFFSTORE SINTERSTORE SUNIONSTORE 集群模式下key分布在不同节点会报错(error) CROSSSLOT Keys in request don't hash to the same slot +func (optsingle *OptSingle) BO_SDIFFSTORE_SINTERSTORE_SUNIONSTORE() { + t1 := time.Now() + sdiff1 := "sdiff1_" + optsingle.KeySuffix + sdiff2 := "sdiff2_" + optsingle.KeySuffix + sdiffstore := "sdiffsotre_" + optsingle.KeySuffix + sinterstore := "sintersotre_" + optsingle.KeySuffix + sunionstore := "sunionstore_" + optsingle.KeySuffix + + for i := 0; i < optsingle.Loopstep; i++ { + optsingle.RedisConn.SAdd(sdiff1, optsingle.KeySuffix+strconv.Itoa(rand.Intn(2*optsingle.Loopstep))) + optsingle.RedisConn.SAdd(sdiff2, optsingle.KeySuffix+strconv.Itoa(rand.Intn(2*optsingle.Loopstep))) + } + + optsingle.RedisConn.SDiffStore(sdiffstore, sdiff1, sdiff2) + optsingle.RedisConn.SInterStore(sinterstore, sdiff1, sdiff2) + optsingle.RedisConn.SUnionStore(sunionstore, sdiff1, sdiff2) + + optsingle.RedisConn.Expire(sdiffstore, optsingle.EXPIRE) + optsingle.RedisConn.Expire(sinterstore, optsingle.EXPIRE) + optsingle.RedisConn.Expire(sunionstore, optsingle.EXPIRE) + + optsingle.RedisConn.Del(sdiff1, sdiff2) + + t2 := time.Now() + global.RSPLog.Info("ExecCMD", zap.Int("db", optsingle.DB), zap.String("command", "BO_SDIFFSTORE_SINTERSTORE_SUNIONSTORE"), zap.String("KeySuffix", optsingle.KeySuffix), zap.Duration("time", t2.Sub(t1))) +} + +//ZADD ZINCRBY ZREM +//start version:1.2.0 +func (optsingle OptSingle) BO_ZADD_ZINCRBY_ZPOPMAX_ZPOPMIN_ZREM() { + t1 := time.Now() + + zaddkey := "zadd_" + optsingle.KeySuffix + zincrby := "zincrby_" + optsingle.KeySuffix + zrem := "zrem_" + optsingle.KeySuffix + + for i := 0; i < optsingle.Loopstep; i++ { + z := redis.Z{ + Score: float64(i), + Member: zaddkey + strconv.Itoa(i), + } + optsingle.RedisConn.ZAdd(zaddkey, &z) + optsingle.RedisConn.ZAdd(zincrby, &z) + 
optsingle.RedisConn.ZAdd(zrem, &z) + + } + + for i := 0; i < optsingle.Loopstep; i++ { + if rand.Intn(optsingle.Loopstep)%2 == 0 { + optsingle.RedisConn.ZIncrBy(zincrby, float64(rand.Intn(2*optsingle.Loopstep)-optsingle.Loopstep), zaddkey+strconv.Itoa(rand.Intn(optsingle.Loopstep))) + optsingle.RedisConn.ZRem(zrem, zaddkey+strconv.Itoa(rand.Intn(optsingle.Loopstep))) + } + } + + optsingle.RedisConn.Expire(zincrby, optsingle.EXPIRE) + optsingle.RedisConn.Expire(zrem, optsingle.EXPIRE) + t2 := time.Now() + + global.RSPLog.Info("ExecCMD", zap.Int("db", optsingle.DB), zap.String("command", "BO_ZADD_ZINCRBY_ZPOPMAX_ZPOPMIN_ZREM"), zap.String("KeySuffix", optsingle.KeySuffix), zap.Duration("time", t2.Sub(t1))) +} + +//ZPOPMAX ZPOPMIN +//start version:5.0 +func (optsingle OptSingle) BO_ZPOPMAX_ZPOPMIN() { + t1 := time.Now() + + zpopmax := "zpopmax_" + optsingle.KeySuffix + zpopmin := "zpopmin_" + optsingle.KeySuffix + + for i := 0; i < optsingle.Loopstep; i++ { + z := redis.Z{ + Score: float64(i), + Member: zpopmax + strconv.Itoa(i), + } + optsingle.RedisConn.ZAdd(zpopmax, &z) + optsingle.RedisConn.ZAdd(zpopmin, &z) + } + + for i := 0; i < optsingle.Loopstep; i++ { + if rand.Intn(optsingle.Loopstep)%2 == 0 { + optsingle.RedisConn.ZPopMax(zpopmax, 1) + optsingle.RedisConn.ZPopMin(zpopmin, 1) + } + } + + optsingle.RedisConn.Expire(zpopmax, optsingle.EXPIRE) + optsingle.RedisConn.Expire(zpopmin, optsingle.EXPIRE) + t2 := time.Now() + + global.RSPLog.Info("ExecCMD", zap.Int("db", optsingle.DB), zap.String("command", "BO_ZPOPMAX_ZPOPMIN"), zap.String("KeySuffix", optsingle.KeySuffix), zap.Duration("time", t2.Sub(t1))) +} + +//ZREMRANGEBYLEX ZREMRANGEBYRANK ZREMRANGEBYSCORE +//start version:2.8.9 +func (optsingle *OptSingle) BO_ZREMRANGEBYLEX_ZREMRANGEBYRANK_ZREMRANGEBYSCORE() { + t1 := time.Now() + + zremrangebylex := "zremrangebylex_" + optsingle.KeySuffix + zremrangebyrank := "zremrangebyrank_" + optsingle.KeySuffix + zremrangebyscore := "zremrangebyscore_" + 
optsingle.KeySuffix + + for i := 0; i < optsingle.Loopstep; i++ { + z := redis.Z{ + Score: float64(i), + Member: zremrangebylex + strconv.Itoa(i), + } + optsingle.RedisConn.ZAdd(zremrangebylex, &z) + optsingle.RedisConn.ZAdd(zremrangebyrank, &z) + optsingle.RedisConn.ZAdd(zremrangebyscore, &z) + } + + optsingle.RedisConn.ZRemRangeByLex(zremrangebylex, zremrangebylex+strconv.Itoa(0), zremrangebylex+strconv.Itoa(rand.Intn(optsingle.Loopstep-1))) + optsingle.RedisConn.ZRemRangeByRank(zremrangebyrank, int64(rand.Intn(2*optsingle.Loopstep)-optsingle.Loopstep), int64(rand.Intn(2*optsingle.Loopstep)-optsingle.Loopstep)) + optsingle.RedisConn.ZRemRangeByScore(zremrangebyscore, strconv.Itoa(rand.Intn(optsingle.Loopstep)), strconv.Itoa(rand.Intn(optsingle.Loopstep))) + + optsingle.RedisConn.Expire(zremrangebylex, optsingle.EXPIRE) + optsingle.RedisConn.Expire(zremrangebyrank, optsingle.EXPIRE) + optsingle.RedisConn.Expire(zremrangebyscore, optsingle.EXPIRE) + + t2 := time.Now() + global.RSPLog.Info("ExecCMD", zap.Int("db", optsingle.DB), zap.String("command", "BO_ZREMRANGEBYLEX_ZREMRANGEBYRANK_ZREMRANGEBYSCORE"), zap.String("KeySuffix", optsingle.KeySuffix), zap.Duration("time", t2.Sub(t1))) + +} + +// BO_ZUNIONSTORE_ZINTERSTORE,集群模式下key分布在不同节点会报错(error) CROSSSLOT Keys in request don't hash to the same slot +func (optsingle *OptSingle) BO_ZUNIONSTORE_ZINTERSTORE() { + t1 := time.Now() + zset1 := "zset1_" + optsingle.KeySuffix + zset2 := "zset2_" + optsingle.KeySuffix + zset3 := "zset3_" + optsingle.KeySuffix + zinterstore := "zinterstore_" + optsingle.KeySuffix + zunionstore := "zunionstore_" + optsingle.KeySuffix + + for i := 0; i < optsingle.Loopstep; i++ { + z := redis.Z{ + Score: float64(i), + Member: zset1 + strconv.Itoa(i), + } + optsingle.RedisConn.ZAdd(zset1, &z) + optsingle.RedisConn.ZAdd(zset2, &z) + optsingle.RedisConn.ZAdd(zset3, &z) + } + + zstore := redis.ZStore{ + Keys: []string{zset1, zset2, zset3}, + Weights: 
[]float64{float64(rand.Intn(optsingle.Loopstep))}, + } + + optsingle.RedisConn.ZInterStore(zinterstore, &zstore) + optsingle.RedisConn.ZUnionStore(zunionstore, &zstore) + + optsingle.RedisConn.Del(zset1, zset2, zset3) + optsingle.RedisConn.Expire(zinterstore, optsingle.EXPIRE) + optsingle.RedisConn.Expire(zunionstore, optsingle.EXPIRE) + t2 := time.Now() + global.RSPLog.Info("ExecCMD", zap.Int("db", optsingle.DB), zap.String("command", "BO_ZUNIONSTORE_ZINTERSTORE"), zap.String("KeySuffix", optsingle.KeySuffix), zap.Duration("time", t2.Sub(t1))) + +} + +//随机执行一个基础操作 +func (optsingle *OptSingle) ExecRandOpt() { + optsingle.OptType = BaseOptArray[rand.Intn(len(BaseOptArray))] + optsingle.ExecOpt() +} + +//随机执行一个基础操作半数概率为Set操作 +func (optsingle *OptSingle) ExecRandOptHalfIsSet() { + if rand.Int()%2 == 0 { + optsingle.OptType = BO_SET_SETNX + optsingle.ExecOpt() + } else { + optsingle.ExecRandOpt() + } +} + +//遍历执行所有基本操作 +func (optsingle *OptSingle) ExecAllBasicOpt() { + for _, v := range BaseOptArray { + optsingle.OptType = v + optsingle.ExecOpt() + } +} + +//持续随机执行基础操作 +func (optsingle *OptSingle) KeepExecBasicOpt(ctx context.Context, sleeptime time.Duration, tocluster bool) { + i := int64(0) + keysuffix := optsingle.KeySuffix + //会引起CROSSSLOT Keys in request don't hash to the same slot错误的命令列表 + tocluster_skip_array := map[OptType]string{ + BO_MSET_MSETNX: "BO_MSET_MSETNX", + BO_PFMERGE: "BO_PFMERGE", + BO_SDIFFSTORE_SINTERSTORE_SUNIONSTORE: "BO_SDIFFSTORE_SINTERSTORE_SUNIONSTORE", + BO_ZUNIONSTORE_ZINTERSTORE: "BO_ZUNIONSTORE_ZINTERSTORE", + } + + for { + rand.Seed(time.Now().UnixNano()) + randi := rand.Intn(2 * len(BaseOptArray)) + optsingle.KeySuffix = keysuffix + strconv.FormatInt(i, 10) + if randi < len(BaseOptArray) { + optsingle.OptType = BaseOptArray[randi] + } else { + optsingle.OptType = BO_SET_SETNX + } + + if tocluster { + if _, ok := tocluster_skip_array[optsingle.OptType]; ok { + continue + } + } + + optsingle.ExecOpt() + + i++ + time.Sleep(sleeptime) + 
select { + case <-ctx.Done(): + return + default: + continue + } + } + +} diff --git a/generatedata/baseoptsingle_test.go b/generatedata/baseoptsingle_test.go new file mode 100644 index 0000000..54b696b --- /dev/null +++ b/generatedata/baseoptsingle_test.go @@ -0,0 +1,77 @@ +package generatedata + +import ( + "context" + "fmt" + "github.com/go-redis/redis/v7" + "sync" + "testcase/commons" + "testing" + "time" +) + +func TestBasicOpt_ExecOpt(t *testing.T) { + saddr := "114.67.100.239:6379" + opt := &redis.Options{ + Addr: saddr, + DB: 0, // use default DB + } + opt.Password = "redistest0102" + client := commons.GetGoRedisClient(opt) + + baseopt := OptSingle{ + RedisConn: client.Conn(), + //RedisClient: client, + RedisVersion: "4.0", + OptType: BO_APPEND, + KeySuffix: "keysuffix", + Loopstep: 20, + EXPIRE: 600 * time.Second, + } + baseopt.RedisConn.Select(3) + + for _, v := range BaseOptArray { + baseopt.OptType = v + baseopt.ExecOpt() + } + + baseopt.ExecOpt() + + fmt.Println(baseopt.KeySuffix, baseopt.EXPIRE, baseopt.Loopstep) + fmt.Println("䷃") + +} + +func TestBasicOpt_KeepExecBasicOpt(t *testing.T) { + saddr := "114.67.100.239:6379" + opt := &redis.Options{ + Addr: saddr, + DB: 0, // use default DB + } + opt.Password = "redistest0102" + client := commons.GetGoRedisClient(opt) + defer client.Close() + + baseopt := OptSingle{ + RedisConn: client.Conn(), + RedisVersion: "4.0", + OptType: BO_APPEND, + KeySuffix: "keysuffix", + Loopstep: 20, + EXPIRE: 600 * time.Second, + } + + wg := sync.WaitGroup{} + d := time.Now().Add(30000 * time.Second) + ctx, cancel := context.WithDeadline(context.Background(), d) + defer cancel() + + wg.Add(1) + go func() { + baseopt.KeepExecBasicOpt(ctx, 300*time.Millisecond) + zaplogger.Sugar().Info("KeepExecBasicOpt Finish!!!") + wg.Done() + }() + wg.Wait() + +} diff --git a/generatedata/genbigkv.go b/generatedata/genbigkv.go new file mode 100644 index 0000000..bfe7726 --- /dev/null +++ b/generatedata/genbigkv.go @@ -0,0 +1,158 @@ +package 
generatedata + +import ( + "github.com/go-redis/redis/v7" + "go.uber.org/zap" + "math/rand" + "strconv" + "sync" + "testcase/global" + "time" +) + +type GenBigKV struct { + RedisConn *redis.Conn + KeySuffix string + Length int //set、hash等容器型key的长度 + EXPIRE time.Duration + DB int + ValuePrefix string +} + +func (gbkv *GenBigKV) GenBigHash() string { + t1 := time.Now() + key := "BigHash_" + gbkv.KeySuffix + for i := 0; i < gbkv.Length; i++ { + gbkv.RedisConn.HSet(key, key+strconv.Itoa(i), gbkv.ValuePrefix+strconv.Itoa(i)) + } + gbkv.RedisConn.Expire(key, gbkv.EXPIRE) + t2 := time.Now() + + global.RSPLog.Info("GenBigKV", zap.Int("db", gbkv.DB), zap.String("keytype", "hash"), zap.String("key", key), zap.String("duration", t2.Sub(t1).String())) + return key +} + +func (gbkv *GenBigKV) GenBigList() string { + t1 := time.Now() + key := "BigList_" + gbkv.KeySuffix + for i := 0; i < gbkv.Length; i++ { + gbkv.RedisConn.LPush(key, gbkv.ValuePrefix+strconv.Itoa(i)) + } + gbkv.RedisConn.Expire(key, gbkv.EXPIRE) + t2 := time.Now() + global.RSPLog.Info("GenBigKV", zap.Int("db", gbkv.DB), zap.String("keytype", "list"), zap.String("key", key), zap.String("duration", t2.Sub(t1).String())) + + return key +} + +func (gbkv *GenBigKV) GenBigSet() string { + t1 := time.Now() + key := "BigSet_" + gbkv.KeySuffix + for i := 0; i < gbkv.Length; i++ { + gbkv.RedisConn.SAdd(key, gbkv.ValuePrefix+strconv.Itoa(i)) + } + + gbkv.RedisConn.Expire(key, gbkv.EXPIRE) + t2 := time.Now() + global.RSPLog.Info("GenBigKV", zap.Int("db", gbkv.DB), zap.String("keytype", "set"), zap.String("key", key), zap.String("duration", t2.Sub(t1).String())) + return key +} +func (gbkv *GenBigKV) GenBigZset() string { + t1 := time.Now() + key := "BigZset_" + gbkv.KeySuffix + for i := 0; i < gbkv.Length; i++ { + member := &redis.Z{ + Score: rand.Float64(), + Member: gbkv.ValuePrefix + strconv.Itoa(i), + } + gbkv.RedisConn.ZAdd(key, member) + } + gbkv.RedisConn.Expire(key, gbkv.EXPIRE) + t2 := time.Now() + 
global.RSPLog.Info("GenBigKV", zap.Int("db", gbkv.DB), zap.String("keytype", "zset"), zap.String("key", key), zap.String("duration", t2.Sub(t1).String())) + return key +} + +func (gbkv *GenBigKV) GenBigString() { + t1 := time.Now() + key := "BigString_" + gbkv.KeySuffix + for i := 0; i < gbkv.Length; i++ { + gbkv.RedisConn.Set(key+strconv.Itoa(i), gbkv.ValuePrefix+strconv.Itoa(i), gbkv.EXPIRE) + } + t2 := time.Now() + global.RSPLog.Info("GenBigKV", zap.Int("db", gbkv.DB), zap.String("keytype", "string"), zap.String("keyprefix", key), zap.String("duration", t2.Sub(t1).String())) +} + +func (gbkv *GenBigKV) GenerateBaseDataParallel(client *redis.Client) map[string]string { + + global.RSPLog.Sugar().Info("Generate Base data Beging...") + bigkvmap := make(map[string]string) + wg := sync.WaitGroup{} + + wg.Add(1) + go func() { + newgbkv := new(GenBigKV) + newgbkv.RedisConn = client.Conn() + newgbkv.KeySuffix = gbkv.KeySuffix + newgbkv.Length = gbkv.Length + newgbkv.EXPIRE = gbkv.EXPIRE + newgbkv.DB = gbkv.DB + newgbkv.ValuePrefix = gbkv.ValuePrefix + bigkvmap[newgbkv.GenBigHash()] = "Hash" + wg.Done() + }() + + wg.Add(1) + go func() { + newgbkv := new(GenBigKV) + newgbkv.RedisConn = client.Conn() + newgbkv.KeySuffix = gbkv.KeySuffix + newgbkv.Length = gbkv.Length + newgbkv.EXPIRE = gbkv.EXPIRE + newgbkv.ValuePrefix = gbkv.ValuePrefix + newgbkv.DB = gbkv.DB + bigkvmap[newgbkv.GenBigList()] = "List" + wg.Done() + }() + + wg.Add(1) + go func() { + newgbkv := new(GenBigKV) + newgbkv.RedisConn = client.Conn() + newgbkv.KeySuffix = gbkv.KeySuffix + newgbkv.Length = gbkv.Length + newgbkv.EXPIRE = gbkv.EXPIRE + newgbkv.ValuePrefix = gbkv.ValuePrefix + newgbkv.DB = gbkv.DB + bigkvmap[newgbkv.GenBigSet()] = "Set" + wg.Done() + }() + + wg.Add(1) + go func() { + newgbkv := new(GenBigKV) + newgbkv.RedisConn = client.Conn() + newgbkv.KeySuffix = gbkv.KeySuffix + newgbkv.Length = gbkv.Length + newgbkv.EXPIRE = gbkv.EXPIRE + newgbkv.ValuePrefix = gbkv.ValuePrefix + newgbkv.DB = gbkv.DB 
+ bigkvmap[newgbkv.GenBigZset()] = "Zset" + wg.Done() + }() + + wg.Add(1) + go func() { + newgbkv := new(GenBigKV) + newgbkv.RedisConn = client.Conn() + newgbkv.KeySuffix = gbkv.KeySuffix + newgbkv.Length = gbkv.Length + newgbkv.EXPIRE = gbkv.EXPIRE + newgbkv.ValuePrefix = gbkv.ValuePrefix + newgbkv.DB = gbkv.DB + newgbkv.GenBigString() + wg.Done() + }() + wg.Wait() + return bigkvmap +} diff --git a/generatedata/genbigkv_test.go b/generatedata/genbigkv_test.go new file mode 100644 index 0000000..b0a2d50 --- /dev/null +++ b/generatedata/genbigkv_test.go @@ -0,0 +1,76 @@ +package generatedata + +import ( + "fmt" + "github.com/go-redis/redis/v7" + "math/rand" + "strconv" + + //"go.uber.org/zap" + "testcase/commons" + "testing" + "time" +) + +func TestGenerateBaseDataParallel(t *testing.T) { + saddr := "114.67.100.239:6379" + opt := &redis.Options{ + Addr: saddr, + DB: 0, // use default DB + } + opt.Password = "redistest0102" + client := commons.GetGoRedisClient(opt) + + defer client.Close() + + client.Set("aaa", "sa", 1000*time.Second) + client.LPush("lista", "tttt", "aa", "aadd") + + client.FlushAll() + + for i := 0; i < 100; i++ { + client.HSet("hash_1", "feild"+strconv.Itoa(i), "val"+strconv.Itoa(i)) + } + for i := 0; i < 100; i++ { + client.SAdd("set_1", "set"+strconv.Itoa(i)) + } + for i := 0; i < 100; i++ { + client.ZAdd("zset_1", &redis.Z{Score: rand.Float64(), Member: "member" + strconv.Itoa(i)}) + } + + fmt.Println(client.Type("hash_1")) + cursor := uint64(0) + for { + sourceresult, c, err := client.HScan("hash_1", cursor, "*", int64(1)).Result() + if err != nil { + fmt.Println(err) + } + + fmt.Println("output hash_1") + fmt.Println(c) + fmt.Println(sourceresult) + + cursor = c + if c == 0 { + break + } + } + + cursor = uint64(0) + for { + sourceresult, c, err := client.SScan("set_1", cursor, "*", int64(10)).Result() + if err != nil { + fmt.Println(err) + } + + fmt.Println("output hash_1") + fmt.Println(c) + fmt.Println(sourceresult) + + cursor = c + if c == 
0 { + break + } + } + +} diff --git a/generatedata/genbigkvcluster.go b/generatedata/genbigkvcluster.go new file mode 100644 index 0000000..7e8f116 --- /dev/null +++ b/generatedata/genbigkvcluster.go @@ -0,0 +1,154 @@ +package generatedata + +import ( + "context" + "github.com/go-redis/redis/v7" + "go.uber.org/zap" + "math/rand" + "strconv" + "sync" + "testcase/global" + "time" +) + +type GenBigKVCluster struct { + RedisClusterClient *redis.ClusterClient + KeySuffix string + Length int + EXPIRE time.Duration + DB int + ValuePrefix string + DataGenInterval time.Duration +} + +func (gbkv *GenBigKVCluster) GenBigHash() string { + t1 := time.Now() + key := "BigHash_" + gbkv.KeySuffix + for i := 0; i < gbkv.Length; i++ { + gbkv.RedisClusterClient.HSet(key, key+strconv.Itoa(i), gbkv.ValuePrefix+strconv.Itoa(i)) + time.Sleep(gbkv.DataGenInterval) + } + gbkv.RedisClusterClient.Expire(key, gbkv.EXPIRE) + t2 := time.Now() + + global.RSPLog.Info("GenBigKV", zap.Int("db", gbkv.DB), zap.String("keytype", "hash"), zap.String("key", key), zap.String("duration", t2.Sub(t1).String())) + return key +} + +func (gbkv *GenBigKVCluster) GenBigList() string { + t1 := time.Now() + key := "BigList_" + gbkv.KeySuffix + for i := 0; i < gbkv.Length; i++ { + gbkv.RedisClusterClient.LPush(key, gbkv.ValuePrefix+strconv.Itoa(i)) + time.Sleep(gbkv.DataGenInterval) + } + gbkv.RedisClusterClient.Expire(key, gbkv.EXPIRE) + t2 := time.Now() + global.RSPLog.Info("GenBigKV", zap.Int("db", gbkv.DB), zap.String("keytype", "list"), zap.String("key", key), zap.String("duration", t2.Sub(t1).String())) + + return key +} + +func (gbkv *GenBigKVCluster) GenBigSet() string { + t1 := time.Now() + key := "BigSet_" + gbkv.KeySuffix + for i := 0; i < gbkv.Length; i++ { + gbkv.RedisClusterClient.SAdd(key, gbkv.ValuePrefix+strconv.Itoa(i)) + time.Sleep(gbkv.DataGenInterval) + } + gbkv.RedisClusterClient.Expire(key, gbkv.EXPIRE) + t2 := time.Now() + global.RSPLog.Info("GenBigKV", zap.Int("db", gbkv.DB), 
zap.String("keytype", "set"), zap.String("key", key), zap.String("duration", t2.Sub(t1).String()))
+	return key
+}
+
+// GenBigZset generates one big zset key with random scores and returns its name.
+func (gbkv *GenBigKVCluster) GenBigZset() string {
+	t1 := time.Now()
+	key := "BigZset_" + gbkv.KeySuffix
+	for i := 0; i < gbkv.Length; i++ {
+		member := &redis.Z{
+			Score:  rand.Float64(),
+			Member: gbkv.ValuePrefix + strconv.Itoa(i),
+		}
+		gbkv.RedisClusterClient.ZAdd(key, member)
+		time.Sleep(gbkv.DataGenInterval)
+	}
+	gbkv.RedisClusterClient.Expire(key, gbkv.EXPIRE)
+	t2 := time.Now()
+	global.RSPLog.Info("GenBigKV", zap.Int("db", gbkv.DB), zap.String("keytype", "zset"), zap.String("key", key), zap.String("duration", t2.Sub(t1).String()))
+	return key
+}
+
+// GenBigString generates 100 string keys sharing the "BigString_" prefix.
+func (gbkv *GenBigKVCluster) GenBigString() {
+	t1 := time.Now()
+	key := "BigString_" + gbkv.KeySuffix
+	for i := 0; i < 100; i++ {
+		gbkv.RedisClusterClient.Set(key+strconv.Itoa(i), gbkv.ValuePrefix+strconv.Itoa(i), gbkv.EXPIRE)
+		time.Sleep(gbkv.DataGenInterval)
+	}
+	t2 := time.Now()
+	global.RSPLog.Info("GenBigKV", zap.Int("db", gbkv.DB), zap.String("keytype", "string"), zap.String("keyprefix", key), zap.String("duration", t2.Sub(t1).String()))
+}
+
+// GenBigSingleExec runs every generator sequentially, producing one big key of each type.
+func (gbkv *GenBigKVCluster) GenBigSingleExec() {
+	gbkv.GenBigHash()
+	gbkv.GenBigList()
+	gbkv.GenBigSet()
+	gbkv.GenBigZset()
+	gbkv.GenBigString()
+}
+
+// KeepGenBigSingle loops GenBigSingleExec with a fresh key suffix per round until ctx is done.
+func (gbkv *GenBigKVCluster) KeepGenBigSingle(ctx context.Context) {
+	i := int64(0)
+	keySuffix := gbkv.KeySuffix
+	for {
+		gbkv.KeySuffix = keySuffix + "_" + strconv.FormatInt(i, 10)
+		gbkv.GenBigSingleExec()
+		i++
+		select {
+		case <-ctx.Done():
+			return
+		default:
+			continue
+		}
+	}
+}
+
+// GenerateBaseDataParallelCluster generates all big-key types concurrently and
+// returns a map of generated key name -> type name.
+func (gbkv *GenBigKVCluster) GenerateBaseDataParallelCluster() map[string]string {
+
+	global.RSPLog.Sugar().Info("Generate Base data Beging...")
+	bigkvmap := make(map[string]string)
+	// mu guards bigkvmap: Go maps are not safe for concurrent writes, and
+	// the goroutines below each write a result entry.
+	mu := sync.Mutex{}
+	wg := sync.WaitGroup{}
+
+	wg.Add(1)
+	go func() {
+		k := gbkv.GenBigHash()
+		mu.Lock()
+		bigkvmap[k] = "Hash"
+		mu.Unlock()
+		wg.Done()
+	}()
+
+	wg.Add(1)
+	go func() {
+		k := gbkv.GenBigList()
+		mu.Lock()
+		bigkvmap[k] = "List"
+		mu.Unlock()
+		wg.Done()
+	}()
+
+	wg.Add(1)
+	go func() {
+		k := gbkv.GenBigSet()
+		mu.Lock()
+		bigkvmap[k] = "Set"
+		mu.Unlock()
+		wg.Done()
+	}()
+
+	wg.Add(1)
+	go func() {
+		k := gbkv.GenBigZset()
+		mu.Lock()
+		bigkvmap[k] = "Zset"
+		mu.Unlock()
+		wg.Done()
+	}()
+
+	wg.Add(1)
+	go func() {
+		// string keys carry their own names; nothing is recorded in the map
+		gbkv.GenBigString()
+		wg.Done()
+	}()
+	wg.Wait()
+	return bigkvmap
+}
diff --git a/generatedata/genbigsingle.go b/generatedata/genbigsingle.go
new file mode 100644
index 0000000..f07e2fa
--- /dev/null
+++ b/generatedata/genbigsingle.go
@@ -0,0 +1,189 @@
+package generatedata
+
+import (
+	"context"
+	"github.com/go-redis/redis/v7"
+	"go.uber.org/zap"
+	"math/rand"
+	"strconv"
+	"sync"
+	"testcase/global"
+	"time"
+)
+
+type GenBigKVSingle struct {
+	RedisConn       *redis.Conn
+	KeySuffix       string
+	Length          int // length of container-type keys such as set and hash
+	EXPIRE          time.Duration
+	DB              int
+	ValuePrefix     string
+	DataGenInterval time.Duration
+}
+
+// GenBigHash generates one big hash key and returns its name.
+func (gbkv *GenBigKVSingle) GenBigHash() string {
+	t1 := time.Now()
+	key := "BigHash_" + gbkv.KeySuffix
+	for i := 0; i < gbkv.Length; i++ {
+		gbkv.RedisConn.HSet(key, key+strconv.Itoa(i), gbkv.ValuePrefix+strconv.Itoa(i))
+		time.Sleep(gbkv.DataGenInterval)
+	}
+	gbkv.RedisConn.Expire(key, gbkv.EXPIRE)
+	t2 := time.Now()
+	global.RSPLog.Info("GenBigKV", zap.Int("db", gbkv.DB), zap.String("keytype", "hash"), zap.String("key", key), zap.String("duration", t2.Sub(t1).String()))
+
+	return key
+}
+
+// GenBigList generates one big list key and returns its name.
+func (gbkv *GenBigKVSingle) GenBigList() string {
+	t1 := time.Now()
+	key := "BigList_" + gbkv.KeySuffix
+	for i := 0; i < gbkv.Length; i++ {
+		gbkv.RedisConn.LPush(key, gbkv.ValuePrefix+strconv.Itoa(i))
+		time.Sleep(gbkv.DataGenInterval)
+	}
+	gbkv.RedisConn.Expire(key, gbkv.EXPIRE)
+	t2 := time.Now()
+	global.RSPLog.Info("GenBigKV", zap.Int("db", gbkv.DB), zap.String("keytype", "list"), zap.String("key", key), zap.String("duration", t2.Sub(t1).String()))
+	return key
+}
+
+// GenBigSet generates one big set key and returns its name.
+func (gbkv *GenBigKVSingle) GenBigSet() string {
+	t1 := time.Now()
+	key := "BigSet_" + gbkv.KeySuffix
+	for i := 0; i < gbkv.Length; i++ {
+		gbkv.RedisConn.SAdd(key, gbkv.ValuePrefix+strconv.Itoa(i))
+		
time.Sleep(gbkv.DataGenInterval)
+	}
+
+	gbkv.RedisConn.Expire(key, gbkv.EXPIRE)
+	t2 := time.Now()
+	global.RSPLog.Info("GenBigKV", zap.Int("db", gbkv.DB), zap.String("keytype", "set"), zap.String("key", key), zap.String("duration", t2.Sub(t1).String()))
+	return key
+}
+
+// GenBigZset generates one big zset key with random scores and returns its name.
+func (gbkv *GenBigKVSingle) GenBigZset() string {
+	t1 := time.Now()
+	key := "BigZset_" + gbkv.KeySuffix
+	for i := 0; i < gbkv.Length; i++ {
+		member := &redis.Z{
+			Score:  rand.Float64(),
+			Member: gbkv.ValuePrefix + strconv.Itoa(i),
+		}
+		gbkv.RedisConn.ZAdd(key, member)
+		time.Sleep(gbkv.DataGenInterval)
+	}
+	gbkv.RedisConn.Expire(key, gbkv.EXPIRE)
+	t2 := time.Now()
+	global.RSPLog.Info("GenBigKV", zap.Int("db", gbkv.DB), zap.String("keytype", "zset"), zap.String("key", key), zap.String("duration", t2.Sub(t1).String()))
+	return key
+}
+
+// GenBigString generates 100 string keys sharing the "BigString_" prefix.
+func (gbkv *GenBigKVSingle) GenBigString() {
+	t1 := time.Now()
+	key := "BigString_" + gbkv.KeySuffix
+	for i := 0; i < 100; i++ {
+		gbkv.RedisConn.Set(key+strconv.Itoa(i), gbkv.ValuePrefix+strconv.Itoa(i), gbkv.EXPIRE)
+		time.Sleep(gbkv.DataGenInterval)
+	}
+	t2 := time.Now()
+	global.RSPLog.Info("GenBigKV", zap.Int("db", gbkv.DB), zap.String("keytype", "string"), zap.String("keyprefix", key), zap.String("duration", t2.Sub(t1).String()))
+}
+
+// GenBigSingleExec runs every generator sequentially, producing one big key of each type.
+func (gbkv *GenBigKVSingle) GenBigSingleExec() {
+	gbkv.GenBigHash()
+	gbkv.GenBigList()
+	gbkv.GenBigSet()
+	gbkv.GenBigZset()
+	gbkv.GenBigString()
+}
+
+// KeepGenBigSingle loops GenBigSingleExec with a fresh key suffix per round until ctx is done.
+func (gbkv *GenBigKVSingle) KeepGenBigSingle(ctx context.Context) {
+	i := int64(0)
+	keySuffix := gbkv.KeySuffix
+	for {
+		gbkv.KeySuffix = keySuffix + "_" + strconv.FormatInt(i, 10)
+		gbkv.GenBigSingleExec()
+		i++
+		select {
+		case <-ctx.Done():
+			return
+		default:
+			continue
+		}
+	}
+}
+
+// GenerateBaseDataParallel generates all big-key types concurrently, each on its own
+// connection, and returns a map of generated key name -> type name.
+func (gbkv *GenBigKVSingle) GenerateBaseDataParallel(client *redis.Client) map[string]string {
+
+	global.RSPLog.Sugar().Info("Generate Base data Beging...")
+	bigkvmap := make(map[string]string)
+	// mu guards bigkvmap: Go maps are not safe for concurrent writes, and
+	// the goroutines below each write a result entry.
+	mu := sync.Mutex{}
+	wg := sync.WaitGroup{}
+
+	// newGen builds a per-goroutine generator with its own connection so the
+	// goroutines never share a single *redis.Conn; replaces five copy-pasted setups.
+	newGen := func() *GenBigKV {
+		g := new(GenBigKV)
+		g.RedisConn = client.Conn()
+		g.KeySuffix = gbkv.KeySuffix
+		g.Length = gbkv.Length
+		g.EXPIRE = gbkv.EXPIRE
+		g.ValuePrefix = gbkv.ValuePrefix
+		g.DB = gbkv.DB
+		return g
+	}
+
+	wg.Add(1)
+	go func() {
+		k := newGen().GenBigHash()
+		mu.Lock()
+		bigkvmap[k] = "Hash"
+		mu.Unlock()
+		wg.Done()
+	}()
+
+	wg.Add(1)
+	go func() {
+		k := newGen().GenBigList()
+		mu.Lock()
+		bigkvmap[k] = "List"
+		mu.Unlock()
+		wg.Done()
+	}()
+
+	wg.Add(1)
+	go func() {
+		k := newGen().GenBigSet()
+		mu.Lock()
+		bigkvmap[k] = "Set"
+		mu.Unlock()
+		wg.Done()
+	}()
+
+	wg.Add(1)
+	go func() {
+		k := newGen().GenBigZset()
+		mu.Lock()
+		bigkvmap[k] = "Zset"
+		mu.Unlock()
+		wg.Done()
+	}()
+
+	wg.Add(1)
+	go func() {
+		// string keys carry their own names; nothing is recorded in the map
+		newGen().GenBigString()
+		wg.Done()
+	}()
+	wg.Wait()
+	return bigkvmap
+}
diff --git a/generatedata/gendata.go b/generatedata/gendata.go
new file mode 100644
index 0000000..401170f
--- /dev/null
+++ b/generatedata/gendata.go
@@ -0,0 +1,205 @@
+package generatedata
+
+import (
+	"context"
+	"github.com/go-redis/redis/v7"
+	"runtime"
+	"sync"
+	"time"
+
+	"testcase/commons"
+	"testcase/global"
+)
+
+type TargetType int
+
+const (
+	TargettypeSingle  TargetType = 0
+	TargettypeCluster TargetType = 1
+)
+
+func (tt TargetType) String() string { + switch tt { + case TargettypeSingle: + return "single" + case TargettypeCluster: + return "cluster" + default: + return "" + } +} + +type BigKey struct { + KeySuffixLen int `mapstructure:"keysuffixlen" json:"keysuffixlen" yaml:"keysuffixlen"` + Length int `mapstructure:"length" json:"length" yaml:"length"` + ValueSize int `mapstructure:"valuesize" json:"valuesize" yaml:"valuesize"` + Expire int64 `mapstructure:"expire" json:"expire" yaml:"expire"` + Duration int64 `mapstructure:"duaration" json:"duaration" yaml:"duaration"` + DataGenInterval int64 `mapstructure:"datageninterval" json:"datageninterval" yaml:"datageninterval"` +} + +type RandKey struct { + KeySuffixLen int `mapstructure:"keysuffixlen" json:"keysuffixlen" yaml:"keysuffixlen"` + //ValueSize int `mapstructure:"valuesize" json:"valuesize" yaml:"valuesize"` + Loopstep int `mapstructure:"loopstep" json:"loopstep" yaml:"loopstep"` + Expire int64 `mapstructure:"expire" json:"expire" yaml:"expire"` + Duration int64 `mapstructure:"duaration" json:"duaration" yaml:"duaration"` + DataGenInterval int64 `mapstructure:"datageninterval" json:"datageninterval" yaml:"datageninterval"` + Threads int `mapstructure:"threads" json:"threads" yaml:"threads"` +} + +type GenData struct { + TargetType TargetType `mapstructure:"type" json:"type" yaml:"type"` + Addr []string `mapstructure:"addr" json:"addr" yaml:"addr"` + Password string `mapstructure:"password" json:"password" yaml:"password"` + DB int `mapstructure:"db" json:"db" yaml:"db"` + BigKey *BigKey `mapstructure:"bigkey" json:"bigkey" yaml:"bigkey"` + RandKey *RandKey `mapstructure:"randkey" json:"randkey" yaml:"randkey"` +} + +func (gd *GenData) Exec() { + global.RSPLog.Sugar().Info("GenData execute") + if gd.TargetType == TargettypeSingle { + redisaddr := gd.Addr[0] + passwd := gd.Password + redisopt := &redis.Options{ + Addr: redisaddr, + DB: gd.DB, // use default DB + } + + if passwd != "" { + redisopt.Password = 
gd.Password + } + + client := commons.GetGoRedisClient(redisopt) + + _, err := client.Ping().Result() + + if err != nil { + global.RSPLog.Sugar().Error(err) + return + } + + if gd.BigKey != nil { + d := time.Now().Add(time.Duration(gd.BigKey.Duration) * time.Second) + ctx, cancel := context.WithDeadline(context.Background(), d) + defer cancel() + + wg := sync.WaitGroup{} + keySuffix := commons.RandString(gd.BigKey.KeySuffixLen) + valuePrefix := commons.RandString(gd.BigKey.ValueSize) + genBigSingle := GenBigKVSingle{ + RedisConn: client.Conn(), + KeySuffix: keySuffix, + Length: gd.BigKey.Length, + EXPIRE: time.Duration(gd.BigKey.Expire) * time.Second, + DB: gd.DB, + ValuePrefix: valuePrefix, + DataGenInterval: time.Duration(gd.BigKey.DataGenInterval) * time.Millisecond, + } + + wg.Add(1) + go func() { + genBigSingle.KeepGenBigSingle(ctx) + wg.Done() + }() + wg.Wait() + } + + if gd.RandKey != nil { + threads := runtime.NumCPU() + if gd.RandKey.Threads > 0 { + threads = gd.RandKey.Threads + } + + d := time.Now().Add(time.Duration(gd.RandKey.Duration) * time.Second) + ctx, cancel := context.WithDeadline(context.Background(), d) + defer cancel() + + wg := sync.WaitGroup{} + keySuffix := commons.RandString(gd.RandKey.KeySuffixLen) + optSingle := OptSingle{ + RedisConn: client.Conn(), + KeySuffix: keySuffix, + Loopstep: gd.RandKey.Loopstep, + EXPIRE: time.Duration(gd.RandKey.Expire) * time.Second, + DB: gd.DB, + } + + for i := 0; i < threads; i++ { + wg.Add(1) + go func() { + optSingle.KeepExecBasicOpt(ctx, time.Duration(gd.RandKey.DataGenInterval)*time.Millisecond, false) + wg.Done() + }() + } + wg.Wait() + } + } + + if gd.TargetType == TargettypeCluster { + opt := &redis.ClusterOptions{ + Addrs: gd.Addr, + } + if gd.Password != "" { + opt.Password = gd.Password + } + clusterClient := redis.NewClusterClient(opt) + + if gd.BigKey != nil { + d := time.Now().Add(time.Duration(gd.BigKey.Duration) * time.Second) + ctx, cancel := context.WithDeadline(context.Background(), d) 
+ defer cancel() + + wg := sync.WaitGroup{} + keySuffix := commons.RandString(gd.BigKey.KeySuffixLen) + valuePrefix := commons.RandString(gd.BigKey.ValueSize) + genBigKVCluster := GenBigKVCluster{ + RedisClusterClient: clusterClient, + KeySuffix: keySuffix, + Length: gd.BigKey.Length, + EXPIRE: time.Duration(gd.BigKey.Expire) * time.Second, + DB: gd.DB, + ValuePrefix: valuePrefix, + DataGenInterval: time.Duration(gd.BigKey.DataGenInterval) * time.Millisecond, + } + + wg.Add(1) + go func() { + genBigKVCluster.KeepGenBigSingle(ctx) + wg.Done() + }() + wg.Wait() + } + + if gd.RandKey != nil { + threads := runtime.NumCPU() + if gd.RandKey.Threads > 0 { + threads = gd.RandKey.Threads + } + + d := time.Now().Add(time.Duration(gd.RandKey.Duration) * time.Second) + ctx, cancel := context.WithDeadline(context.Background(), d) + defer cancel() + + wg := sync.WaitGroup{} + keySuffix := commons.RandString(gd.RandKey.KeySuffixLen) + optCluster := OptCluster{ + ClusterClient: clusterClient, + KeySuffix: keySuffix, + Loopstep: gd.RandKey.Loopstep, + EXPIRE: time.Duration(gd.RandKey.Expire) * time.Second, + DB: 0, + } + + for i := 0; i < threads; i++ { + wg.Add(1) + go func() { + optCluster.KeepExecBasicOptCluster(ctx, time.Duration(gd.RandKey.DataGenInterval)*time.Millisecond) + wg.Done() + }() + } + wg.Wait() + } + } +} \ No newline at end of file diff --git a/generatedata/generatebasedata.go b/generatedata/generatebasedata.go new file mode 100644 index 0000000..a7d818e --- /dev/null +++ b/generatedata/generatebasedata.go @@ -0,0 +1,1106 @@ +package generatedata + +import ( + "github.com/go-redis/redis/v7" + "github.com/panjf2000/ants/v2" + "go.uber.org/zap" + "math/rand" + "strconv" + "sync" + "testcase/global" + "time" +) +import "testcase/commons" + +var logger = global.RSPLog + +func GenerateBase(client *redis.Client, loopstep int64) { + + p, _ := ants.NewPool(4, ants.WithMaxBlockingTasks(4)) + defer p.Release() + + var wg sync.WaitGroup + basevalue := commons.RandString(256 
* 1024) + prefix := commons.GetUUID() + + //生成biglist + logger.Info("start make big list") + listname := prefix + "list_" + commons.RandString(8) + wg.Add(1) + biglistfunc := func() { + defer wg.Done() + t1 := time.Now() + for i := int64(0); i < loopstep; i++ { + client.LPush(listname, basevalue+strconv.FormatInt(i, 10)) + } + t2 := time.Now() + logger.Sugar().Info("Finish make big list ", t2.Sub(t1)) + } + p.Submit(biglistfunc) + + //生成string kv + logger.Sugar().Info("make strings") + //basestring := commons.RandString(128) + wg.Add(1) + makestringfunc := func() { + defer wg.Done() + t1 := time.Now() + for i := int64(0); i < loopstep; i++ { + client.Set(prefix+"str"+strconv.FormatInt(i, 10), basevalue, time.Duration(3600*time.Second)) + } + t2 := time.Now() + logger.Sugar().Info("Finish make strings ", t2.Sub(t1)) + } + p.Submit(makestringfunc) + + //生成set + logger.Sugar().Info("start make big set") + setname := prefix + "set_" + commons.RandString(10) + bigsetfunc := func() { + defer wg.Done() + t1 := time.Now() + for i := int64(0); i < loopstep; i++ { + client.SAdd(setname, basevalue+strconv.FormatInt(i, 10)) + } + t2 := time.Now() + logger.Sugar().Info("Finish make big set ", t2.Sub(t1)) + } + wg.Add(1) + p.Submit(bigsetfunc) + + //生成hset + logger.Sugar().Info("start make big hashset") + hsetname := prefix + "hset_" + commons.RandString(10) + makebighsetfunc := func() { + defer wg.Done() + t1 := time.Now() + for i := int64(0); i < loopstep; i++ { + client.HSet(hsetname, basevalue+strconv.FormatInt(i, 10), i) + } + t2 := time.Now() + + logger.Sugar().Info("Finis make big hashset ", t2.Sub(t1)) + } + wg.Add(1) + p.Submit(makebighsetfunc) + + //生成sortedSet + logger.Sugar().Info("start make big sorted set") + sortedsetname := prefix + "sortedset_" + commons.RandString(8) + makebigsortedsetfunc := func() { + defer wg.Done() + t1 := time.Now() + for i := int64(0); i < loopstep; i++ { + member := &redis.Z{ + Score: float64(i), + Member: basevalue + 
strconv.FormatInt(i, 10), + } + client.ZAdd(sortedsetname, member) + + } + t2 := time.Now() + logger.Sugar().Info("finish make big sorted set ", t2.Sub(t1)) + } + wg.Add(1) + p.Submit(makebigsortedsetfunc) + + wg.Wait() +} + +//基本类型增量测试 +func GenerateIncrement(client *redis.Client) { + + p, _ := ants.NewPool(8, ants.WithMaxBlockingTasks(8)) + defer p.Release() + + var wg sync.WaitGroup + + //APPEND + wg.Add(1) + appendfunc := func() { + t1 := time.Now() + appended := "append_" + commons.RandString(16) + for i := 0; i < 10; i++ { + client.Append(appended, strconv.Itoa(i)) + } + client.Expire(appended, 3600*time.Second) + t2 := time.Now() + //logger.WithFields(logrus.Fields{ + // "command": "append", + //}).Info(t2.Sub(t1)) + logger.Info("command_append", zap.Duration("", t2.Sub(t1))) + wg.Done() + } + p.Submit(appendfunc) + + //BITOP + wg.Add(1) + bitopfunc := func() { + t1 := time.Now() + strarry := []string{} + opandkey := "opand_" + commons.RandString(8) + oporkey := "opor_" + commons.RandString(8) + opxorkey := "opxor_" + commons.RandString(8) + opnotkey := "opnot_" + commons.RandString(8) + for i := 0; i < 20; i++ { + bitopkey := "bitop_" + commons.RandString(16) + client.Do("set", bitopkey, commons.RandString(16)) + strarry = append(strarry, bitopkey) + } + + client.BitOpAnd(opandkey, strarry...) + client.BitOpOr(oporkey, strarry...) + client.BitOpXor(opxorkey, strarry...) 
+		client.BitOpNot(opnotkey, strarry[0])
+		t2 := time.Now()
+		//logger.WithFields(logrus.Fields{
+		//	"command": "bitop",
+		//}).Info(t2.Sub(t1))
+		logger.Info("command_bitop", zap.Duration("", t2.Sub(t1)))
+		wg.Done()
+	}
+	p.Submit(bitopfunc)
+
+	//DECR and DECRBY
+	wg.Add(1)
+	decrfunc := func() {
+		t1 := time.Now()
+		strarry := []string{}
+		for i := 0; i < 100; i++ {
+			desckey := "desc_" + commons.RandString(16)
+			client.Do("set", desckey, rand.Intn(200))
+			strarry = append(strarry, desckey)
+		}
+
+		for _, str := range strarry {
+			client.Decr(str)
+			client.DecrBy(str, rand.Int63n(300))
+		}
+		t2 := time.Now()
+		//logger.WithFields(logrus.Fields{
+		//	"command": "decr",
+		//}).Info(t2.Sub(t1))
+		logger.Info("command_decr", zap.Duration("", t2.Sub(t1)))
+		wg.Done()
+	}
+	p.Submit(decrfunc)
+
+	//INCR and INCRBY and INCRBYFLOAT
+	wg.Add(1)
+	incrfunc := func() {
+		t1 := time.Now()
+		strarry := []string{}
+		for i := 0; i < 100; i++ {
+			desckey := "incr_" + commons.RandString(16)
+			client.Do("set", desckey, rand.Intn(200))
+			strarry = append(strarry, desckey)
+		}
+		for _, str := range strarry {
+			client.Incr(str)
+			client.IncrBy(str, rand.Int63n(300))
+			client.IncrByFloat(str, rand.Float64())
+		}
+		t2 := time.Now()
+		//logger.WithFields(logrus.Fields{
+		//	"command": "incr",
+		//}).Info(t2.Sub(t1))
+		logger.Info("command_incr", zap.Duration("", t2.Sub(t1)))
+		wg.Done()
+	}
+	p.Submit(incrfunc)
+
+	//MSET and MSETNX
+	wg.Add(1)
+	msetfunc := func() {
+		t1 := time.Now()
+		// MSET/MSETNX take key-value pairs; the previous key-only slices made
+		// every call fail server-side with a wrong-arity error.
+		msetarry := []interface{}{}
+		msetnxarry := []interface{}{}
+
+		for i := 0; i < 100; i++ {
+			msetkv := "mset_" + commons.RandString(16)
+			msetarry = append(msetarry, msetkv, msetkv)
+		}
+
+		for i := 0; i < 100; i++ {
+			msetnxkv := "msetnx_" + commons.RandString(16)
+			msetnxarry = append(msetnxarry, msetnxkv, msetnxkv)
+		}
+
+		client.MSetNX(msetnxarry...)
+		client.MSet(msetarry...)
+		// second MSETNX on already-existing keys deliberately exercises the no-op path
+		client.MSetNX(msetarry...)
+		t2 := time.Now()
+		//logger.WithFields(logrus.Fields{
+		//	"command": "mset",
+		//}).Info(t2.Sub(t1))
+		logger.Info("command_mset", zap.Duration("", t2.Sub(t1)))
+		wg.Done()
+	}
+	p.Submit(msetfunc)
+
+	//PSETEX
+	wg.Add(1)
+	psetnexfuc := func() {
+		t1 := time.Now()
+		psetexbaseval := commons.RandString(64)
+		for i := 0; i < 100; i++ {
+			psetexkey := "psetex_" + commons.RandString(16)
+			client.Do("psetex", psetexkey, 3600000, psetexbaseval+strconv.Itoa(i))
+		}
+		t2 := time.Now()
+		//logger.WithFields(logrus.Fields{
+		//	"command": "psetex",
+		//}).Info(t2.Sub(t1))
+		logger.Info("command_psetex", zap.Duration("", t2.Sub(t1)))
+		wg.Done()
+	}
+	p.Submit(psetnexfuc)
+
+	//SETEX
+	wg.Add(1)
+	setexfunc := func() {
+		t1 := time.Now()
+		for i := 0; i < 100; i++ {
+			setexkey := "setex_" + commons.RandString(16)
+			client.Do("setex", setexkey, 3600, setexkey)
+		}
+		t2 := time.Now()
+		//logger.WithFields(logrus.Fields{
+		//	"command": "setex",
+		//}).Info(t2.Sub(t1))
+		logger.Info("command_setex", zap.Duration("", t2.Sub(t1)))
+		wg.Done()
+	}
+	p.Submit(setexfunc)
+
+	//SET
+	wg.Add(1)
+	setfunc := func() {
+		t1 := time.Now()
+		setbaseval := commons.RandString(64)
+		for i := 0; i < 1000; i++ {
+			setkey := "set_" + commons.RandString(16)
+			client.Set(setkey, setbaseval+strconv.Itoa(i), time.Duration(3600*time.Second))
+		}
+		t2 := time.Now()
+		//logger.WithFields(logrus.Fields{
+		//	"command": "set",
+		//}).Info(t2.Sub(t1))
+		logger.Info("command_set", zap.Duration("", t2.Sub(t1)))
+		wg.Done()
+	}
+	p.Submit(setfunc)
+
+	//SETNX
+	wg.Add(1)
+	setnxfunc := func() {
+		t1 := time.Now()
+		for i := 0; i < 100; i++ {
+			setnxkey := "setnx_" + commons.RandString(16)
+			client.SetNX(setnxkey, setnxkey, time.Duration(3600*time.Second))
+		}
+		t2 := time.Now()
+		//logger.WithFields(logrus.Fields{
+		//	"command": "setnx",
+		//}).Info(t2.Sub(t1))
+		logger.Info("command_setnx", zap.Duration("", t2.Sub(t1)))
+		wg.Done()
+	}
+	p.Submit(setnxfunc)
+
+	//SETBIT
+	wg.Add(1)
+	setbitfunc := func() {
+		t1 := time.Now()
+		for i := 0; i < 100; i++ {
+			setbitkey := "setbit_" + commons.RandString(16)
+			
client.SetBit(setbitkey, rand.Int63n(100), rand.Intn(2)) // rand.Intn(2) yields 0 or 1; rand.Intn(1) is always 0, so no bit was ever set
+		}
+		t2 := time.Now()
+		//logger.WithFields(logrus.Fields{
+		//	"command": "setbit",
+		//}).Info(t2.Sub(t1))
+		logger.Info("command_setbit", zap.Duration("", t2.Sub(t1)))
+		wg.Done()
+	}
+	p.Submit(setbitfunc)
+
+	//SETRANGE
+	wg.Add(1)
+	setrangefunc := func() {
+		t1 := time.Now()
+		strarry := []string{}
+		setrangebaseval := commons.RandString(4)
+		for i := 0; i < 100; i++ {
+			setrangekey := "setrange_" + commons.RandString(16)
+			client.SetRange(setrangekey, rand.Int63n(16), setrangebaseval+strconv.Itoa(i))
+			strarry = append(strarry, setrangekey)
+		}
+		t2 := time.Now()
+		//logger.WithFields(logrus.Fields{
+		//	"command": "setrange",
+		//}).Info(t2.Sub(t1))
+		logger.Info("command_setrange", zap.Duration("", t2.Sub(t1)))
+		wg.Done()
+	}
+	p.Submit(setrangefunc)
+
+	//HINCRBY
+	wg.Add(1)
+	hicrbyfunc := func() {
+		t1 := time.Now()
+		for i := 0; i < 100; i++ {
+			client.HIncrBy("HINCRBY_"+strconv.Itoa(i), "page_view", rand.Int63n(100))
+		}
+		t2 := time.Now()
+		//logger.WithFields(logrus.Fields{
+		//	"command": "hincrby",
+		//}).Info(t2.Sub(t1))
+		logger.Info("command_hincrby", zap.Duration("", t2.Sub(t1)))
+		wg.Done()
+	}
+	p.Submit(hicrbyfunc)
+
+	//HINCRBYFLOAT
+	wg.Add(1)
+	hincrbyfloatfunc := func() {
+		t1 := time.Now()
+		for i := 0; i < 100; i++ {
+			client.HIncrByFloat("HINCRBY_"+commons.RandString(4), "page_view", rand.Float64())
+		}
+		t2 := time.Now()
+		//logger.WithFields(logrus.Fields{
+		//	"command": "hincrbyfloat",
+		//}).Info(t2.Sub(t1))
+		logger.Info("command_hincrbyfloat", zap.Duration("", t2.Sub(t1)))
+		wg.Done()
+	}
+	p.Submit(hincrbyfloatfunc)
+
+	//HMSET
+	wg.Add(1)
+	hmsetfunc := func() {
+		t1 := time.Now()
+		fieldmap := make(map[string]interface{})
+		for i := 0; i < 20; i++ {
+			key := commons.RandString(8)
+			fieldmap[key] = "wwww." 
+ key + ".com" + } + client.HMSet("hmset_"+commons.RandString(4), fieldmap) + t2 := time.Now() + //logger.WithFields(logrus.Fields{ + // "command": "hmset", + //}).Info(t2.Sub(t1)) + logger.Info("command_hmset", zap.Duration("", t2.Sub(t1))) + wg.Done() + } + p.Submit(hmsetfunc) + + //HSETNX + wg.Add(1) + hsetnxfunc := func() { + defer wg.Done() + t1 := time.Now() + basekey := "hsetnx_" + commons.RandString(8) + basefield := commons.RandString(8) + for i := 0; i < 20; i++ { + client.HSetNX(basekey, basefield+strconv.Itoa(i), basefield) + } + t2 := time.Now() + //logger.WithFields(logrus.Fields{ + // "command": "hsetnx", + //}).Info(t2.Sub(t1)) + logger.Info("command_hsetnx", zap.Duration("", t2.Sub(t1))) + + } + p.Submit(hsetnxfunc) + + //LPUSH + wg.Add(1) + lpushfunc := func() { + defer wg.Done() + t1 := time.Now() + values := make([]interface{}, 40) + for i := 0; i < len(values); i++ { + values[i] = i + } + + client.LPush("lpush_"+commons.RandString(4), values...) + t2 := time.Now() + //logger.WithFields(logrus.Fields{ + // "command": "lpush", + //}).Info(t2.Sub(t1)) + logExecDuration("command_lpush", t2.Sub(t1)) + } + p.Submit(lpushfunc) + + //LPOP + wg.Add(1) + lpopfunc := func() { + defer wg.Done() + t1 := time.Now() + basekey := "lpop_" + commons.RandString(8) + keys := []string{} + values := make([]interface{}, 40) + for i := 0; i < len(values); i++ { + values[i] = i + } + + for i := 0; i < 50; i++ { + key := basekey + strconv.Itoa(i) + client.LPush(key, values...) 
+ keys = append(keys, key) + } + + for _, v := range keys { + client.LPop(v) + } + + t2 := time.Now() + //logger.WithFields(logrus.Fields{ + // "command": "lpop", + //}).Info(t2.Sub(t1)) + logExecDuration("command_lpop", t2.Sub(t1)) + } + p.Submit(lpopfunc) + + //LPUSHX + wg.Add(1) + lpushxfunc := func() { + defer wg.Done() + t1 := time.Now() + basekey := "lpushx_" + commons.RandString(8) + values := make([]interface{}, 40) + for i := 0; i < len(values); i++ { + values[i] = i + } + + client.LPushX(basekey, values) + t2 := time.Now() + //logger.WithFields(logrus.Fields{ + // "command": "lpushx", + //}).Info(t2.Sub(t1)) + logExecDuration("command_lpushx", t2.Sub(t1)) + } + p.Submit(lpushxfunc) + + //LREM + wg.Add(1) + lremfunc := func() { + t1 := time.Now() + defer wg.Done() + basekey := "lrem_" + commons.RandString(4) + for i := 0; i < 50; i++ { + client.LPush(basekey, i) + } + + for i := 0; i < 10; i++ { + client.LRem(basekey, 0, rand.Intn(20)) + } + t2 := time.Now() + //logger.WithFields(logrus.Fields{ + // "command": "lrem", + //}).Info(t2.Sub(t1)) + logExecDuration("command_lrem", t2.Sub(t1)) + } + p.Submit(lremfunc) + + //LSET + wg.Add(1) + lsetfunc := func() { + t1 := time.Now() + defer wg.Done() + basekey := "lrem_" + commons.RandString(4) + for i := 0; i < 50; i++ { + client.LPush(basekey, i) + } + + for i := 0; i < 10; i++ { + client.LSet(basekey, rand.Int63n(49), commons.RandString(4)) + } + t2 := time.Now() + //logger.WithFields(logrus.Fields{ + // "command": "lset", + //}).Info(t2.Sub(t1)) + logExecDuration("command_lset", t2.Sub(t1)) + } + p.Submit(lsetfunc) + + //LTRIM + wg.Add(1) + ltrimfunc := func() { + t1 := time.Now() + defer wg.Done() + basekey := "ltrim_" + commons.RandString(4) + values := make([]interface{}, 50) + keys := []string{} + for i := 0; i < len(values); i++ { + values[i] = i + } + for i := 0; i < 50; i++ { + key := basekey + strconv.Itoa(i) + client.LPush(key, values...) 
+ keys = append(keys, key) + } + + for _, v := range keys { + client.LTrim(v, rand.Int63n(49), rand.Int63n(49)) + } + t2 := time.Now() + //logger.WithFields(logrus.Fields{ + // "command": "ltrim", + //}).Info(t2.Sub(t1)) + logExecDuration("command_ltrim", t2.Sub(t1)) + } + p.Submit(ltrimfunc) + + //LINSERT + wg.Add(1) + linsertfunc := func() { + defer wg.Done() + t1 := time.Now() + basekey := "linsert_" + commons.RandString(8) + keys := []string{} + values := make([]interface{}, 40) + for i := 0; i < len(values); i++ { + values[i] = i + } + + for i := 0; i < 50; i++ { + key := basekey + strconv.Itoa(i) + client.LPush(key, values...) + keys = append(keys, key) + } + + for _, v := range keys { + client.LInsert(v, "AFTER", rand.Intn(40), v) + } + + t2 := time.Now() + //logger.WithFields(logrus.Fields{ + // "command": "linsert", + //}).Info(t2.Sub(t1)) + logExecDuration("command_linsert", t2.Sub(t1)) + } + p.Submit(linsertfunc) + + //RPOP + wg.Add(1) + lrpopfunc := func() { + t1 := time.Now() + defer wg.Done() + basekey := "rpop_" + commons.RandString(4) + values := make([]interface{}, 50) + keys := []string{} + for i := 0; i < len(values); i++ { + values[i] = i + } + for i := 0; i < 50; i++ { + key := basekey + strconv.Itoa(i) + client.RPush(key, values...) + keys = append(keys, key) + } + + for _, v := range keys { + client.RPop(v) + } + t2 := time.Now() + //logger.WithFields(logrus.Fields{ + // "command": "rpop", + //}).Info(t2.Sub(t1)) + logExecDuration("command_rpop", t2.Sub(t1)) + } + p.Submit(lrpopfunc) + + //RPOPLPUSH + wg.Add(1) + rpoplpushfunc := func() { + defer wg.Done() + t1 := time.Now() + basekey := "rpoplpush_" + commons.RandString(8) + keys := []string{} + values := make([]interface{}, 40) + for i := 0; i < len(values); i++ { + values[i] = i + } + + for i := 0; i < 50; i++ { + key := basekey + strconv.Itoa(i) + client.LPush(key, values...) 
+ keys = append(keys, key) + } + + for k, v := range keys { + client.RPopLPush(v, v+strconv.Itoa(k)) + } + + t2 := time.Now() + //logger.WithFields(logrus.Fields{ + // "command": "rpoplpush", + //}).Info(t2.Sub(t1)) + logExecDuration("command_rpoplpush", t2.Sub(t1)) + } + p.Submit(rpoplpushfunc) + + //RPUSHX + wg.Add(1) + rpushxfunc := func() { + defer wg.Done() + t1 := time.Now() + basekey := "rpushx_" + commons.RandString(8) + values := make([]interface{}, 40) + for i := 0; i < len(values); i++ { + values[i] = i + } + + client.RPushX(basekey, values) + t2 := time.Now() + //logger.WithFields(logrus.Fields{ + // "command": "rpushx", + //}).Info(t2.Sub(t1)) + logExecDuration("command_rpushx", t2.Sub(t1)) + } + p.Submit(rpushxfunc) + + //BLPOP + wg.Add(1) + blpopfunc := func() { + defer wg.Done() + t1 := time.Now() + basekey := "blpop_" + commons.RandString(8) + keys := []string{} + values := make([]interface{}, 40) + for i := 0; i < len(values); i++ { + values[i] = i + } + for i := 0; i < 50; i++ { + key := basekey + strconv.Itoa(i) + client.LPush(key, values...) + keys = append(keys, key) + } + client.BLPop(5*time.Second, keys...) + + t2 := time.Now() + //logger.WithFields(logrus.Fields{ + // "command": "blpop", + //}).Info(t2.Sub(t1)) + logExecDuration("command_blpop", t2.Sub(t1)) + } + p.Submit(blpopfunc) + + //BRPOP + wg.Add(1) + brpopfunc := func() { + defer wg.Done() + t1 := time.Now() + basekey := "brpop_" + commons.RandString(8) + keys := []string{} + values := make([]interface{}, 40) + for i := 0; i < len(values); i++ { + values[i] = i + } + for i := 0; i < 50; i++ { + key := basekey + strconv.Itoa(i) + client.LPush(key, values...) + keys = append(keys, key) + } + client.BRPop(5*time.Second, keys...) 
+ + t2 := time.Now() + //logger.WithFields(logrus.Fields{ + // "command": "brpop", + //}).Info(t2.Sub(t1)) + logExecDuration("command_brpop", t2.Sub(t1)) + } + p.Submit(brpopfunc) + + //BRPOPLPUSH + wg.Add(1) + brpoplpushfunc := func() { + defer wg.Done() + t1 := time.Now() + basekey := "brpoplpush_" + commons.RandString(8) + keys := []string{} + values := make([]interface{}, 40) + for i := 0; i < len(values); i++ { + values[i] = i + } + + for i := 0; i < 50; i++ { + key := basekey + strconv.Itoa(i) + client.LPush(key, values...) + keys = append(keys, key) + } + + for k, v := range keys { + client.BRPopLPush(v, v+strconv.Itoa(k), 30*time.Second) + } + + t2 := time.Now() + //logger.WithFields(logrus.Fields{ + // "command": "brpoplpush", + //}).Info(t2.Sub(t1)) + logExecDuration("command_brpoplpush", t2.Sub(t1)) + } + p.Submit(brpoplpushfunc) + + //SADD + wg.Add(1) + saddfunc := func() { + defer wg.Done() + t1 := time.Now() + basekey := "sadd_" + commons.RandString(4) + for i := 0; i < 50; i++ { + client.SAdd(basekey, basekey+strconv.Itoa(i)) + } + t2 := time.Now() + //logger.WithFields(logrus.Fields{ + // "command": "sadd", + //}).Info(t2.Sub(t1)) + logExecDuration("command_sadd", t2.Sub(t1)) + } + p.Submit(saddfunc) + + //SDIFFSTORE + wg.Add(1) + sdiffstorefunc := func() { + defer wg.Done() + t1 := time.Now() + basekey := "sdiffstore_" + commons.RandString(4) + keys := []string{} + for i := 0; i < 50; i++ { + key := basekey + strconv.Itoa(i) + client.SAdd(key, strconv.Itoa(rand.Intn(10))) + keys = append(keys, key) + } + client.SDiffStore(basekey+commons.RandString(2), keys...) 
+ t2 := time.Now()
+ //logger.WithFields(logrus.Fields{
+ // "command": "sdiffstore",
+ //}).Info(t2.Sub(t1))
+ logExecDuration("command_sdiffstore", t2.Sub(t1))
+ }
+ p.Submit(sdiffstorefunc)
+
+ //SINTERSTORE
+ wg.Add(1)
+ sinsertstorefunc := func() {
+ defer wg.Done()
+ t1 := time.Now()
+ basekey := "sinsertstore_" + commons.RandString(4)
+ keys := []string{}
+ for i := 0; i < 50; i++ {
+ key := basekey + strconv.Itoa(i)
+ client.SAdd(key, strconv.Itoa(rand.Intn(10)))
+ keys = append(keys, key)
+ }
+ client.SInterStore(basekey+commons.RandString(2), keys...)
+ t2 := time.Now()
+ //logger.WithFields(logrus.Fields{
+ // "command": "sinsertstore",
+ //}).Info(t2.Sub(t1))
+ logExecDuration("command_sinsertstore", t2.Sub(t1))
+ }
+ p.Submit(sinsertstorefunc)
+
+ //SMOVE
+ wg.Add(1)
+ smovefunc := func() {
+ defer wg.Done()
+ t1 := time.Now()
+ basekey := "smove_" + commons.RandString(4)
+ values := []string{}
+ for i := 0; i < 50; i++ {
+ value := basekey + strconv.Itoa(i)
+ client.SAdd(basekey, value)
+ values = append(values, value)
+ }
+ for i := 0; i < 10; i++ {
+ client.SMove(basekey, basekey+strconv.Itoa(i), values[i+1])
+ }
+
+ t2 := time.Now()
+ //logger.WithFields(logrus.Fields{
+ // "command": "smove",
+ //}).Info(t2.Sub(t1))
+ logExecDuration("command_smove", t2.Sub(t1))
+ }
+ p.Submit(smovefunc)
+
+ //SPOP
+ wg.Add(1)
+ spopfunc := func() {
+ defer wg.Done()
+ t1 := time.Now()
+ basekey := "spop_" + commons.RandString(4)
+
+ for i := 0; i < 50; i++ {
+ value := basekey + strconv.Itoa(i)
+ client.SAdd(basekey, value)
+
+ }
+ for i := 0; i < 10; i++ {
+ client.SPop(basekey)
+ }
+
+ t2 := time.Now()
+ //logger.WithFields(logrus.Fields{
+ // "command": "spop",
+ //}).Info(t2.Sub(t1))
+ logExecDuration("command_spop", t2.Sub(t1))
+ }
+ p.Submit(spopfunc)
+
+ //SREM
+ wg.Add(1)
+ sremfunc := func() {
+ defer wg.Done()
+ t1 := time.Now()
+ basekey := "srem_" + commons.RandString(4)
+
+ for i := 0; i < 50; i++ {
+
+ client.SAdd(basekey, i)
+
+ }
+ for i := 0; i <
10; i++ { + client.SRem(basekey, i+1) + } + + t2 := time.Now() + //logger.WithFields(logrus.Fields{ + // "command": "srem", + //}).Info(t2.Sub(t1)) + logExecDuration("command_srem", t2.Sub(t1)) + } + p.Submit(sremfunc) + + //SUNIONSTORE + wg.Add(1) + sunionstorefunc := func() { + defer wg.Done() + t1 := time.Now() + basekey := "sunionstore_" + commons.RandString(4) + keys := []string{} + for i := 0; i < 50; i++ { + key := basekey + strconv.Itoa(i) + client.SAdd(key, strconv.Itoa(rand.Intn(10))) + keys = append(keys, key) + } + client.SUnionStore(basekey, keys...) + t2 := time.Now() + //logger.WithFields(logrus.Fields{ + // "command": "sunionstore", + //}).Info(t2.Sub(t1)) + logExecDuration("command_sunionstore", t2.Sub(t1)) + } + p.Submit(sunionstorefunc) + + //ZADD + wg.Add(1) + zaddfunc := func() { + defer wg.Done() + t1 := time.Now() + basekey := "zadd_" + commons.RandString(4) + + members := []*redis.Z{} + for i := 0; i < 50; i++ { + member := redis.Z{ + Score: rand.Float64(), + Member: basekey + strconv.Itoa(i), + } + members = append(members, &member) + } + client.ZAdd(basekey, members...) + + t2 := time.Now() + //logger.WithFields(logrus.Fields{ + // "command": "zadd", + //}).Info(t2.Sub(t1)) + logExecDuration("command_zadd", t2.Sub(t1)) + } + p.Submit(zaddfunc) + + //ZINCRBY + wg.Add(1) + zincrbyfunc := func() { + defer wg.Done() + t1 := time.Now() + basekey := "zincrby_" + commons.RandString(4) + members := []*redis.Z{} + for i := 0; i < 50; i++ { + member := redis.Z{ + Score: rand.Float64(), + Member: basekey + strconv.Itoa(i), + } + members = append(members, &member) + } + client.ZAdd(basekey, members...) 
+ + for _, v := range members { + client.ZIncrBy(basekey, rand.Float64()*10, v.Member.(string)) + } + + t2 := time.Now() + //logger.WithFields(logrus.Fields{ + // "command": "zincrby", + //}).Info(t2.Sub(t1)) + logExecDuration("command_zincrby", t2.Sub(t1)) + } + p.Submit(zincrbyfunc) + + //ZREM + wg.Add(1) + zremfunc := func() { + defer wg.Done() + t1 := time.Now() + basekey := "zrem_" + commons.RandString(4) + members := []*redis.Z{} + for i := 0; i < 50; i++ { + member := redis.Z{ + Score: rand.Float64(), + Member: basekey + strconv.Itoa(i), + } + members = append(members, &member) + } + client.ZAdd(basekey, members...) + + for i := 0; i < 10; i++ { + client.ZRem(basekey, members[i].Member) + } + + t2 := time.Now() + //logger.WithFields(logrus.Fields{ + // "command": "zrem", + //}).Info(t2.Sub(t1)) + logExecDuration("command_zrem", t2.Sub(t1)) + } + p.Submit(zremfunc) + + //ZREMRANGEBYRANK + wg.Add(1) + zremrangebyrankfunc := func() { + defer wg.Done() + t1 := time.Now() + basekey := "zremrangebyrank_" + commons.RandString(4) + members := []*redis.Z{} + for i := 0; i < 50; i++ { + member := redis.Z{ + Score: rand.Float64(), + Member: basekey + strconv.Itoa(i), + } + members = append(members, &member) + } + client.ZAdd(basekey, members...) + + client.ZRemRangeByRank(basekey, rand.Int63n(10), rand.Int63n(50)) + + t2 := time.Now() + //logger.WithFields(logrus.Fields{ + // "command": "zremrangebyrank", + //}).Info(t2.Sub(t1)) + logExecDuration("command_zremrangebyrank", t2.Sub(t1)) + } + p.Submit(zremrangebyrankfunc) + + //ZREMRANGEBYSCORE + wg.Add(1) + zremrangebyscorefunc := func() { + defer wg.Done() + t1 := time.Now() + basekey := "zremrangebyscore_" + commons.RandString(4) + members := []*redis.Z{} + for i := 0; i < 50; i++ { + member := redis.Z{ + Score: float64(i) + 1, + Member: basekey + strconv.Itoa(i), + } + members = append(members, &member) + } + client.ZAdd(basekey, members...) 
+ + client.ZRemRangeByScore(basekey, "10", "20") + + t2 := time.Now() + //logger.WithFields(logrus.Fields{ + // "command": "zremrangebyscore", + //}).Info(t2.Sub(t1)) + logExecDuration("command_zremrangebyscore", t2.Sub(t1)) + } + p.Submit(zremrangebyscorefunc) + + //ZUNIONSTORE + wg.Add(1) + zunionstorefunc := func() { + defer wg.Done() + t1 := time.Now() + basekey := "zunionstore_" + commons.RandString(4) + keys := []string{} + for i := 0; i < 50; i++ { + key := basekey + strconv.Itoa(i) + member := redis.Z{ + Score: float64(i) + 1, + Member: basekey + strconv.Itoa(i), + } + client.ZAdd(key, &member) + keys = append(keys, key) + + } + + zstore := redis.ZStore{ + Keys: keys, + Aggregate: "sum", + Weights: []float64{1.0}, + } + + client.ZUnionStore(basekey+commons.RandString(2), &zstore) + + t2 := time.Now() + //logger.WithFields(logrus.Fields{ + // "command": "zunionstore", + //}).Info(t2.Sub(t1)) + logExecDuration("command_zunionstore", t2.Sub(t1)) + } + p.Submit(zunionstorefunc) + + //ZINTERSTORE + wg.Add(1) + zinterstorefunc := func() { + defer wg.Done() + t1 := time.Now() + basekey := "zinterstore_" + commons.RandString(4) + keys := []string{} + for i := 0; i < 50; i++ { + key := basekey + strconv.Itoa(i) + member := redis.Z{ + Score: float64(i), + Member: basekey, + } + client.ZAdd(key, &member) + keys = append(keys, key) + + } + + zstore := redis.ZStore{ + Keys: keys, + Aggregate: "sum", + Weights: []float64{1.0}, + } + + client.ZInterStore(basekey+commons.RandString(2), &zstore) + + t2 := time.Now() + //logger.WithFields(logrus.Fields{ + // "command": "zinterstore", + //}).Info(t2.Sub(t1)) + logExecDuration("command_zinterstore", t2.Sub(t1)) + } + p.Submit(zinterstorefunc) + + wg.Wait() + +} + +func logExecDuration(msg string, duration time.Duration) { + logger.Info(msg, zap.Duration("", duration)) +} diff --git a/global/consts.go b/global/consts.go new file mode 100644 index 0000000..90f7859 --- /dev/null +++ b/global/consts.go @@ -0,0 +1,214 @@ +package 
global + +// TaskStatusType 任务状态类型 +type TaskStatusType int + +// TaskType 任务类型 +type TaskType int + +const ( + TasksTaskIDPrefix = "/tasks/taskid/" //TasksTaskidPrefix 任务id前缀 key:/tasks/taskid/{taskid} ;value: taskstatusjson + TasksNodePrefix = "/tasks/node/" //TasksNodePrefix key:/tasks/node/{nodeId}/{taskId}; value:{"nodeId":"xxx","taskId":"xxx"} + TasksGroupIDPrefix = "/tasks/groupid/" //TasksGroupidPrefix key:/tasks/groupid/{groupid}/{taskId};value:{"groupId":"xxx","taskId":"xxx"} + TasksStatusPrefix = "/tasks/status/" //TasksStatusPrefix key:/tasks/status/{currentstatus}/{taskid};value:{"taskId":"testId"} + TasksRdbversionPrefix = "/tasks/rdbversion/" //TasksRdbversionPrefix key:/tasks/rdbversion/{redisVersion}/{rdbVersion};value:{"id":1,"redis_version": "2.6","rdb_version": 6} + TasksOffsetPrefix = "/tasks/offset/" //TasksOffsetPrefix key:/tasks/offset/{taskId};value:{"replId":"xxx","replOffset":"-1"} + TasksNamePrefix = "/tasks/name/" //TasksNamePrefix key:/tasks/name/{taskname};value:{"taskId":"testId"} + TasksTypePrefix = "/tasks/type/" //TasksTypePrefix key:/tasks/type/{type}/{taskId};value:{"taskid":"xxx","groupId":"xxx","nodeId":"xxx"} + TasksBigkeyPrefix = "/tasks/bigkey/" //TasksBigkeyPrefix key:/tasks/bigkey/{taskId}/{bigKey};value:{"id":1,"taskId":"xxx","command":"xxx","command_type":"xxx"} + TasksMd5Prefix = "/tasks/md5/" // TasksMd5Prefix key:/tasks/md5/{md5};value:{"taskid":"xxx","groupId":"xxx","nodeId":"xx"} + NodesPrefix = "/nodes/" // NodesPrefix key:/nodes/{nodetype}/{nodeID};value:{"nodeaddr":"10.0.0.1","nodeport":8888,"online":true,"lastreporttime":1615275888064} + CursorPrefix = "/cursor/" // CursorPrefix key:/cursor/{qureryID};value:{"nodeaddr":"10.0.0.1","nodeport":8888,"online":true,"lastreporttime":1615275888064} + LastKeyAcrossPrefix = "/tasks/lastkeyacross/" //LastKeyAcrossPrefix key:/tasks/lastkeyacross/{taskId};value:{"lastKeyCommitTime": 1,"lastKeyUpdateTime": 1,"taskId":"xxx","groupId":"xxx"} +) + +const ( + //IDSeed id种子 + 
IDSeed = "/uniqid/idseed" +) + +const ( + // NodeTypePortal 节点类型 portal + NodeTypePortal = "portal" + // NodeTypeRedissyncer 节点类型 redissyncernodeserver + NodeTypeRedissyncer = "redissyncernodeserver" +) + +//任务状态 +const ( + // TaskStatusTypeSTOP STOP 0 任务停止状态 + TaskStatusTypeSTOP TaskStatusType = 0 + // TaskStatusTypeCREATING CREATING 1 任务创建中 + TaskStatusTypeCREATING TaskStatusType = 1 + // TaskStatusTypeCREATED CREATED 2 任务创建完成 + TaskStatusTypeCREATED TaskStatusType = 2 + // TaskStatusTypeSTARTING 3 任务运行中,表示数据同步以前,发送psync命令,源redis进行bgsave 生成rdb的过程;描述不太贴切,待改进 + TaskStatusTypeSTARTING TaskStatusType = 3 + // TaskStatusTypeBROKEN BROKEN 5 任务异常 + TaskStatusTypeBROKEN TaskStatusType = 5 + // TaskStatusTypeRDBRUNNING RDBRUNNING 6 全量RDB同步过程中 + TaskStatusTypeRDBRUNNING TaskStatusType = 6 + // TaskStatusTypeCOMMANDRUNNING COMMANDRUNNING 7 增量同步中 + TaskStatusTypeCOMMANDRUNNING TaskStatusType = 7 + // TaskStatusTypeFINISH FINISH 8 + TaskStatusTypeFINISH TaskStatusType = 8 +) + +//任务类型 +const ( + // TaskTypeSYNC SYNC 1 replication 已使用 + TaskTypeSYNC TaskType = 1 + // TaskTypeRDB RDB 2 RDB文件解析 已使用 + TaskTypeRDB TaskType = 2 + // TaskTypeAOF AOF 3 AOF文件解析 已使用 + TaskTypeAOF TaskType = 3 + // TaskTypeMIXED MIXED 4 混合文件解析 已使用 + TaskTypeMIXED TaskType = 4 + // TaskTypeONLINERDB ONLINERDB 5 在线RDB解析 已使用 + TaskTypeONLINERDB TaskType = 5 + // TaskTypeONLINEAOF ONLINEAOF 6 在线AOF 已使用 + TaskTypeONLINEAOF TaskType = 6 + // TaskTypeONLINEMIXED ONLINEMIXED 7 在线混合文件解析 已使用 + TaskTypeONLINEMIXED TaskType = 7 + // TaskTypeCOMMANDDUMPUP COMMANDDUMPUP 8 增量命令实时备份 已使用 + TaskTypeCOMMANDDUMPUP TaskType = 8 +) + +func (taskStatusType TaskStatusType) String() string { + switch taskStatusType { + case TaskStatusTypeSTOP: + return "STOP" + case TaskStatusTypeCREATING: + return "CREATING" + case TaskStatusTypeCREATED: + return "CREATED" + case TaskStatusTypeSTARTING: + return "STARTING" + case TaskStatusTypeBROKEN: + return "BROKEN" + case TaskStatusTypeRDBRUNNING: + return "RDBRUNNING" + case 
TaskStatusTypeCOMMANDRUNNING: + return "COMMANDRUNNING" + case TaskStatusTypeFINISH: + return "FINISH" + default: + return "" + } +} + +func (taskType TaskType) String() string { + switch taskType { + case TaskTypeSYNC: + return "SYNC" + case TaskTypeRDB: + return "RDB" + case TaskTypeAOF: + return "AOF" + case TaskTypeMIXED: + return "MIXED" + case TaskTypeONLINERDB: + return "ONLINERDB" + case TaskTypeONLINEAOF: + return "ONLINEAOF" + case TaskTypeONLINEMIXED: + return "ONLINEMIXED" + case TaskTypeCOMMANDDUMPUP: + return "COMMANDDUMPUP" + default: + return "" + } +} + +// TaskStatus 任务状态 +type TaskStatus struct { + Afresh bool `mapstructure:"afresh" json:"afresh" yaml:"afresh"` + AllKeyCount int64 `mapstructure:"allKeyCount" json:"allKeyCount" yaml:"allKeyCount"` + AutoStart bool `mapstructure:"autostart" json:"autostart" yaml:"autostart"` + BatchSize int64 `mapstructure:"batchSize" json:"batchSize" yaml:"batchSize"` + CommandFilter string `mapstructure:"commandFilter" json:"commandFilter" yaml:"commandFilter"` + CreateTime string `mapstructure:"createTime" json:"createTime" yaml:"createTime"` + DBMapper interface{} `mapstructure:"dbMapper" json:"dbMapper" yaml:"dbMapper"` + //DBMapping interface{} `mapstructure:"dbMapping" json:"dbMapping" yaml:"dbMapping"` + ErrorCount int64 `mapstructure:"errorCount" json:"errorCount" yaml:"errorCount"` + ExpandJSON string `mapstructure:"expandJson" json:"expandJson" yaml:"expandJson"` + FileAddress string `mapstructure:"fileAddress" json:"fileAddress" yaml:"fileAddress"` + FilterType string `mapstructure:"filterType" json:"filterType" yaml:"filterType"` + GroupID string `mapstructure:"groupId" json:"groupId" yaml:"groupId"` + ID string `mapstructure:"id" json:"id" yaml:"id"` + KeyFilter string `mapstructure:"keyFilter" json:"keyFilter" yaml:"keyFilter"` + //LastKeyCommitTime int64 `mapstructure:"lastKeyCommitTime" json:"lastKeyCommitTime" yaml:"lastKeyCommitTime"` + //LastKeyUpdateTime int64 `mapstructure:"lastKeyUpdateTime" 
json:"lastKeyUpdateTime" yaml:"lastKeyUpdateTime"` + MD5 string `mapstructure:"md5" json:"md5" yaml:"md5"` + NodeID string `mapstructure:"nodeId" json:"nodeId" yaml:"nodeId"` + Offset int64 `mapstructure:"offset" json:"offset" yaml:"offset"` + OffsetPlace int `mapstructure:"offsetPlace" json:"offsetPlace" yaml:"offsetPlace"` + RdbKeyCount int64 `mapstructure:"rdbKeyCount" json:"rdbKeyCount" yaml:"rdbKeyCount"` + RdbVersion int `mapstructure:"rdbVersion" json:"rdbVersion" yaml:"rdbVersion"` + RealKeyCount int64 `mapstructure:"realKeyCount" json:"realKeyCount" yaml:"realKeyCount"` + RedisVersion float64 `mapstructure:"redisVersion" json:"redisVersion" yaml:"redisVersion"` + ReplID string `mapstructure:"replId" json:"replId" yaml:"replId"` + SourceACL bool `mapstructure:"sourceAcl" json:"sourceAcl" yaml:"sourceAcl"` + SourceHost string `mapstructure:"sourceHost" json:"sourceHost" yaml:"sourceHost"` + SourcePassword string `mapstructure:"sourcePassword" json:"sourcePassword" yaml:"sourcePassword"` + SourcePort int `mapstructure:"sourcePort" json:"sourcePort" yaml:"sourcePort"` + SourceRedisAddress string `mapstructure:"sourceRedisAddress" json:"sourceRedisAddress" yaml:"sourceRedisAddress"` + SourceRedisType int `mapstructure:"sourceRedisType" json:"sourceRedisType" yaml:"sourceRedisType"` + SourceURI string `mapstructure:"sourceUri" json:"sourceUri" yaml:"sourceUri"` + SourceUserName string `mapstructure:"sourceUserName" json:"sourceUserName" yaml:"sourceUserName"` + Status int `mapstructure:"status" json:"status" yaml:"status"` + SyncType int `mapstructure:"syncType" json:"syncType" yaml:"syncType"` + TargetACL bool `mapstructure:"targetAcl" json:"targetAcl" yaml:"targetAcl"` + TargetHost string `mapstructure:"targetHost" json:"targetHost" yaml:"targetHost"` + TargetPassword string `mapstructure:"targetPassword" json:"targetPassword" yaml:"targetPassword"` + TargetPort int `mapstructure:"targetPort" json:"targetPort" yaml:"targetPort"` + TargetRedisAddress string 
`mapstructure:"targetRedisAddress" json:"targetRedisAddress" yaml:"targetRedisAddress"` + TargetRedisType int `mapstructure:"targetRedisType" json:"targetRedisType" yaml:"targetRedisType"` + TargetURI []string `mapstructure:"targetUri" json:"targetUri" yaml:"targetUri"` + TargetUserName string `mapstructure:"targetUserName" json:"targetUserName" yaml:"targetUserName"` + TaskID string `mapstructure:"taskId" json:"taskId" yaml:"taskId"` + TaskMsg string `mapstructure:"taskMsg" json:"taskMsg" yaml:"taskMsg"` + TaskName string `mapstructure:"taskName" json:"taskName" yaml:"taskName"` + TaskType int `mapstructure:"tasktype" json:"tasktype" yaml:"tasktype"` + TimeDeviation int64 `mapstructure:"timeDeviation" json:"timeDeviation" yaml:"timeDeviation"` + UpdateTime string `mapstructure:"updateTime" json:"updateTime" yaml:"updateTime"` +} + +type TasksOffset struct { + ReplID string `mapstructure:"replId" json:"replId" yaml:"replId"` + ReplOffset int64 `mapstructure:"replOffset" json:"replOffset" yaml:"replOffset"` +} + +type TaskIDVal struct { + TaskID string `mapstructure:"taskId" json:"taskId" yaml:"taskId"` +} + +type TasksNodeVal struct { + NodeID string `mapstructure:"NodeID" json:"NodeID" yaml:"NodeID"` + TaskID string `mapstructure:"taskId" json:"taskId" yaml:"taskId"` +} + +type TasksGroupIDVal struct { + GroupID string `mapstructure:"groupId" json:"groupId" yaml:"groupId"` + TaskID string `mapstructure:"taskId" json:"taskId" yaml:"taskId"` +} + +type TasksTypeVal struct { + TaskID string `map:"taskId" json:"taskId" yaml:"taskId"` + GroupID string `map:"groupId" json:"groupId" yaml:"groupId"` + NodeID string `map:"nodeId" json:"nodeId" yaml:"nodeId"` +} + +type TasksMD5Val struct { + TaskID string `map:"taskId" json:"taskId" yaml:"taskId"` + GroupID string `map:"groupId" json:"groupId" yaml:"groupId"` + NodeID string `map:"nodeId" json:"nodeId" yaml:"nodeId"` +} + +// LastKeyAcross 最后一个key流过引擎的时间 +type LastKeyAcross struct { + LastKeyCommitTime int64 
`map:"lastKeyCommitTime" json:"lastKeyCommitTime" yaml:"lastKeyCommitTime"` + LastKeyUpdateTime int64 `map:"lastKeyUpdateTime" json:"lastKeyUpdateTime" yaml:"lastKeyUpdateTime"` + TaskID string `map:"taskId" json:"taskId" yaml:"taskId"` + GroupID string `map:"groupId" json:"groupId" yaml:"groupId"` +} diff --git a/global/errors.go b/global/errors.go new file mode 100644 index 0000000..abd791f --- /dev/null +++ b/global/errors.go @@ -0,0 +1,46 @@ +package global + +type ErrorCode int +type Error struct { + Code ErrorCode `map:"code" json:"code" yaml:"code"` + Msg string `map:"msg" json:"msg" yaml:"msg"` +} + +const ( + ErrorSystemError ErrorCode = 10001 + ErrorCursorFinished ErrorCode = 20001 + ErrorNodeNotExists ErrorCode = 40001 + ErrorNodeIsRunning ErrorCode = 40002 + ErrorNodeNotAlive ErrorCode = 40003 + ErrorTaskNotExists ErrorCode = 50001 + ErrorTaskStatusIsNil ErrorCode = 50002 + ErrorTaskGroupNotExists ErrorCode = 50003 + ErrorEtcdKeyNotExists ErrorCode = 60001 +) + +func (code ErrorCode) String() string { + switch code { + case 20001: + return "cursor query have finished" + case 40001: + return "node not exists" + case 40002: + return "node is running" + case 40003: + return "node not alive" + case 50001: + return "task not exists" + case 50002: + return "task status is nil" + case 50003: + return "task group not exists" + case 60001: + return "etcd key not exists" + default: + return "" + } +} + +func (err *Error) Error() string { + return err.Code.String() +} diff --git a/global/globalconfig.go b/global/globalconfig.go new file mode 100644 index 0000000..8f84b16 --- /dev/null +++ b/global/globalconfig.go @@ -0,0 +1,16 @@ +package global + +import ( + "github.com/spf13/viper" + "go.uber.org/zap" + "sync" + "testcase/config" +) + +var ( + RSPViper *viper.Viper + RSPLog *zap.Logger + RSPConfig config.Server + once sync.Once +) + diff --git a/global/globallog.go b/global/globallog.go new file mode 100644 index 0000000..610cc5c --- /dev/null +++ 
b/global/globallog.go @@ -0,0 +1,41 @@ +package global + +import ( + "github.com/sirupsen/logrus" + "os" + "sync" +) + +var ( + log *logrus.Logger + initLog sync.Once +) + +func GetInstance() *logrus.Logger { + initLog.Do(func() { + log = logrus.New() + log.Formatter = &logrus.TextFormatter{} + log.Out = os.Stdout + log.Level = logrus.DebugLevel + }) + return log +} + +// SetLog 设置log +func SetLog(l *logrus.Logger) { + log = l +} + +// WithField 使用全局log返回logrus.Entry指针 +func WithField(key string, value interface{}) *logrus.Entry { + return log.WithField(key, value) +} + +func WithFields(fields logrus.Fields) *logrus.Entry { + return log.WithFields(fields) +} + +// Debug 使用全局log记录信息 +func Debug(args ...interface{}) { + log.Debug(args...) +} diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..e6d15ac --- /dev/null +++ b/go.mod @@ -0,0 +1,26 @@ +module testcase + +go 1.16 + +require ( + github.com/chzyer/logex v1.1.10 // indirect + github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e + github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 // indirect + github.com/fsnotify/fsnotify v1.4.7 + github.com/go-redis/redis/v7 v7.0.0-beta.4 + github.com/json-iterator/go v1.1.12 + github.com/lestrrat-go/file-rotatelogs v2.4.0+incompatible + github.com/lestrrat-go/strftime v1.0.5 // indirect + github.com/mattn/go-shellwords v1.0.11 + github.com/panjf2000/ants/v2 v2.2.2 + github.com/pkg/errors v0.8.1 + github.com/satori/go.uuid v1.2.0 + github.com/sirupsen/logrus v1.2.0 + github.com/spf13/cobra v0.0.5 + github.com/spf13/viper v1.6.1 + github.com/tidwall/gjson v1.6.5 + go.uber.org/zap v1.10.0 + golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 + gopkg.in/yaml.v2 v2.2.7 + +) diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..c234d7b --- /dev/null +++ b/go.sum @@ -0,0 +1,231 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1 
h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/chzyer/logex v1.1.10 h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e h1:fY5BOSpyZCqRo5OhCuC+XN+r/bBCmeuuJtjz+bCNIf8= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 h1:q763qf9huN11kDQavWsoZXJNW3xEE4JJyHa5Q25/sd8= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd 
v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-redis/redis/v7 v7.0.0-beta.4 h1:p6z7Pde69EGRWvlC++y8aFcaWegyrKHzOBGo0zUACTQ= +github.com/go-redis/redis/v7 v7.0.0-beta.4/go.mod h1:xhhSbUMTsleRPur+Vgx9sUHtyN33bdjxY+9/0n9Ig8s= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod 
h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/jonboulle/clockwork v0.1.0 h1:VKV+ZcuP6l3yW9doeqz6ziZGgcynBVQO+obU0+0hcPo= +github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= +github.com/json-iterator/go v1.1.12 
h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/lestrrat-go/envload v0.0.0-20180220234015-a3eb8ddeffcc h1:RKf14vYWi2ttpEmkA4aQ3j4u9dStX2t4M8UM6qqNsG8= +github.com/lestrrat-go/envload v0.0.0-20180220234015-a3eb8ddeffcc/go.mod h1:kopuH9ugFRkIXf3YoqHKyrJ9YfUFsckUU9S7B+XP+is= +github.com/lestrrat-go/file-rotatelogs v2.4.0+incompatible h1:Y6sqxHMyB1D2YSzWkLibYKgg+SwmyFU9dF2hn6MdTj4= +github.com/lestrrat-go/file-rotatelogs v2.4.0+incompatible/go.mod h1:ZQnN8lSECaebrkQytbHj4xNgtg8CR7RYXnPok8e0EHA= +github.com/lestrrat-go/strftime v1.0.5 h1:A7H3tT8DhTz8u65w+JRpiBxM4dINQhUXAZnhBa2xeOE= +github.com/lestrrat-go/strftime v1.0.5/go.mod h1:E1nN3pCbtMSu1yjSVeyuRFVm/U0xoR76fd03sz+Qz4g= 
+github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.1 h1:ZC2Vc7/ZFkGmsVC9KvOjumD+G5lXy2RtTKyzRKO2BQ4= +github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mattn/go-shellwords v1.0.11 h1:vCoR9VPpsk/TZFW2JwK5I9S0xdrtUq2bph6/YjEPnaw= +github.com/mattn/go-shellwords v1.0.11/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.8.0 h1:VkHVNpR4iVnU8XQR6DBm8BqYjN7CRzw+xKUbVVbbW9w= +github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v1.5.0 h1:izbySO9zDPmjJ8rDjLvkA2zJHIo+HkYXHnf7eN7SSyo= +github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/panjf2000/ants/v2 v2.2.2 h1:TWzusBjq/IflXhy+/S6u5wmMLCBdJnB9tPIx9Zmhvok= 
+github.com/panjf2000/ants/v2 v2.2.2/go.mod h1:1GFm8bV8nyCQvU5K4WvBCTG1/YBFOD2VzjffD8fV55A= +github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= +github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww= 
+github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/sirupsen/logrus v1.2.0 h1:juTguoYk5qI21pwyTXY3B3Y5cOTH3ZUyZCg1v/mihuo= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= +github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= +github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= +github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= +github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/spf13/viper v1.6.1 h1:VPZzIkznI1YhVMRi6vNFLHSwhnhReBfgTxIPccpfdZk= +github.com/spf13/viper v1.6.1/go.mod 
h1:t3iDnF5Jlj76alVNuyFBk5oUMCvsrkbvZK0WQdfDi5k= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/subosito/gotenv v1.2.0 h1:Slr1R9HxAlEKefgq5jn9U+DnETlIUa6HfgEzj0g5d7s= +github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= +github.com/tidwall/gjson v1.3.5 h1:2oW9FBNu8qt9jy5URgrzsVx/T/KSn3qn/smJQ0crlDQ= +github.com/tidwall/gjson v1.3.5/go.mod h1:P256ACg0Mn+j1RXIDXoss50DeIABTYK1PULOJHhxOls= +github.com/tidwall/gjson v1.6.5 h1:P/K9r+1pt9AK54uap7HcoIp6T3a7AoMg3v18tUis+Cg= +github.com/tidwall/gjson v1.6.5/go.mod h1:zeFuBCIqD4sN/gmqBzZ4j7Jd6UcA2Fc56x7QFsv+8fI= +github.com/tidwall/match v1.0.1 h1:PnKP62LPNxHKTwvHHZZzdOAOCtsJTjo6dZLCwpKm5xc= +github.com/tidwall/match v1.0.1/go.mod h1:LujAq0jyVjBy028G1WhWfIzbpQfMO8bBZ6Tyb0+pL9E= +github.com/tidwall/match v1.0.3 h1:FQUVvBImDutD8wJLN6c5eMzWtjgONK9MwIBCOrUJKeE= +github.com/tidwall/match v1.0.3/go.mod h1:eRSPERbgtNPcGhD8UCthc6PmLEQXEWd3PRB5JTxsfmM= +github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= +github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/tidwall/pretty v1.0.2 h1:Z7S3cePv9Jwm1KwS0513MRaoUe3S01WPbLNV40pwWZU= +github.com/tidwall/pretty v1.0.2/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= +github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod 
h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod 
h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092 h1:4QSRKanuywn15aTZvI/mIDEgPQpswuFndXpOj3rKEco= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181107165924-66b7b1311ac8/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod 
h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/ini.v1 v1.51.0 h1:AQvPpx3LzTDM0AjnIRlVFwFFGC+npRopjZxLJj6gdno= +gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.7 h1:VUgggvou5XRW9mHwD/yXxIYSMtY0zoKQf/v226p2nyo= +gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/interact/cli.go b/interact/cli.go new file mode 100644 index 0000000..0b2dbaf --- /dev/null +++ b/interact/cli.go @@ -0,0 +1,245 @@ +package interact + +import ( + "fmt" + "github.com/chzyer/readline" + "github.com/mattn/go-shellwords" + "github.com/spf13/cobra" + "io" + "os" + "strings" + "testcase/commons" + "testcase/core" + "testcase/global" + + //"interactioncli/check" + "testcase/cmd" +) + +type CommandFlags struct { + URL string + CAPath string + CertPath string + KeyPath string + Help bool +} + +var ( + commandFlags = CommandFlags{} + cfgFile string + //detach bool + //syncserver string + //Confignotseterr error + interact bool + //version bool +) + +var LivePrefixState struct { + LivePrefix string + IsEnable bool +} + +var query = "" + +var readLineCompleter *readline.PrefixCompleter + +func init() { + cobra.EnablePrefixMatching = true + cobra.OnInitialize(initConfig) +} + +func cliRun(cmd *cobra.Command, args []string) { + banner := "\n _ _ _____ \n _ __ ___ __| (_)___ ___ _ _ _ __ ___ ___ _ _|_ _|\n | '__/ _ \\/ _` | / __/ __| | | | '_ \\ / __/ _ \\ '__|| | \n | | | __/ (_| | \\__ \\__ \\ |_| | | | | (_| __/ | | | \n |_| \\___|\\__,_|_|___/___/\\__, |_| |_|\\___\\___|_| |_| \n |___/ \n" + if interact { + //err := check.CheckEnv() + //if err != nil { + // fmt.Println(err) + // os.Exit(1) + //} + + cmd.Println(banner) + cmd.Println("Input 'help;' for usage. \nCommand must end with ';'. 
\n'tab' for command complete.\n^C or exit to quit.") + loop() + return + } + + if len(args) == 0 { + cmd.Help() + return + } + +} + +func getBasicCmd() *cobra.Command { + + rootCmd := &cobra.Command{ + Use: "redissyncer-test", + Short: "redissyncer command line interface", + Long: "", + } + + rootCmd.PersistentFlags().BoolVarP(&commandFlags.Help, "help", "h", false, "help message") + + rootCmd.AddCommand( + cmd.NewConfigCommand(), + cmd.NewExecCommand(), + cmd.NewGenDataCommand(), + cmd.NewCaseListCommand(), + ) + + rootCmd.Flags().ParseErrorsWhitelist.UnknownFlags = true + rootCmd.SilenceErrors = true + return rootCmd +} + +func getInteractCmd(args []string) *cobra.Command { + rootCmd := getBasicCmd() + rootCmd.Run = func(cmd *cobra.Command, args []string) { + } + + rootCmd.SetArgs(args) + rootCmd.ParseFlags(args) + rootCmd.SetOut(os.Stdout) + hiddenFlag(rootCmd) + + return rootCmd +} + +func getMainCmd(args []string) *cobra.Command { + rootCmd := getBasicCmd() + + rootCmd.PersistentFlags().StringVarP(&cfgFile, "config", "c", "", "config file (default is $HOME/config.yaml)") + //rootCmd.PersistentFlags().StringVarP(&syncserver, "syncserver", "s", "", "sync server address") + //rootCmd.Flags().BoolVarP(&detach, "detach", "d", true, "Run pdctl without readline.") + rootCmd.Flags().BoolVarP(&interact, "interact", "i", false, "Run pdctl with readline.") + //rootCmd.Flags().BoolVarP(&version, "version", "V", false, "Print version information and exit.") + + rootCmd.Run = cliRun + + rootCmd.SetArgs(args) + rootCmd.ParseFlags(args) + rootCmd.SetOut(os.Stdout) + + readLineCompleter = readline.NewPrefixCompleter(genCompleter(rootCmd)...) + return rootCmd +} + +// Hide the flags in help and usage messages. 
+func hiddenFlag(cmd *cobra.Command) { + cmd.LocalFlags().MarkHidden("pd") + cmd.LocalFlags().MarkHidden("cacert") + cmd.LocalFlags().MarkHidden("cert") + cmd.LocalFlags().MarkHidden("key") +} + +// MainStart start main command +func MainStart(args []string) { + startCmd(getMainCmd, args) +} + +// Start start interact command +func Start(args []string) { + startCmd(getInteractCmd, args) +} + +func startCmd(getCmd func([]string) *cobra.Command, args []string) { + rootCmd := getCmd(args) + + if err := rootCmd.Execute(); err != nil { + rootCmd.Println(err) + } +} + +// initConfig reads in config file and ENV variables if set. +func initConfig() { + + if global.RSPViper != nil { + return + } + if cfgFile != "" { + if !commons.FileExists(cfgFile) { + panic("config file not exists") + } + + global.RSPViper = core.Viper(cfgFile) + global.RSPLog = core.Zap() + return + } + global.RSPViper = core.Viper() + global.RSPLog = core.Zap() + +} + +func loop() { + rl, err := readline.NewEx(&readline.Config{ + Prompt: "redissyncer-test> ", + HistoryFile: "/tmp/readline.tmp", + AutoComplete: readLineCompleter, + DisableAutoSaveHistory: true, + InterruptPrompt: "^C", + EOFPrompt: "^D", + HistorySearchFold: true, + }) + if err != nil { + panic(err) + } + defer rl.Close() + + var cmds []string + + for { + line, err := rl.Readline() + if err != nil { + if err == readline.ErrInterrupt { + break + } else if err == io.EOF { + break + } + continue + } + if line == "exit" { + os.Exit(0) + } + + line = strings.TrimSpace(line) + if len(line) == 0 { + continue + } + cmds = append(cmds, line) + + if !strings.HasSuffix(line, ";") { + rl.SetPrompt("... 
") + continue + } + cmd := strings.Join(cmds, " ") + cmds = cmds[:0] + rl.SetPrompt("redissyncer-test> ") + rl.SaveHistory(cmd) + + args, err := shellwords.Parse(cmd) + if err != nil { + fmt.Printf("parse command err: %v\n", err) + continue + } + Start(args) + } +} + +func genCompleter(cmd *cobra.Command) []readline.PrefixCompleterInterface { + pc := []readline.PrefixCompleterInterface{} + + for _, v := range cmd.Commands() { + if v.HasFlags() { + flagsPc := []readline.PrefixCompleterInterface{} + flagUsages := strings.Split(strings.Trim(v.Flags().FlagUsages(), " "), "\n") + for i := 0; i < len(flagUsages)-1; i++ { + flagsPc = append(flagsPc, readline.PcItem(strings.Split(strings.Trim(flagUsages[i], " "), " ")[0])) + } + flagsPc = append(flagsPc, genCompleter(v)...) + pc = append(pc, readline.PcItem(strings.Split(v.Use, " ")[0], flagsPc...)) + } else { + pc = append(pc, readline.PcItem(strings.Split(v.Use, " ")[0], genCompleter(v)...)) + } + } + return pc +} diff --git a/main.go b/main.go new file mode 100644 index 0000000..c4dd6df --- /dev/null +++ b/main.go @@ -0,0 +1,68 @@ +/* +Copyright © 2019 NAME HERE + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ +package main + +import ( + "fmt" + "io/ioutil" + "os" + "os/signal" + "strings" + "syscall" + "testcase/interact" +) + + +func main() { + + //fmt.Println(global.RSPViper) + //global.RSPLog = core.Zap() + + pdAddr := os.Getenv("PD_ADDR") + if pdAddr != "" { + os.Args = append(os.Args, "-u", pdAddr) + } + + sc := make(chan os.Signal, 1) + signal.Notify(sc, + syscall.SIGHUP, + syscall.SIGINT, + syscall.SIGTERM, + syscall.SIGQUIT) + + go func() { + sig := <-sc + fmt.Printf("\nGot signal [%v] to exit.\n", sig) + switch sig { + case syscall.SIGTERM: + os.Exit(0) + default: + os.Exit(1) + } + }() + + var input []string + stat, _ := os.Stdin.Stat() + if (stat.Mode() & os.ModeCharDevice) == 0 { + b, err := ioutil.ReadAll(os.Stdin) + if err != nil { + fmt.Println(err) + return + } + input = strings.Split(strings.TrimSpace(string(b[:])), " ") + } + interact.MainStart(append(os.Args[1:], input...)) +} diff --git a/model/nodemodel.go b/model/nodemodel.go new file mode 100644 index 0000000..eb8aaf5 --- /dev/null +++ b/model/nodemodel.go @@ -0,0 +1,7 @@ +package model + +type RemoveNodeModel struct { + NodeType string + NodeID string + TasksOnNodePolice string //位于该节点任务的政策,销毁或迁移 'destroy'、'move' +} diff --git a/model/response/taskresp.go b/model/response/taskresp.go new file mode 100644 index 0000000..87f7781 --- /dev/null +++ b/model/response/taskresp.go @@ -0,0 +1,39 @@ +package response + +import "testcase/global" + +type StopTasksResult struct { + TaskID string `map:"taskId" json:"taskId" yaml:"taskId"` + Result string `map:"result" json:"result" yaml:"result"` +} + +type TaskStatusResult struct { + TaskID string `map:"taskId" json:"taskId" yaml:"taskId"` + Errors *global.Error `map:"errors" json:"errors" yaml:"errors"` + TaskStatus *global.TaskStatus `map:"taskStatus" json:"taskStatus" yaml:"taskStatus"` +} + +type TaskStatusResultByName struct { + TaskName string `map:"taskName" json:"taskName" yaml:"taskName"` + Errors *global.Error `map:"errors" json:"errors" 
yaml:"errors"` + TaskStatus *global.TaskStatus `map:"taskStatus" json:"taskStatus" yaml:"taskStatus"` +} + +type TaskStatusResultByGroupID struct { + GroupID string `map:"taskId" json:"taskId" yaml:"taskId"` + Errors *global.Error `map:"errors" json:"errors" yaml:"errors"` + TaskStatusArray []*TaskStatusResult `map:"taskStatus" json:"taskStatus" yaml:"taskStatus"` +} + +type AllTaskStatusResult struct { + QueryID string `map:"queryID" json:"queryID" yaml:"queryID"` + LastPage bool `map:"lastPage" json:"lastPage" yaml:"lastPage"` + CurrentPage int64 `map:"currentPage" json:"currentPage" yaml:"currentPage"` + Errors []*global.Error `map:"errors" json:"errors" yaml:"errors"` + TaskStatusArray []*TaskStatusResult `map:"taskStatus" json:"taskStatus" yaml:"taskStatus"` +} + +type LastKeyAcrossResult struct { + LastKeyAcross global.LastKeyAcross `map:"lastkeyacross" json:"lastkeyacross" yaml:"lastkeyacross"` + Errors global.Error `map:"errors" json:"errors" yaml:"errors"` +} diff --git a/model/taskmodel.go b/model/taskmodel.go new file mode 100644 index 0000000..d3f46b9 --- /dev/null +++ b/model/taskmodel.go @@ -0,0 +1,43 @@ +package model + +type TaskCreateBody struct { + Name string `json:"name"` + Password int64 `json:"password"` +} + +type TaskStart struct { + TaskID string `mapstructure:"taskid" json:"taskid" yaml:"taskid"` + Afresh string `mapstructure:"afresh" json:"afresh" yaml:"afresh"` +} + +type TaskStopBodyToNode struct { + TaskIDs []string `maps:"taskids" json:"taskids" yaml:"taskids"` +} + +type TaskIDBody struct { + TaskID string `maps:"taskid" json:"taskid" yaml:"taskid"` +} + +type TaskListByTaskIDs struct { + TaskIDs []string `maps:"taskIDs" json:"taskIDs" yaml:"taskIDs"` +} + +type TaskListByGroupIDs struct { + GroupIDs []string `maps:"groupIDs" json:"groupIDs" yaml:"groupIDs"` +} + +type TaskListByTaskNames struct { + TaskNames []string `maps:"taskNames" json:"taskNames" yaml:"taskNames"` +} + +type TaskListAll struct { + QueryID string `maps:"queryID" 
json:"queryID" yaml:"queryID"` + BatchSize int64 `maps:"batchSize" json:"batchSize" yaml:"batchSize"` + KeyPrefix string `maps:"keyPrefix" json:"keyPrefix" yaml:"keyPrefix"` +} + +type TaskListByNode struct { + NodeID string `maps:"nodeID" json:"nodeID" yaml:"nodeID"` + QueryID string `maps:"queryID" json:"queryID" yaml:"queryID"` + BatchSize int64 `maps:"batchSize" json:"batchSize" yaml:"batchSize"` +} diff --git a/synctaskhandle/taskhandle.go b/synctaskhandle/taskhandle.go new file mode 100644 index 0000000..d1b4210 --- /dev/null +++ b/synctaskhandle/taskhandle.go @@ -0,0 +1,306 @@ +package synctaskhandle + +import ( + "encoding/json" + "errors" + "fmt" + "github.com/tidwall/gjson" + "go.uber.org/zap" + "io/ioutil" + "net/http" + "os" + "strings" + "testcase/global" + //"testcase/globalzap" + "testcase/model/response" + "time" + jsoniter "github.com/json-iterator/go" +) + +var logger = global.RSPLog + +const CreateTaskPath = "/api/task/create" +const StartTaskPath = "/api/task/start" +const StopTaskPath = "/api/task/stop" +const RemoveTaskPath = "/api/task/remove" +const ListTasksPath = "/api/task/listtasks" +const TaskListByIDs = "/api/task/listbyids" +const TaskListByName = "/api/task/listbynames" +const LastKeyAcross = "/api/task/lastkeyacross" +const ImportFilePath = "/api/v2/file/createtask" + +type Request struct { + Server string + Api string + Body string +} + +func (r Request) ExecRequest() (result string) { + client := &http.Client{} + //req, err := http.NewRequest("POST", r.Server+r.Api, strings.NewReader(r.Body)) + // + //if err != nil { + // logger.Sugar().Error(err) + // os.Exit(1) + //} + // + //req.Header.Set("Content-Type", "application/json") + //resp, err := client.Do(req) + + resp, err := client.Post(r.Server+r.Api, "application/json", strings.NewReader(r.Body)) + + if err != nil { + logger.Sugar().Error(err) + } + defer resp.Body.Close() + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + //logger.Sugar().Error(err) + 
logger.Sugar().Error(err) + os.Exit(1) + } + + //var dat map[string]interface{} + //json.Unmarshal(body, &dat) + //bodystr, jsonerr := json.MarshalIndent(dat, "", " ") + //if jsonerr != nil { + // logger.Sugar().Error(err) + //} + //return string(bodystr) + return string(body) +} + +//创建导入文件任务 +func Import(syncserver string, createjson string) []string { + importreq := &Request{ + Server: syncserver, + Api: ImportFilePath, + Body: createjson, + } + + resp := importreq.ExecRequest() + + taskids := gjson.Get(resp, "data").Array() + + if len(taskids) == 0 { + logger.Error("task create faile", zap.Any("response_info", resp)) + os.Exit(1) + } + taskidsstrarray := []string{} + for _, v := range taskids { + //fmt.Println(gjson.Get(v.String(), "taskId").String()) + taskidsstrarray = append(taskidsstrarray, gjson.Get(v.String(), "taskId").String()) + } + + return taskidsstrarray + +} + +//创建同步任务 +func CreateTask(syncserver string, createjson string) []string { + createreq := &Request{ + Server: syncserver, + Api: CreateTaskPath, + Body: createjson, + } + + fmt.Println(createjson) + resp := createreq.ExecRequest() + taskids := gjson.Get(resp, "data").Array() + fmt.Println(resp) + if len(taskids) == 0 { + logger.Sugar().Error(errors.New("task create faile \n"), resp) + //logger.Sugar().Info(resp) + os.Exit(1) + } + taskidsstrarray := []string{} + for _, v := range taskids { + //fmt.Println(gjson.Get(v.String(), "taskId").String()) + taskidsstrarray = append(taskidsstrarray, gjson.Get(v.String(), "taskId").String()) + } + + return taskidsstrarray + +} + +//Start task +func StartTask(syncserver string, taskid string) { + jsonmap := make(map[string]interface{}) + jsonmap["taskid"] = taskid + startjson, err := json.Marshal(jsonmap) + if err != nil { + logger.Sugar().Error(err) + os.Exit(1) + } + startreq := &Request{ + Server: syncserver, + Api: StartTaskPath, + Body: string(startjson), + } + startreq.ExecRequest() + +} + +//Stop task by task ids +func StopTaskByIds(syncserver 
string, taskId string) { + jsonmap := make(map[string]interface{}) + + jsonmap["taskId"] = taskId + stopjsonStr, err := json.Marshal(jsonmap) + if err != nil { + logger.Sugar().Error(err) + os.Exit(1) + } + stopreq := &Request{ + Server: syncserver, + Api: StopTaskPath, + Body: string(stopjsonStr), + } + response := stopreq.ExecRequest() + fmt.Println(response) + +} + +//Remove task by name +func RemoveTaskByName(syncserver string, taskname string) { + jsonmap := make(map[string]interface{}) + + taskids, err := GetSameTaskNameIDs(syncserver, taskname) + if err != nil { + logger.Sugar().Error(err) + os.Exit(1) + } + + if len(taskids) == 0 { + return + } + + for _, id := range taskids { + jsonmap["taskId"] = id + stopjsonStr, err := json.Marshal(jsonmap) + if err != nil { + logger.Sugar().Error(err) + os.Exit(1) + } + stopreq := &Request{ + Server: syncserver, + Api: StopTaskPath, + Body: string(stopjsonStr), + } + stopResult := stopreq.ExecRequest() + fmt.Println(stopResult) + + time.Sleep(10 * time.Second) + + removereq := &Request{ + Server: syncserver, + Api: RemoveTaskPath, + Body: string(stopjsonStr), + } + + removeResult := removereq.ExecRequest() + fmt.Println(removeResult) + } + +} + +//获取同步任务状态 +func GetTaskStatus(syncserver string, ids []string) (map[string]string, error) { + jsonmap := make(map[string]interface{}) + + //jsonmap["regulation"] = "byids" + jsonmap["taskIDs"] = ids + + listtaskjsonStr, err := json.Marshal(jsonmap) + if err != nil { + return nil, err + } + listreq := &Request{ + Server: syncserver, + Api: TaskListByIDs, + Body: string(listtaskjsonStr), + } + listresp := listreq.ExecRequest() + + taskarray := gjson.Get(listresp, "result").Array() + + if len(taskarray) == 0 { + return nil, errors.New("No status return") + } + + statusmap := make(map[string]string) + + for _, v := range taskarray { + id := gjson.Get(v.String(), "taskId").String() + status := v.String() + statusmap[id] = status + } + + return statusmap, nil +} + +// @title 
GetSameTaskNameIDs +// @description 获取同名任务列表 +// @auth Jsw 时间(2020/7/1 10:57 ) +// @param syncserver string "redissyncer ip:port" +// @param taskname string "任务名称" +// @return taskids []string "任务id数组" +func GetSameTaskNameIDs(syncserver string, taskname string) ([]string, error) { + + existstaskids := []string{} + listjsonmap := make(map[string]interface{}) + //listjsonmap["regulation"] = "bynames" + listjsonmap["taskNames"] = strings.Split(taskname, ",") + listjsonStr, err := json.Marshal(listjsonmap) + if err != nil { + logger.Info(err.Error()) + return nil, err + } + listtaskreq := &Request{ + Server: syncserver, + Api: TaskListByName, + Body: string(listjsonStr), + } + + listresp := listtaskreq.ExecRequest() + tasklist := gjson.Get(listresp, "result").Array() + + if len(tasklist) > 0 { + for _, v := range tasklist { + existstaskids = append(existstaskids, gjson.Get(v.String(), "taskStatus.taskId").String()) + } + } + + return existstaskids, nil +} + +func GetLastKeyAcross(syncserver string, taskID string) (response.LastKeyAcrossResult, error) { + var jsoniter = jsoniter.ConfigCompatibleWithStandardLibrary + var result response.LastKeyAcrossResult + reqJson := make(map[string]interface{}) + reqJson["taskId"] = taskID + jsonStr, err := json.Marshal(reqJson) + if err != nil { + logger.Info(err.Error()) + return result, err + } + req := &Request{ + Server: syncserver, + Api: LastKeyAcross, + Body: string(jsonStr), + } + + resp := req.ExecRequest() + + //jsonstr, err := jsoniter.Marshal(resp) + //if err != nil { + // return result, nil + //} + + if err := jsoniter.Unmarshal([]byte(resp), &result); err != nil { + return result, err + } + + return result, nil +} \ No newline at end of file diff --git a/tasks/cluster2cluster.json b/tasks/cluster2cluster.json new file mode 100644 index 0000000..9e6b308 --- /dev/null +++ b/tasks/cluster2cluster.json @@ -0,0 +1,11 @@ +{ + "sourcePassword": "testredis0102", + "sourceRedisAddress": 
"114.67.67.7:36379;114.67.83.163:36379;114.67.112.67:36379", + "targetRedisAddress": "114.67.67.7:16379;114.67.67.7:16380;114.67.83.163:16379;114.67.83.163:16380;114.67.112.67:16379;114.67.112.67:16380", + "targetPassword": "testredis0102", + "taskName": "cluster2cluster", + "targetRedisVersion": 4.0, + "autostart": true, + "afresh": true, + "batchSize": 100 +} \ No newline at end of file diff --git a/tasks/importaof2single.json b/tasks/importaof2single.json new file mode 100644 index 0000000..a00144a --- /dev/null +++ b/tasks/importaof2single.json @@ -0,0 +1,10 @@ +{ + "fileAddress": "/data", + "targetRedisAddress": "10.0.1.102:6379", + "targetPassword": "redistest0102", + "taskName": "importaofcase", + "autostart": true, + "fileType": "AOF", + "batchSize": 500, + "targetRedisVersion": 4.0 +} \ No newline at end of file diff --git a/tasks/importrdb2single.json b/tasks/importrdb2single.json new file mode 100644 index 0000000..13229b3 --- /dev/null +++ b/tasks/importrdb2single.json @@ -0,0 +1,10 @@ +{ + "fileAddress": "/data/dump.rdb", + "targetRedisAddress": "10.0.1.102:6379", + "targetPassword": "redistest0102", + "taskName": "importrdbcase", + "autostart": true, + "fileType": "RDB", + "batchSize": 500, + "targetRedisVersion": 5.0 +} diff --git a/tasks/listtasks.json b/tasks/listtasks.json new file mode 100644 index 0000000..bca4b3c --- /dev/null +++ b/tasks/listtasks.json @@ -0,0 +1,3 @@ +{ + "regulation": "all" +} \ No newline at end of file diff --git a/tasks/single2cluster.json b/tasks/single2cluster.json new file mode 100644 index 0000000..51e60cd --- /dev/null +++ b/tasks/single2cluster.json @@ -0,0 +1,11 @@ +{ + "sourcePassword": "redistest0102", + "sourceRedisAddress": "10.0.1.45:6379", + "targetRedisAddress": "114.67.67.7:16379;114.67.67.7:16380;114.67.83.163:16379;114.67.83.163:16380;114.67.112.67:16379;114.67.112.67:16380", + "targetPassword": "testredis0102", + "taskName": "single2cluster", + "targetRedisVersion": 4.0, + "autostart": true, + "afresh": 
true, + "batchSize": 100 +} diff --git a/tasks/single2single.json b/tasks/single2single.json new file mode 100644 index 0000000..c0128e4 --- /dev/null +++ b/tasks/single2single.json @@ -0,0 +1,11 @@ +{ + "sourcePassword": "redistest0102", + "sourceRedisAddress": "114.67.76.82:16374", + "targetRedisAddress": "114.67.120.120:16374", + "targetPassword": "redistest0102", + "taskName": "single2single", + "targetRedisVersion": 4.0, + "autostart": true, + "afresh": true, + "batchSize": 100 +} diff --git a/tasks/single2single_v3_v3.json b/tasks/single2single_v3_v3.json new file mode 100644 index 0000000..b127856 --- /dev/null +++ b/tasks/single2single_v3_v3.json @@ -0,0 +1,11 @@ +{ + "sourcePassword": "redistest0102", + "sourceRedisAddress": "10.0.1.31:16373", + "targetRedisAddress": "10.0.1.32:16373", + "targetPassword": "redistest0102", + "taskName": "single2single_v3_v3", + "targetRedisVersion": 3.0, + "autostart": true, + "afresh": true, + "batchSize": 100 +} diff --git a/tasks/single2single_v3_v4.json b/tasks/single2single_v3_v4.json new file mode 100644 index 0000000..3297026 --- /dev/null +++ b/tasks/single2single_v3_v4.json @@ -0,0 +1,11 @@ +{ + "sourcePassword": "redistest0102", + "sourceRedisAddress": "10.0.1.31:16373", + "targetRedisAddress": "10.0.1.32:16374", + "targetPassword": "redistest0102", + "taskName": "single2single_v3_v4", + "targetRedisVersion": 4.0, + "autostart": true, + "afresh": true, + "batchSize": 100 +} diff --git a/tasks/single2single_v3_v5.json b/tasks/single2single_v3_v5.json new file mode 100644 index 0000000..6d4639e --- /dev/null +++ b/tasks/single2single_v3_v5.json @@ -0,0 +1,11 @@ +{ + "sourcePassword": "redistest0102", + "sourceRedisAddress": "10.0.1.31:16373", + "targetRedisAddress": "10.0.1.32:16375", + "targetPassword": "redistest0102", + "taskName": "single2single_v3_v5", + "targetRedisVersion": 5.0, + "autostart": true, + "afresh": true, + "batchSize": 100 +} diff --git a/tasks/single2single_v3_v6.json 
b/tasks/single2single_v3_v6.json new file mode 100644 index 0000000..493f108 --- /dev/null +++ b/tasks/single2single_v3_v6.json @@ -0,0 +1,11 @@ +{ + "sourcePassword": "redistest0102", + "sourceRedisAddress": "10.0.1.31:16373", + "targetRedisAddress": "10.0.1.32:16376", + "targetPassword": "redistest0102", + "taskName": "single2single_v3_v6", + "targetRedisVersion": 6.0, + "autostart": true, + "afresh": true, + "batchSize": 100 +} diff --git a/tasks/single2single_v4_v3.json b/tasks/single2single_v4_v3.json new file mode 100644 index 0000000..37e5ac3 --- /dev/null +++ b/tasks/single2single_v4_v3.json @@ -0,0 +1,11 @@ +{ + "sourcePassword": "redistest0102", + "sourceRedisAddress": "10.0.1.31:16374", + "targetRedisAddress": "10.0.1.32:16373", + "targetPassword": "redistest0102", + "taskName": "single2single_v4_v3", + "targetRedisVersion": 3.0, + "autostart": true, + "afresh": true, + "batchSize": 100 +} diff --git a/tasks/single2single_v4_v4.json b/tasks/single2single_v4_v4.json new file mode 100644 index 0000000..d6c6ae1 --- /dev/null +++ b/tasks/single2single_v4_v4.json @@ -0,0 +1,11 @@ +{ + "sourcePassword": "redistest0102", + "sourceRedisAddress": "10.0.1.31:16374", + "targetRedisAddress": "10.0.1.32:16374", + "targetPassword": "redistest0102", + "taskName": "single2single_v4_v4", + "targetRedisVersion": 4.0, + "autostart": true, + "afresh": true, + "batchSize": 100 +} diff --git a/tasks/single2single_v4_v5.json b/tasks/single2single_v4_v5.json new file mode 100644 index 0000000..9dc19eb --- /dev/null +++ b/tasks/single2single_v4_v5.json @@ -0,0 +1,11 @@ +{ + "sourcePassword": "redistest0102", + "sourceRedisAddress": "10.0.1.31:16374", + "targetRedisAddress": "10.0.1.32:16375", + "targetPassword": "redistest0102", + "taskName": "single2single_v4_v5", + "targetRedisVersion": 5.0, + "autostart": true, + "afresh": true, + "batchSize": 100 +} diff --git a/tasks/single2single_v4_v6.json b/tasks/single2single_v4_v6.json new file mode 100644 index 0000000..1e94bcf --- 
/dev/null +++ b/tasks/single2single_v4_v6.json @@ -0,0 +1,11 @@ +{ + "sourcePassword": "redistest0102", + "sourceRedisAddress": "10.0.1.31:16374", + "targetRedisAddress": "10.0.1.32:16376", + "targetPassword": "redistest0102", + "taskName": "single2single_v4_v6", + "targetRedisVersion": 6.0, + "autostart": true, + "afresh": true, + "batchSize": 100 +} diff --git a/tasks/single2single_v5_v3.json b/tasks/single2single_v5_v3.json new file mode 100644 index 0000000..33bfa32 --- /dev/null +++ b/tasks/single2single_v5_v3.json @@ -0,0 +1,11 @@ +{ + "sourcePassword": "redistest0102", + "sourceRedisAddress": "10.0.1.31:16375", + "targetRedisAddress": "10.0.1.32:16373", + "targetPassword": "redistest0102", + "taskName": "single2single_v5_v3", + "targetRedisVersion": 3.0, + "autostart": true, + "afresh": true, + "batchSize": 100 +} diff --git a/tasks/single2single_v5_v4.json b/tasks/single2single_v5_v4.json new file mode 100644 index 0000000..1a03b5b --- /dev/null +++ b/tasks/single2single_v5_v4.json @@ -0,0 +1,11 @@ +{ + "sourcePassword": "redistest0102", + "sourceRedisAddress": "10.0.1.31:16375", + "targetRedisAddress": "10.0.1.32:16374", + "targetPassword": "redistest0102", + "taskName": "single2single_v5_v4", + "targetRedisVersion": 4.0, + "autostart": true, + "afresh": true, + "batchSize": 100 +} diff --git a/tasks/single2single_v5_v5.json b/tasks/single2single_v5_v5.json new file mode 100644 index 0000000..422fbac --- /dev/null +++ b/tasks/single2single_v5_v5.json @@ -0,0 +1,11 @@ +{ + "sourcePassword": "redistest0102", + "sourceRedisAddress": "10.0.1.31:16375", + "targetRedisAddress": "10.0.1.32:16375", + "targetPassword": "redistest0102", + "taskName": "single2single_v5_v5", + "targetRedisVersion": 5.0, + "autostart": true, + "afresh": true, + "batchSize": 100 +} diff --git a/tasks/single2single_v5_v6.json b/tasks/single2single_v5_v6.json new file mode 100644 index 0000000..346e048 --- /dev/null +++ b/tasks/single2single_v5_v6.json @@ -0,0 +1,11 @@ +{ + 
"sourcePassword": "redistest0102", + "sourceRedisAddress": "10.0.1.31:16375", + "targetRedisAddress": "10.0.1.32:16376", + "targetPassword": "redistest0102", + "taskName": "single2single_v5_v6", + "targetRedisVersion": 6.0, + "autostart": true, + "afresh": true, + "batchSize": 100 +} diff --git a/tasks/single2single_v6_v3.json b/tasks/single2single_v6_v3.json new file mode 100644 index 0000000..0767683 --- /dev/null +++ b/tasks/single2single_v6_v3.json @@ -0,0 +1,11 @@ +{ + "sourcePassword": "redistest0102", + "sourceRedisAddress": "10.0.1.31:16376", + "targetRedisAddress": "10.0.1.32:16373", + "targetPassword": "redistest0102", + "taskName": "single2single_v6_v3", + "targetRedisVersion": 3.0, + "autostart": true, + "afresh": true, + "batchSize": 100 +} diff --git a/tasks/single2single_v6_v4.json b/tasks/single2single_v6_v4.json new file mode 100644 index 0000000..9f44b8e --- /dev/null +++ b/tasks/single2single_v6_v4.json @@ -0,0 +1,11 @@ +{ + "sourcePassword": "redistest0102", + "sourceRedisAddress": "10.0.1.31:16376", + "targetRedisAddress": "10.0.1.32:16374", + "targetPassword": "redistest0102", + "taskName": "single2single_v6_v4", + "targetRedisVersion": 4.0, + "autostart": true, + "afresh": true, + "batchSize": 100 +} diff --git a/tasks/single2single_v6_v5.json b/tasks/single2single_v6_v5.json new file mode 100644 index 0000000..88c6fc5 --- /dev/null +++ b/tasks/single2single_v6_v5.json @@ -0,0 +1,11 @@ +{ + "sourcePassword": "redistest0102", + "sourceRedisAddress": "10.0.1.31:16376", + "targetRedisAddress": "10.0.1.32:16375", + "targetPassword": "redistest0102", + "taskName": "single2single_v6_v5", + "targetRedisVersion": 5.0, + "autostart": true, + "afresh": true, + "batchSize": 100 +} diff --git a/tasks/single2single_v6_v6.json b/tasks/single2single_v6_v6.json new file mode 100644 index 0000000..1137371 --- /dev/null +++ b/tasks/single2single_v6_v6.json @@ -0,0 +1,11 @@ +{ + "sourcePassword": "redistest0102", + "sourceRedisAddress": "10.0.1.31:16376", + 
"targetRedisAddress": "10.0.1.32:16376", + "targetPassword": "redistest0102", + "taskName": "single2single_v6_v6", + "targetRedisVersion": 6.0, + "autostart": true, + "afresh": true, + "batchSize": 100 +} diff --git a/tasks/single2singlewithdbmap.json b/tasks/single2singlewithdbmap.json new file mode 100644 index 0000000..315531b --- /dev/null +++ b/tasks/single2singlewithdbmap.json @@ -0,0 +1,17 @@ +{ + "sourcePassword": "redistest0102", + "sourceRedisAddress": "114.67.76.82:16374", + "targetRedisAddress": "114.67.120.120:16374", + "targetPassword": "redistest0102", + "dbMapper": { + "0": 3, + "2": 6, + "3": 5, + "8": 10 + }, + "taskName": "single2singlewithdbmap", + "targetRedisVersion": 4.0, + "autostart": true, + "afresh": true, + "batchSize": 1000 +} diff --git a/tasks/single2singlewithdbmap_v3_v3.json b/tasks/single2singlewithdbmap_v3_v3.json new file mode 100644 index 0000000..d060315 --- /dev/null +++ b/tasks/single2singlewithdbmap_v3_v3.json @@ -0,0 +1,17 @@ +{ + "sourcePassword": "redistest0102", + "sourceRedisAddress": "10.0.1.31:16373", + "targetRedisAddress": "10.0.1.32:16373", + "targetPassword": "redistest0102", + "dbMapper": { + "0": 3, + "2": 6, + "3": 5, + "8": 10 + }, + "taskName": "single2singlewithdbmap_v3_v3", + "targetRedisVersion": 3.0, + "autostart": true, + "afresh": true, + "batchSize": 1000 +} diff --git a/tasks/single2singlewithdbmap_v3_v4.json b/tasks/single2singlewithdbmap_v3_v4.json new file mode 100644 index 0000000..a05c84c --- /dev/null +++ b/tasks/single2singlewithdbmap_v3_v4.json @@ -0,0 +1,17 @@ +{ + "sourcePassword": "redistest0102", + "sourceRedisAddress": "10.0.1.31:16373", + "targetRedisAddress": "10.0.1.32:16374", + "targetPassword": "redistest0102", + "dbMapper": { + "0": 3, + "2": 6, + "3": 5, + "8": 10 + }, + "taskName": "single2singlewithdbmap_v3_v4", + "targetRedisVersion": 4.0, + "autostart": true, + "afresh": true, + "batchSize": 1000 +} diff --git a/tasks/single2singlewithdbmap_v3_v5.json 
b/tasks/single2singlewithdbmap_v3_v5.json new file mode 100644 index 0000000..04e2c78 --- /dev/null +++ b/tasks/single2singlewithdbmap_v3_v5.json @@ -0,0 +1,17 @@ +{ + "sourcePassword": "redistest0102", + "sourceRedisAddress": "10.0.1.31:16373", + "targetRedisAddress": "10.0.1.32:16375", + "targetPassword": "redistest0102", + "dbMapper": { + "0": 3, + "2": 6, + "3": 5, + "8": 10 + }, + "taskName": "single2singlewithdbmap_v3_v5", + "targetRedisVersion": 5.0, + "autostart": true, + "afresh": true, + "batchSize": 1000 +} diff --git a/tasks/single2singlewithdbmap_v3_v6.json b/tasks/single2singlewithdbmap_v3_v6.json new file mode 100644 index 0000000..eaacc0e --- /dev/null +++ b/tasks/single2singlewithdbmap_v3_v6.json @@ -0,0 +1,17 @@ +{ + "sourcePassword": "redistest0102", + "sourceRedisAddress": "10.0.1.31:16373", + "targetRedisAddress": "10.0.1.32:16376", + "targetPassword": "redistest0102", + "dbMapper": { + "0": 3, + "2": 6, + "3": 5, + "8": 10 + }, + "taskName": "single2singlewithdbmap_v3_v6", + "targetRedisVersion": 6.0, + "autostart": true, + "afresh": true, + "batchSize": 1000 +} diff --git a/tasks/single2singlewithdbmap_v4_v3.json b/tasks/single2singlewithdbmap_v4_v3.json new file mode 100644 index 0000000..df3c134 --- /dev/null +++ b/tasks/single2singlewithdbmap_v4_v3.json @@ -0,0 +1,17 @@ +{ + "sourcePassword": "redistest0102", + "sourceRedisAddress": "10.0.1.31:16374", + "targetRedisAddress": "10.0.1.32:16373", + "targetPassword": "redistest0102", + "dbMapper": { + "0": 3, + "2": 6, + "3": 5, + "8": 10 + }, + "taskName": "single2singlewithdbmap_v4_v3", + "targetRedisVersion": 3.0, + "autostart": true, + "afresh": true, + "batchSize": 1000 +} diff --git a/tasks/single2singlewithdbmap_v4_v4.json b/tasks/single2singlewithdbmap_v4_v4.json new file mode 100644 index 0000000..2435236 --- /dev/null +++ b/tasks/single2singlewithdbmap_v4_v4.json @@ -0,0 +1,17 @@ +{ + "sourcePassword": "redistest0102", + "sourceRedisAddress": "10.0.1.31:16374", + 
"targetRedisAddress": "10.0.1.32:16374", + "targetPassword": "redistest0102", + "dbMapper": { + "0": 3, + "2": 6, + "3": 5, + "8": 10 + }, + "taskName": "single2singlewithdbmap_v4_v4", + "targetRedisVersion": 4.0, + "autostart": true, + "afresh": true, + "batchSize": 1000 +} diff --git a/tasks/single2singlewithdbmap_v4_v5.json b/tasks/single2singlewithdbmap_v4_v5.json new file mode 100644 index 0000000..089aee2 --- /dev/null +++ b/tasks/single2singlewithdbmap_v4_v5.json @@ -0,0 +1,17 @@ +{ + "sourcePassword": "redistest0102", + "sourceRedisAddress": "10.0.1.31:16374", + "targetRedisAddress": "10.0.1.32:16375", + "targetPassword": "redistest0102", + "dbMapper": { + "0": 3, + "2": 6, + "3": 5, + "8": 10 + }, + "taskName": "single2singlewithdbmap_v4_v5", + "targetRedisVersion": 5.0, + "autostart": true, + "afresh": true, + "batchSize": 1000 +} diff --git a/tasks/single2singlewithdbmap_v4_v6.json b/tasks/single2singlewithdbmap_v4_v6.json new file mode 100644 index 0000000..18911e4 --- /dev/null +++ b/tasks/single2singlewithdbmap_v4_v6.json @@ -0,0 +1,17 @@ +{ + "sourcePassword": "redistest0102", + "sourceRedisAddress": "10.0.1.31:16374", + "targetRedisAddress": "10.0.1.32:16376", + "targetPassword": "redistest0102", + "dbMapper": { + "0": 3, + "2": 6, + "3": 5, + "8": 10 + }, + "taskName": "single2singlewithdbmap_v4_v6", + "targetRedisVersion": 6.0, + "autostart": true, + "afresh": true, + "batchSize": 1000 +} diff --git a/tasks/single2singlewithdbmap_v5_v3.json b/tasks/single2singlewithdbmap_v5_v3.json new file mode 100644 index 0000000..afe38fd --- /dev/null +++ b/tasks/single2singlewithdbmap_v5_v3.json @@ -0,0 +1,17 @@ +{ + "sourcePassword": "redistest0102", + "sourceRedisAddress": "10.0.1.31:16375", + "targetRedisAddress": "10.0.1.32:16373", + "targetPassword": "redistest0102", + "dbMapper": { + "0": 3, + "2": 6, + "3": 5, + "8": 10 + }, + "taskName": "single2singlewithdbmap_v5_v3", + "targetRedisVersion": 3.0, + "autostart": true, + "afresh": true, + "batchSize": 
1000 +} diff --git a/tasks/single2singlewithdbmap_v5_v4.json b/tasks/single2singlewithdbmap_v5_v4.json new file mode 100644 index 0000000..8dc8f9c --- /dev/null +++ b/tasks/single2singlewithdbmap_v5_v4.json @@ -0,0 +1,17 @@ +{ + "sourcePassword": "redistest0102", + "sourceRedisAddress": "10.0.1.31:16375", + "targetRedisAddress": "10.0.1.32:16374", + "targetPassword": "redistest0102", + "dbMapper": { + "0": 3, + "2": 6, + "3": 5, + "8": 10 + }, + "taskName": "single2singlewithdbmap_v5_v4", + "targetRedisVersion": 4.0, + "autostart": true, + "afresh": true, + "batchSize": 1000 +} diff --git a/tasks/single2singlewithdbmap_v5_v5.json b/tasks/single2singlewithdbmap_v5_v5.json new file mode 100644 index 0000000..fd0c39e --- /dev/null +++ b/tasks/single2singlewithdbmap_v5_v5.json @@ -0,0 +1,17 @@ +{ + "sourcePassword": "redistest0102", + "sourceRedisAddress": "10.0.1.31:16375", + "targetRedisAddress": "10.0.1.32:16375", + "targetPassword": "redistest0102", + "dbMapper": { + "0": 3, + "2": 6, + "3": 5, + "8": 10 + }, + "taskName": "single2singlewithdbmap_v5_v5", + "targetRedisVersion": 5.0, + "autostart": true, + "afresh": true, + "batchSize": 1000 +} diff --git a/tasks/single2singlewithdbmap_v5_v6.json b/tasks/single2singlewithdbmap_v5_v6.json new file mode 100644 index 0000000..d33f200 --- /dev/null +++ b/tasks/single2singlewithdbmap_v5_v6.json @@ -0,0 +1,17 @@ +{ + "sourcePassword": "redistest0102", + "sourceRedisAddress": "10.0.1.31:16375", + "targetRedisAddress": "10.0.1.32:16376", + "targetPassword": "redistest0102", + "dbMapper": { + "0": 3, + "2": 6, + "3": 5, + "8": 10 + }, + "taskName": "single2singlewithdbmap_v5_v6", + "targetRedisVersion": 6.0, + "autostart": true, + "afresh": true, + "batchSize": 1000 +} diff --git a/tasks/single2singlewithdbmap_v6_v3.json b/tasks/single2singlewithdbmap_v6_v3.json new file mode 100644 index 0000000..87a3b24 --- /dev/null +++ b/tasks/single2singlewithdbmap_v6_v3.json @@ -0,0 +1,17 @@ +{ + "sourcePassword": "redistest0102", + 
"sourceRedisAddress": "10.0.1.31:16376", + "targetRedisAddress": "10.0.1.32:16373", + "targetPassword": "redistest0102", + "dbMapper": { + "0": 3, + "2": 6, + "3": 5, + "8": 10 + }, + "taskName": "single2singlewithdbmap_v6_v3", + "targetRedisVersion": 3.0, + "autostart": true, + "afresh": true, + "batchSize": 1000 +} diff --git a/tasks/single2singlewithdbmap_v6_v4.json b/tasks/single2singlewithdbmap_v6_v4.json new file mode 100644 index 0000000..a701fd3 --- /dev/null +++ b/tasks/single2singlewithdbmap_v6_v4.json @@ -0,0 +1,17 @@ +{ + "sourcePassword": "redistest0102", + "sourceRedisAddress": "10.0.1.31:16376", + "targetRedisAddress": "10.0.1.32:16374", + "targetPassword": "redistest0102", + "dbMapper": { + "0": 3, + "2": 6, + "3": 5, + "8": 10 + }, + "taskName": "single2singlewithdbmap_v6_v4", + "targetRedisVersion": 4.0, + "autostart": true, + "afresh": true, + "batchSize": 1000 +} diff --git a/tasks/single2singlewithdbmap_v6_v5.json b/tasks/single2singlewithdbmap_v6_v5.json new file mode 100644 index 0000000..22f53e7 --- /dev/null +++ b/tasks/single2singlewithdbmap_v6_v5.json @@ -0,0 +1,17 @@ +{ + "sourcePassword": "redistest0102", + "sourceRedisAddress": "10.0.1.31:16376", + "targetRedisAddress": "10.0.1.32:16375", + "targetPassword": "redistest0102", + "dbMapper": { + "0": 3, + "2": 6, + "3": 5, + "8": 10 + }, + "taskName": "single2singlewithdbmap_v6_v5", + "targetRedisVersion": 5.0, + "autostart": true, + "afresh": true, + "batchSize": 1000 +} diff --git a/tasks/single2singlewithdbmap_v6_v6.json b/tasks/single2singlewithdbmap_v6_v6.json new file mode 100644 index 0000000..6105821 --- /dev/null +++ b/tasks/single2singlewithdbmap_v6_v6.json @@ -0,0 +1,17 @@ +{ + "sourcePassword": "redistest0102", + "sourceRedisAddress": "10.0.1.31:16376", + "targetRedisAddress": "10.0.1.32:16376", + "targetPassword": "redistest0102", + "dbMapper": { + "0": 3, + "2": 6, + "3": 5, + "8": 10 + }, + "taskName": "single2singlewithdbmap_v6_v6", + "targetRedisVersion": 6.0, + 
"autostart": true, + "afresh": true, + "batchSize": 1000 +} diff --git a/utils/GenRandIP.go b/utils/GenRandIP.go new file mode 100644 index 0000000..afa3519 --- /dev/null +++ b/utils/GenRandIP.go @@ -0,0 +1,59 @@ +package utils + +import ( + "fmt" + "math/rand" + "time" +) + +// 随机生成合法 IP,如: 222.16.123.95 +func RandomIp() string { + // IP 范围二维数组 + ranges := ipRange() + idx := newRand().Intn(10) + return numToIp(ranges[idx][0] + newRand().Intn(ranges[idx][1]-ranges[idx][0])) +} + +// 随机生成(隐蔽后两位的)合法 IP,如: 222.16.*.* +func RandomOmicIp() string { + // IP 范围二维数组 + ranges := ipRange() + idx := newRand().Intn(10) + return numToOmicIp(ranges[idx][0] + newRand().Intn(ranges[idx][1]-ranges[idx][0])) +} + +func numToIp(num int) string { + arr := make([]int, 4) + arr[0] = (num >> 24) & 0xff + arr[1] = (num >> 16) & 0xff + arr[2] = (num >> 8) & 0xff + arr[3] = num & 0xff + return fmt.Sprintf("%d.%d.%d.%d", arr[0], arr[1], arr[2], arr[3]) +} + +func numToOmicIp(num int) string { + arr := make([]int, 2) + arr[0] = (num >> 24) & 0xff + arr[1] = (num >> 16) & 0xff + return fmt.Sprintf("%d.%d.*.*", arr[0], arr[1]) +} + +// IP 范围二维数组 +func ipRange() [][]int { + return [][]int{{607649792, 608174079}, //36.56.0.0-36.63.255.255 + {1038614528, 1039007743}, //61.232.0.0-61.237.255.255 + {1783627776, 1784676351}, //106.80.0.0-106.95.255.255 + {2035023872, 2035154943}, //121.76.0.0-121.77.255.255 + {2078801920, 2079064063}, //123.232.0.0-123.235.255.255 + {-1950089216, -1948778497}, //139.196.0.0-139.215.255.255 + {-1425539072, -1425014785}, //171.8.0.0-171.15.255.255 + {-1236271104, -1235419137}, //182.80.0.0-182.92.255.255 + {-770113536, -768606209}, //210.25.0.0-210.47.255.255 + {-569376768, -564133889}, //222.16.0.0-222.95.255.255 + } +} + +// 实例化随机数结构体,源为时间微秒 +func newRand() *rand.Rand { + return rand.New(rand.NewSource(time.Now().UnixNano())) +} diff --git a/utils/constant.go b/utils/constant.go new file mode 100644 index 0000000..f9d417f --- /dev/null +++ b/utils/constant.go @@ -0,0 
+1,6 @@ +package utils + +const ( + ConfigEnv = "RSP_CONFIG" + ConfigFile = "config.yaml" +) diff --git a/utils/directory.go b/utils/directory.go new file mode 100644 index 0000000..ac82b7b --- /dev/null +++ b/utils/directory.go @@ -0,0 +1,45 @@ +package utils + +import ( + "go.uber.org/zap" + "os" + "testcase/global" +) + +//@function: PathExists +//@description: 文件目录是否存在 +//@param: path string +//@return: bool, error + +func PathExists(path string) (bool, error) { + _, err := os.Stat(path) + if err == nil { + return true, nil + } + if os.IsNotExist(err) { + return false, nil + } + return false, err +} + +//@function: CreateDir +//@description: 批量创建文件夹 +//@param: dirs ...string +//@return: err error + +func CreateDir(dirs ...string) (err error) { + for _, v := range dirs { + exist, err := PathExists(v) + if err != nil { + return err + } + if !exist { + global.RSPLog.Debug("create directory" + v) + err = os.MkdirAll(v, os.ModePerm) + if err != nil { + global.RSPLog.Error("create directory"+v, zap.Any(" error:", err)) + } + } + } + return err +} diff --git a/utils/rotatelogs_unix.go b/utils/rotatelogs_unix.go new file mode 100644 index 0000000..3ddde81 --- /dev/null +++ b/utils/rotatelogs_unix.go @@ -0,0 +1,30 @@ +// +build !windows + +package utils + +import ( + zaprotatelogs "github.com/lestrrat-go/file-rotatelogs" + "go.uber.org/zap/zapcore" + "os" + "path" + "testcase/global" + "time" +) + +//@author: [SliverHorn](https://github.com/SliverHorn) +//@function: GetWriteSyncer +//@description: zap logger中加入file-rotatelogs +//@return: zapcore.WriteSyncer, error + +func GetWriteSyncer() (zapcore.WriteSyncer, error) { + fileWriter, err := zaprotatelogs.New( + path.Join(global.RSPConfig.Zap.Director, "%Y-%m-%d.log"), + zaprotatelogs.WithLinkName(global.RSPConfig.Zap.LinkName), + zaprotatelogs.WithMaxAge(7*24*time.Hour), + zaprotatelogs.WithRotationTime(24*time.Hour), + ) + if global.RSPConfig.Zap.LogInConsole { + return 
zapcore.NewMultiWriteSyncer(zapcore.AddSync(os.Stdout), zapcore.AddSync(fileWriter)), err + } + return zapcore.AddSync(fileWriter), err +} diff --git a/utils/rotatelogs_windows.go b/utils/rotatelogs_windows.go new file mode 100644 index 0000000..96b9681 --- /dev/null +++ b/utils/rotatelogs_windows.go @@ -0,0 +1,26 @@ +package utils + +import ( + zaprotatelogs "github.com/lestrrat-go/file-rotatelogs" + "go.uber.org/zap/zapcore" + "os" + "path" + "testcase/global" + "time" +) + +//@author: [SliverHorn](https://github.com/SliverHorn) +//@function: GetWriteSyncer +//@description: zap logger中加入file-rotatelogs +//@return: zapcore.WriteSyncer, error +func GetWriteSyncer() (zapcore.WriteSyncer, error) { + fileWriter, err := zaprotatelogs.New( + path.Join(global.RSPConfig.Zap.Director, "%Y-%m-%d.log"), + zaprotatelogs.WithMaxAge(7*24*time.Hour), + zaprotatelogs.WithRotationTime(24*time.Hour), + ) + if global.RSPConfig.Zap.LogInConsole { + return zapcore.NewMultiWriteSyncer(zapcore.AddSync(os.Stdout), zapcore.AddSync(fileWriter)), err + } + return zapcore.AddSync(fileWriter), err +} diff --git "a/\346\265\213\350\257\225\350\214\203\345\233\264.md" "b/\346\265\213\350\257\225\350\214\203\345\233\264.md" new file mode 100644 index 0000000..dfc476a --- /dev/null +++ "b/\346\265\213\350\257\225\350\214\203\345\233\264.md" @@ -0,0 +1,99 @@ +# 测试流程 +* 清空测试库 +* 生成全量数据 +* 生成任务 +* 生成增量数据 +* 停止任务 +* 核对数据正确性 + +# 测试范围 +* 创建任务 +* 启动任务 +* 停止任务 +* 删除任务 +* 单实例->单实例 +* 集群->单实例 +* 单实例->集群 +* rdb文件导入 +* aof文件导入 +* 双向同步 + +# 数据生成规则 + +* 全量数据生成规则 + * 生成biglist、bigset、bighash、string等数据 + * key格式prefix(uuid)_datatype_random +* 增量数据生成规则 + +# Case执行参数 + +* 执行时长 +* 线程数 + +# 测试case + +## 单实例->单实例 + +* v3 -> v4 +* v3 -> v5 +* v3 -> v6 +* v4 -> v3 +* v4 -> v5 +* v4 -> v6 +* v5 -> v4 +* v5 -> v3 +* v5 -> v6 +* v6 -> v4 +* v6 -> v3 +* v6 -> v5 + +## 集群->单实例 + +* v4 -> v5 +* v4 -> v6 +* v5 -> v4 +* v5 -> v6 +* v6 -> v4 +* v6 -> v5 + +## 单实例->集群 + +* v3 -> v4 +* v3 -> v5 +* v3 -> v6 +* v4 -> v5 
+* v4 -> v6 +* v5 -> v4 +* v5 -> v6 +* v6 -> v4 +* v6 -> v5 + +## rdb文件导入 + +* v3 -> v4 +* v3 -> v5 +* v3 -> v6 +* v4 -> v3 +* v4 -> v5 +* v4 -> v6 +* v5 -> v4 +* v5 -> v3 +* v5 -> v6 +* v6 -> v4 +* v6 -> v3 +* v6 -> v5 + +## aof文件导入 + +* v3 -> v4 +* v3 -> v5 +* v3 -> v6 +* v4 -> v3 +* v4 -> v5 +* v4 -> v6 +* v5 -> v4 +* v5 -> v3 +* v5 -> v6 +* v6 -> v4 +* v6 -> v3 +* v6 -> v5 \ No newline at end of file