本文共 12264 字,大约阅读时间需要 40 分钟。
之前的文章介绍了如何修改输出格式为csv
和json
以及如何加hook自定义增加压测输出指标
然而我每次还都是用默认的输出然后自己用了一堆屎命令去格式化, 类似这样
原始输出
[ 5s ] thds: 16 tps: 1269.60 qps: 25437.17 (r/w/o: 17813.58/5081.20/2542.40) lat (ms,95%): 23.10 err/s 0.00 reconn/s: 0.00
ctime: 2020-04-05 16:27:44 node2: 0 node3: 0 node2_Rpl_semi_sync_slave_status: 1 node3_Rpl_semi_sync_slave_status: 1 node1_Rpl_semi_sync_master_no_tx: 0 node1_Rpl_semi_sync_master_status: 1 node1_Rpl_semi_sync_master_tx_avg_wait_time: 384
[ 10s ] thds: 16 tps: 1218.20 qps: 24350.89 (r/w/o: 17042.27/4872.22/2436.41) lat (ms,95%): 23.95 err/s 0.00 reconn/s: 0.00
ctime: 2020-04-05 16:27:49 node2: 0 node3: 0 node2_Rpl_semi_sync_slave_status: 1 node3_Rpl_semi_sync_slave_status: 1 node1_Rpl_semi_sync_master_no_tx: 0 node1_Rpl_semi_sync_master_status: 1 node1_Rpl_semi_sync_master_tx_avg_wait_time: 395
[ 15s ] thds: 16 tps: 1176.41 qps: 23537.13 (r/w/o: 16477.89/4706.43/2352.81) lat (ms,95%): 24.38 err/s 0.00 reconn/s: 0.00
格式化
grep -E "^\[|ctime" local_60g_40c_4.txt | sed -n 'N;s/\n/\ /p' |awk '{for(i=1;i<=NF;i++){printf "%s ", $i}; printf "\n"}'|sed -e "s/\[\ /\{'time':'/g" -e "s/\ \]//g" -e "s/(r\/w\/o\:[^\s]*) //g" -e "s/ (ms\,95%)//g" -e "s/err\/s/err\/s:/g"|sed -r "s/([0-9]{4}-[0-9]{2}-[0-9]{2}) ([0-9]{2}:[0-9]{2}:[0-9]{2})/\1T\2/g" |sed -e "s/:\ /':'/g" -e "s/\ /','/g" -e "s/,'$/\}/g"

输出:
{'time':'5s','thds':'16','tps':'1269.60','qps':'25437.17','lat':'23.10','err/s':'0.00','reconn/s':'0.00','ctime':'2020-04-05T16:27:44','node2':'0','node3':'0','node2_Rpl_semi_sync_slave_status':'1','node3_Rpl_semi_sync_slave_status':'1','node1_Rpl_semi_sync_master_no_tx':'0','node1_Rpl_semi_sync_master_status':'1','node1_Rpl_semi_sync_master_tx_avg_wait_time':'384'}
{'time':'10s','thds':'16','tps':'1218.20','qps':'24350.89','lat':'23.95','err/s':'0.00','reconn/s':'0.00','ctime':'2020-04-05T16:27:49','node2':'0','node3':'0','node2_Rpl_semi_sync_slave_status':'1','node3_Rpl_semi_sync_slave_status':'1','node1_Rpl_semi_sync_master_no_tx':'0','node1_Rpl_semi_sync_master_status':'1','node1_Rpl_semi_sync_master_tx_avg_wait_time':'395'}
能用但丑陋…
今天决定好好整一下
比如我们测半同步, 想压测的时候看看这些指标
node2, node3 是两个半同步从库, node1 是主库。
node2 —— 代表 node2 延迟, 通过自己脚本造数据查询计算;
node3 —— 代表 node3 延迟, 通过自己脚本造数据查询计算。
下面是 show global status 中的值:
node2_Rpl_semi_sync_slave_status
node3_Rpl_semi_sync_slave_status
node1_Rpl_semi_sync_master_no_tx
node1_Rpl_semi_sync_master_status
node1_Rpl_semi_sync_master_tx_avg_wait_time
那么实例演示开始:
node1建个表
-- Table on node1 holding one monitoring sample per second: measured
-- replication delay for node2/node3 plus the semi-sync status counters
-- that the sysbench reporting hook reads back.
CREATE TABLE `monitor_delay` (
  `id` bigint(20) unsigned NOT NULL AUTO_INCREMENT,
  `ctime` datetime NOT NULL,
  `node2` int(11) NOT NULL,
  `node3` int(11) NOT NULL,
  `node2_Rpl_semi_sync_slave_status` tinyint unsigned not null default 0,
  `node3_Rpl_semi_sync_slave_status` tinyint unsigned not null default 0,
  `node1_Rpl_semi_sync_master_no_tx` int unsigned not null default 0,
  `node1_Rpl_semi_sync_master_status` int unsigned not null default 0,
  `node1_Rpl_semi_sync_master_tx_avg_wait_time` int unsigned not null default 0,
  PRIMARY KEY (`id`)
) ENGINE=InnoDB AUTO_INCREMENT=1 DEFAULT CHARSET=utf8;

-- Seed one row so the first delay query has something to read.
insert into monitor_delay(ctime,node2,node3) values(now(),0,0);
通过脚本每秒插入数据, 同时取从库延迟和status等值写入node1的monitor_delay
# Quick-and-dirty monitor ("ugly but works", per the author): once per second,
# measure replication delay on the two semi-sync slaves (node2/node3) and
# collect semi-sync status counters, then persist everything into node1's
# sysbench.monitor_delay table for the sysbench reporting hook to read.
import time

import pymysql

# Status variables whose ON/OFF value must be mapped to 1/0.
_BOOL_STATUS = ('Rpl_semi_sync_slave_status', 'Rpl_semi_sync_master_status')


def get_slave_ctime(conn):
    """Return the newest monitor_delay row (dict with id, ctime) on a slave."""
    with conn.cursor(pymysql.cursors.DictCursor) as cursor:
        cursor.execute(
            'select id,ctime from monitor_delay order by id desc limit 1')
        return cursor.fetchone()


def get_master_ctime(conn, row_id):
    """Return the master's ctime for the monitor_delay row with the given id."""
    with conn.cursor(pymysql.cursors.DictCursor) as cursor:
        # Parameterized query; the original built SQL with % interpolation.
        cursor.execute('select ctime from monitor_delay where id=%s', (row_id,))
        return cursor.fetchone()['ctime']


def get_status(conn, variable_name):
    """Return a global status value; ON/OFF flags are mapped to 1/0."""
    with conn.cursor(pymysql.cursors.DictCursor) as cursor:
        if variable_name in _BOOL_STATUS:
            cursor.execute(
                "select case when VARIABLE_VALUE='ON' then 1 else 0 end as value"
                " from performance_schema.global_status"
                " where VARIABLE_NAME=%s", (variable_name,))
        else:
            cursor.execute(
                "select VARIABLE_VALUE value"
                " from performance_schema.global_status"
                " where VARIABLE_NAME=%s", (variable_name,))
        return cursor.fetchone()['value']


def insert_delay(conn, row):
    """Insert one sample row; *row* is the 7-tuple of measured values."""
    with conn.cursor(pymysql.cursors.DictCursor) as cursor:
        cursor.execute(
            "insert into monitor_delay(ctime,node2,node3,"
            "node2_Rpl_semi_sync_slave_status, node3_Rpl_semi_sync_slave_status,"
            " node1_Rpl_semi_sync_master_no_tx, node1_Rpl_semi_sync_master_status,"
            " node1_Rpl_semi_sync_master_tx_avg_wait_time)"
            " values(now(),%s,%s,%s,%s,%s,%s,%s)", row)


def _connect(host):
    """Open one autocommit connection to the sysbench schema on *host*."""
    return pymysql.connect(host=host, port=3307, user='fanboshi',
                           password='123', db='sysbench', charset='utf8',
                           autocommit=True)


def main():
    while True:
        conn1 = _connect('192.168.x.1')  # master
        conn2 = _connect('192.168.x.2')  # semi-sync slave
        conn3 = _connect('192.168.x.3')  # semi-sync slave
        try:
            node2_row = get_slave_ctime(conn2)
            node3_row = get_slave_ctime(conn3)
            ctime2 = get_master_ctime(conn1, node2_row['id'])
            ctime3 = get_master_ctime(conn1, node3_row['id'])
            # total_seconds() instead of .seconds: a negative delta (slave
            # clock ahead) would otherwise yield a bogus near-86400 value.
            node2_diff = str(int((ctime2 - node2_row['ctime']).total_seconds()))
            node3_diff = str(int((ctime3 - node3_row['ctime']).total_seconds()))
            sample = (
                node2_diff,
                node3_diff,
                get_status(conn2, 'Rpl_semi_sync_slave_status'),
                get_status(conn3, 'Rpl_semi_sync_slave_status'),
                get_status(conn1, 'Rpl_semi_sync_master_no_tx'),
                get_status(conn1, 'Rpl_semi_sync_master_status'),
                get_status(conn1, 'Rpl_semi_sync_master_tx_avg_wait_time'),
            )
            insert_delay(conn1, sample)
            print(ctime2, node2_diff, ctime3, node3_diff)
        finally:
            # Close even if a query fails, so we don't leak connections
            # across iterations of the endless loop.
            conn1.close()
            conn2.close()
            conn3.close()
        time.sleep(1)


if __name__ == '__main__':
    main()
启动这个脚本, 让他一直运行
我们的sysbench命令类似这样
sysbench /usr/local/share/sysbench/oltp_read_write.lua --mysql-user=fanboshi --mysql-password=123 --mysql-port=3307 --mysql-host=192.168.x.1 --mysql-db=sysbench --tables=10 --table-size=500000000 --db-driver=mysql --threads=4 --report-interval=5 --time=3600 run
所以修改/usr/local/share/sysbench/oltp_read_write.lua
, 添加如下内容
-- Intermediate-report hook: before each periodic report, fetch the latest
-- monitoring sample written by the side-channel Python script into
-- sysbench.monitor_delay, attach its fields to the stat table, and emit the
-- enriched report through sysbench.report_json (redefined below in this file).
sysbench.hooks.report_intermediate = function (stat)
    -- Lazily open one connection (per reporting thread) and reuse it for
    -- every subsequent report interval.
    if con == nil then
        con = assert(sysbench.sql.driver():connect())
    end
    ctime, node2, node3, node2_Rpl_semi_sync_slave_status, node3_Rpl_semi_sync_slave_status, node1_Rpl_semi_sync_master_no_tx, node1_Rpl_semi_sync_master_status, node1_Rpl_semi_sync_master_tx_avg_wait_time = con:query_row([[ select ctime, node2, node3, node2_Rpl_semi_sync_slave_status, node3_Rpl_semi_sync_slave_status, node1_Rpl_semi_sync_master_no_tx, node1_Rpl_semi_sync_master_status, node1_Rpl_semi_sync_master_tx_avg_wait_time from sysbench.monitor_delay order by id desc limit 1;]])
    -- stat is a plain Lua table, so extra fields can simply be added; the
    -- customized report_json then formats them into the JSON output.
    stat["ctime"] = ctime
    stat["node2"] = node2
    stat["node3"] = node3
    stat["node2_Rpl_semi_sync_slave_status"] = node2_Rpl_semi_sync_slave_status
    stat["node3_Rpl_semi_sync_slave_status"] = node3_Rpl_semi_sync_slave_status
    stat["node1_Rpl_semi_sync_master_no_tx"] = node1_Rpl_semi_sync_master_no_tx
    stat["node1_Rpl_semi_sync_master_status"] = node1_Rpl_semi_sync_master_status
    stat["node1_Rpl_semi_sync_master_tx_avg_wait_time"] = node1_Rpl_semi_sync_master_tx_avg_wait_time
    sysbench.report_json(stat)
end
解释一下
默认sysbench使用sysbench.report_default(stat)
输出, stat是个table, 所以我们往里面加数据就好了
然后再修改sysbench.report_json
, 这个函数定义在/root/sysbench/src/lua/internal/sysbench.lua
中(这里面还有report_csv和report_default)
-- Stock sysbench JSON reporter (from src/lua/internal/sysbench.lua), shown
-- here as the base that the customized version below is copied from.
function sysbench.report_json(stat)
   if not gobj then
      -- First report: open the JSON array.
      io.write('[\n')
      -- hack to print the closing bracket when the Lua state of the reporting
      -- thread is closed
      gobj = newproxy(true)
      getmetatable(gobj).__gc = function () io.write('\n]\n') end
   else
      -- Subsequent reports: separate array elements.
      io.write(',\n')
   end

   -- All rate counters are per-second averages over the report interval.
   local seconds = stat.time_interval
   io.write(([[
  {
    "time": %4.0f,
    "threads": %u,
    "tps": %4.2f,
    "qps": {
      "total": %4.2f,
      "reads": %4.2f,
      "writes": %4.2f,
      "other": %4.2f
    },
    "latency": %4.2f,
    "errors": %4.2f,
    "reconnects": %4.2f
  }]]):format(
         stat.time_total,
         stat.threads_running,
         stat.events / seconds,
         (stat.reads + stat.writes + stat.other) / seconds,
         stat.reads / seconds,
         stat.writes / seconds,
         stat.other / seconds,
         -- latency_pct is in seconds; report milliseconds.
         stat.latency_pct * 1000,
         stat.errors / seconds,
         stat.reconnects / seconds))
end
我们直接在oltp_read_write.lua
中重新定义即可. 我不会lua, 但是改这个比较简单, 抄一下
-- Customized JSON reporter: same as the stock sysbench.report_json but also
-- prints the monitoring fields that the report_intermediate hook attaches to
-- the stat table.
--
-- Fix: the hook stores stat["ctime"] (the sample's wall-clock timestamp), but
-- the original copy of this function never printed it, so the timestamp was
-- silently dropped from the output. A "ctime" field is now emitted too.
function sysbench.report_json(stat)
   if not gobj then
      -- First report: open the JSON array.
      io.write('[\n')
      -- hack to print the closing bracket when the Lua state of the reporting
      -- thread is closed
      gobj = newproxy(true)
      getmetatable(gobj).__gc = function () io.write('\n]\n') end
   else
      -- Subsequent reports: separate array elements.
      io.write(',\n')
   end

   -- All rate counters are per-second averages over the report interval.
   local seconds = stat.time_interval
   io.write(([[
  {
    "time": %4.0f,
    "threads": %u,
    "tps": %4.2f,
    "qps": {
      "total": %4.2f,
      "reads": %4.2f,
      "writes": %4.2f,
      "other": %4.2f
    },
    "ctime": "%s",
    "node2": %u,
    "node3": %u,
    "node2_Rpl_semi_sync_slave_status": %u,
    "node3_Rpl_semi_sync_slave_status": %u,
    "node1_Rpl_semi_sync_master_no_tx": %u,
    "node1_Rpl_semi_sync_master_status": %u,
    "node1_Rpl_semi_sync_master_tx_avg_wait_time": %u,
    "latency": %4.2f,
    "errors": %4.2f,
    "reconnects": %4.2f
  }]]):format(
         stat.time_total,
         stat.threads_running,
         stat.events / seconds,
         (stat.reads + stat.writes + stat.other) / seconds,
         stat.reads / seconds,
         stat.writes / seconds,
         stat.other / seconds,
         -- ctime comes back from query_row as a string; %s prints it as-is.
         tostring(stat.ctime),
         -- The status values are numeric strings; Lua's %u coerces them.
         stat.node2,
         stat.node3,
         stat.node2_Rpl_semi_sync_slave_status,
         stat.node3_Rpl_semi_sync_slave_status,
         stat.node1_Rpl_semi_sync_master_no_tx,
         stat.node1_Rpl_semi_sync_master_status,
         stat.node1_Rpl_semi_sync_master_tx_avg_wait_time,
         -- latency_pct is in seconds; report milliseconds.
         stat.latency_pct * 1000,
         stat.errors / seconds,
         stat.reconnects / seconds))
end
最终输出如下:
[root@node3 sysbench]# sysbench /usr/local/share/sysbench/oltp_read_write.lua --mysql-user=fanboshi --mysql-password=123 --mysql-port=3307 --mysql-host=192.168.x.1 --mysql-db=sysbench --tables=10 --table-size=500000000 --db-driver=mysql --threads=4 --report-interval=5 --time=3600 runsysbench 1.1.0-bd4b418 (using bundled LuaJIT 2.1.0-beta3)Running the test with following options:Number of threads: 4Report intermediate results every 5 second(s)Initializing random number generator from current timeInitializing worker threads...Threads started![ { "time": 5, "threads": 4, "tps": 143.93, "qps": { "total": 2889.17, "reads": 2024.40, "writes": 576.12, "other": 288.66 }, "node2": 0, "node3": 0, "node2_Rpl_semi_sync_slave_status": 1, "node3_Rpl_semi_sync_slave_status": 1, "node1_Rpl_semi_sync_master_no_tx": 0, "node1_Rpl_semi_sync_master_status": 1, "node1_Rpl_semi_sync_master_tx_avg_wait_time": 468, "latency": 51.02, "errors": 0.00, "reconnects": 0.00 }, { "time": 10, "threads": 4, "tps": 172.61, "qps": { "total": 3455.75, "reads": 2418.11, "writes": 692.43, "other": 345.22 }, "node2": 0, "node3": 0, "node2_Rpl_semi_sync_slave_status": 1, "node3_Rpl_semi_sync_slave_status": 1, "node1_Rpl_semi_sync_master_no_tx": 0, "node1_Rpl_semi_sync_master_status": 1, "node1_Rpl_semi_sync_master_tx_avg_wait_time": 468, "latency": 38.94, "errors": 0.00, "reconnects": 0.00 }^C
转载地址:http://akvub.baihongyu.com/