
Commit cb91aaa

update log and db_operator
1 parent fa11b25 commit cb91aaa

10 files changed: +20 -978 lines changed

robot/Resources/config/config.py

Lines changed: 0 additions & 3 deletions
@@ -1,9 +1,6 @@
 #!/usr/bin/env python
 # -*- coding: utf8 -*-
 
-'''
-基本配置信息,若配置新的环境,需要更改此文件
-'''
 #curve info
 curve_workspace = "/var/lib/jenkins/workspace/curve_failover/"
 chunkserver_start_script = "./deploy/local/chunkserver/start_chunkservers_locally.sh"

robot/Resources/keywords/base_operate.py

Lines changed: 0 additions & 87 deletions
@@ -5,7 +5,6 @@
 import shlex
 from config import config
 from logger.logger import *
-from lib import db_operator
 from lib import shell_operator
 from swig import swig_operate
 from multiprocessing import Pool
@@ -17,61 +16,6 @@
 import types
 import mythread
 
-#clean_db
-def clean_db():
-    try:
-        cmd_list = ["DELETE FROM curve_logicalpool;", "DELETE FROM curve_copyset;", \
-                    "DELETE FROM curve_physicalpool;", "DELETE FROM curve_zone;", \
-                    "DELETE FROM curve_server;", "DELETE FROM curve_chunkserver;", \
-                    "DELETE FROM curve_session;", "DELETE FROM client_info;"]
-        for cmd in cmd_list:
-            conn = db_operator.conn_db(config.db_host, config.db_port, config.db_user, config.db_pass, config.mds_db_name)
-            db_operator.exec_sql(conn, cmd)
-            logger.debug("clean db %s" %cmd)
-
-    except Exception:
-        logger.error("clean db fail.")
-        raise
-
-def create_db_table():
-    try:
-        conn = db_operator.conn_db(config.db_host, config.db_port, config.db_user, config.db_pass, config.mds_db_name)
-        db_operator.exec_sql_file(conn, config.curve_sql)
-        conn2 = db_operator.conn_db(config.db_host, config.db_port, config.db_user, config.db_pass, config.mds_db_name)
-        db_operator.exec_sql_file(conn2, config.snap_sql)
-
-    except Exception:
-        logger.error("创建表失败.")
-        raise
-
-
-def drop_mds_table():
-    try:
-        cmd_list = ["DROP TABLE curve_logicalpool;", "DROP TABLE curve_copyset;", \
-                    "DROP TABLE curve_physicalpool;", "DROP TABLE curve_zone;", \
-                    "DROP TABLE curve_server;", "DROP TABLE curve_chunkserver;", \
-                    "DROP TABLE curve_session;", "DROP TABLE client_info;"]
-        for cmd in cmd_list:
-            conn = db_operator.conn_db(config.db_host, config.db_port, config.db_user, config.db_pass, config.mds_db_name)
-            db_operator.exec_sql(conn, cmd)
-            logger.debug("drop table %s" %cmd)
-    except Exception:
-        logger.error("drop db fail.")
-        raise
-
-def get_copyset_table():
-    try:
-        cmd = "select copySetID,chunkServerIDList from curve_copyset INTO OUTFILE " + '"'+config.mysql_log +'"'+ \
-              " fields terminated by '|' lines terminated by '|'; "
-        conn = db_operator.conn_db(config.db_host, config.db_port, config.db_user, config.db_pass, config.mds_db_name)
-        db_operator.exec_sql(conn, cmd)
-        logger.debug("get table %s" %cmd)
-        rc = os.path.isfile(config.mysql_log)
-        assert rc == True,"exec %s"%cmd
-    except Exception:
-        logger.error("get copyset table fail.cmd is %s"%cmd)
-        raise
-
 def mv_copyset_table():
     grep_cmd = "mv %s %s"%(config.mysql_log,config.curve_workspace)
     try:
@@ -130,30 +74,6 @@ def get_copyset_scatterwith():
         logger.info("chunkserver %d copyset_num is %d \t,scatterwith is %d %s"%(cs,cs_copyset_num,len(scatterwith),scatterwith))
         # print "chunkserver %d ,scatterwith is %d" % (cs, len(scatterwith))
 
-def drop_snap_clone_table():
-    try:
-        cmd_list = ["DROP TABLE snapshot;", "DROP TABLE clone;"]
-        for cmd in cmd_list:
-            conn = db_operator.conn_db(config.db_host, config.db_port, config.db_user, config.db_pass, config.snap_db_name)
-            db_operator.exec_sql(conn, cmd)
-            logger.debug("drop table %s" %cmd)
-    except Exception:
-        logger.error("drop db fail.")
-        raise
-
-def mock_chunkserver_registe():
-    try:
-        mysql_cmd = ["INSERT INTO curve_chunkserver VALUES (31, 'token1', 'nvme', '127.0.0.1', 8200, 0, 1, 0, 0, '/', 0, 0);",
-                     "INSERT INTO curve_chunkserver VALUES (32, 'token2', 'nvme', '127.0.0.1', 8201, 0, 2, 0, 0, '/', 0, 0);",
-                     "INSERT INTO curve_chunkserver VALUES (33, 'token3', 'nvme', '127.0.0.1', 8202, 0, 3, 0, 0, '/', 0, 0);"]
-        for cmd in mysql_cmd:
-            conn = db_operator.conn_db(config.db_host, config.db_port, config.db_user, config.db_pass, config.mds_db_name)
-            db_operator.exec_sql(conn, cmd)
-            logger.debug("insert db %s" % cmd)
-    except Exception:
-        logger.error("insert db fail. %s" % cmd)
-        raise
-
 
 def kill_process(process_name):
     grep_cmd = "ps -ef | grep %s | grep -v grep | awk '{print $2}' " %process_name
@@ -432,13 +352,6 @@ def cat_chunkserver_log(chunkserver_id):
     else:
         logger.error("chunkserver log not exists!")
 
-def check_copyset_num(copyset_num):
-    sql = "select * from curve_copyset;"
-    conn = db_operator.conn_db(config.db_host, config.db_port, config.db_user, config.db_pass, config.mds_db_name)
-    logicalpool_dbinof = db_operator.query_db(conn, sql)
-    logger.info("logicalpool_dbinof = %s" % int(logicalpool_dbinof["rowcount"]))
-    assert int(logicalpool_dbinof["rowcount"]) == int(copyset_num)
-
 def get_buf():
     return config.buf
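All of the helpers removed above follow one pattern: open a fresh connection with db_operator.conn_db for each SQL statement, run it with db_operator.exec_sql, and log the statement. A minimal sketch of that pattern, using only the db_operator and config names that appear in the removed code (the run_statements helper itself is illustrative, not part of the repository):

from config import config
from logger.logger import *
from lib import db_operator

def run_statements(statements, db_name):
    # Same shape as the deleted clean_db/drop_mds_table helpers:
    # one short-lived connection per statement, executed via exec_sql.
    sql = None
    try:
        for sql in statements:
            conn = db_operator.conn_db(config.db_host, config.db_port,
                                       config.db_user, config.db_pass, db_name)
            db_operator.exec_sql(conn, sql)
            logger.debug("exec sql %s" % sql)
    except Exception:
        logger.error("exec sql fail. %s" % sql)
        raise

# e.g. the deleted clean_db() amounted to:
# run_statements(["DELETE FROM curve_logicalpool;", "DELETE FROM curve_copyset;"], config.mds_db_name)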

robot/Resources/keywords/deploy.py

Lines changed: 0 additions & 37 deletions
@@ -5,7 +5,6 @@
 from config import config
 from logger.logger import *
 from lib import shell_operator
-from lib import db_operator
 import threading
 import random
 import time
@@ -279,35 +278,6 @@ def drop_all_chunkserver_dat():
         logger.debug("drop cs dat get result is %d" % t.get_result())
         assert t.get_result() == 0
 
-def drop_abnormal_test_db():
-    try:
-        cmd_list = ["DROP TABLE curve_logicalpool;", "DROP TABLE curve_copyset;", \
-                    "DROP TABLE curve_physicalpool;", "DROP TABLE curve_zone;", \
-                    "DROP TABLE curve_server;", "DROP TABLE curve_chunkserver;", \
-                    "DROP TABLE curve_session;", "DROP TABLE client_info;"]
-        cmd_list_2 = ["DROP TABLE clone;","DROP TABLE snapshot;"]
-        for cmd in cmd_list:
-            conn = db_operator.conn_db(config.abnormal_db_host, config.db_port, config.db_user, config.db_pass, config.mds_db_name)
-            db_operator.exec_sql(conn, cmd)
-            logger.debug("drop table %s" %cmd)
-        for cmd in cmd_list_2:
-            conn = db_operator.conn_db(config.abnormal_db_host, config.db_port, config.db_user, config.db_pass, config.snap_db_name)
-            db_operator.exec_sql(conn, cmd)
-            logger.debug("drop table %s" %cmd)
-    except Exception:
-        logger.error("drop db fail.")
-        raise
-
-def create_abnormal_db_table():
-    try:
-        conn = db_operator.conn_db(config.abnormal_db_host, config.db_port, config.db_user, config.db_pass, config.mds_db_name)
-        db_operator.exec_sql_file(conn, config.curve_sql)
-        conn2 = db_operator.conn_db(config.abnormal_db_host, config.db_port, config.db_user, config.db_pass, config.mds_db_name)
-        db_operator.exec_sql_file(conn2, config.snap_sql)
-    except Exception:
-        logger.error("创建表失败.")
-        raise
-
 def install_deb():
     try:
         # mkdeb_url = config.curve_workspace + "mk-deb.sh"
@@ -427,13 +397,6 @@ def start_abnormal_test_services():
         logger.error("up servers fail.")
         raise
 
-def get_copyset_num():
-    conn = db_operator.conn_db(config.abnormal_db_host, config.db_port, config.db_user, config.db_pass, config.mds_db_name)
-    sql = R"select * from curve_copyset;"
-    copyset = db_operator.query_db(conn, sql)
-    logger.info("now copyset num is %d"%copyset["rowcount"])
-    return int(copyset["rowcount"])
-
 def create_pool():
     ssh = shell_operator.create_ssh_connect(config.mds_list[0], 1046, config.abnormal_user)
     mds = []

robot/Resources/keywords/fault_inject.py

Lines changed: 10 additions & 39 deletions
@@ -7,7 +7,6 @@
 from lib import shell_operator
 import random
 import time
-from lib import db_operator
 import threading
 import time
 import mythread
@@ -188,19 +187,19 @@ def clear_RecycleBin():
     ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user)
     ori_cmd = "curve_ops_tool clean-recycle --isTest"
     rs = shell_operator.ssh_exec(ssh, ori_cmd)
-    assert rs[3] == 0,"clean RecyclenBin失败,msg is %s"%rs[1]
+    assert rs[3] == 0,"clean RecyclenBin fail,msg is %s"%rs[1]
     starttime = time.time()
     ori_cmd = "curve_ops_tool list -fileName=/RecycleBin |grep Total"
     while time.time() - starttime < 180:
         rs = shell_operator.ssh_exec(ssh, ori_cmd)
         if "".join(rs[1]).strip() == "Total file number: 0" and rs[3] == 0:
             break
         else:
-            logger.debug("删除中")
+            logger.debug("deleting")
         if rs[3] != 0:
-            logger.debug("list /RecycleBin 失败,error is %s"%rs[1])
+            logger.debug("list /RecycleBin fail,error is %s"%rs[1])
         time.sleep(3)
-    assert rs[3] == 0,"删除/RecycleBin 失败,error is %s"%rs[1]
+    assert rs[3] == 0,"delete /RecycleBin fail,error is %s"%rs[1]
 
 def loop_map_unmap_file():
     thread = []
@@ -274,15 +273,6 @@ def write_full_disk(fio_size):
     assert rs[3] == 0,"write fio fail"
 
 def get_chunkserver_id(host,cs_id):
-    # conn = db_operator.conn_db(config.abnormal_db_host, config.db_port, config.db_user, config.db_pass, config.mds_db_name)
-    # sql = R"select * from curve_chunkserver where `internalHostIP` like '%s' and `mountPoint` like 'local:///data/chunkserver%d/' and `rwstatus` like 0;;"%(host,cs_id)
-    # chunkserver = db_operator.query_db(conn, sql)
-    # if chunkserver["rowcount"] == 1:
-    #     chunkserver_id = chunkserver["data"][0]["chunkServerID"]
-    #     logger.info("operator chunkserver id is %d"%chunkserver_id)
-    # else:
-    #     assert False,"get chunkserver id fail,retun is %s"%(chunkserver)
-    #     return -1
     client_host = config.client_list[0]
     logger.info("|------begin get chunkserver %s id %d------|"%(host,cs_id))
     cmd = "curve_ops_tool chunkserver-list | grep %s |grep -w chunkserver%d"%(host,cs_id)
@@ -296,18 +286,6 @@ def get_chunkserver_id(host,cs_id):
         return -1
 
 def get_cs_copyset_num(host,cs_id):
-    # conn = db_operator.conn_db(config.abnormal_db_host, config.db_port, config.db_user, config.db_pass, config.mds_db_name)
-    # try:
-    #     sql = R"select * from curve_copyset where chunkServerIDList REGEXP '\n\t%d,|,\n\t%d,|,\n\t%d\n';"\
-    #           %(chunkserver_id,chunkserver_id,chunkserver_id)
-    #     cs_copyset_info = db_operator.query_db(conn, sql)
-    #     logger.debug("get table row is %s"%cs_copyset_info["rowcount"])
-    #     logger.debug("get table %s" %(cs_copyset_info))
-    # except Exception:
-    #     logger.error("get db fail.")
-    #     raise
-    # logger.info("chunkserver id %d have %s copysets"%(chunkserver_id,cs_copyset_info["rowcount"]))
-    # return int(cs_copyset_info["rowcount"])
     client_host = config.client_list[0]
     cs_number = int(cs_id) + 8200
     cmd = "curve_ops_tool check-chunkserver -chunkserverAddr=%s:%d |grep 'total copysets'"%(host,cs_number)
@@ -342,27 +320,27 @@ def map_nbd():
     ssh = shell_operator.create_ssh_connect(client_host, 1046, config.abnormal_user)
     cmd = "curve create --filename /fiofile --length 10 --user test"
     rs = shell_operator.ssh_exec(ssh, cmd)
-    assert rs[3] == 0,"创建卷/fiofile 失败,失败原因:%s"%rs[2]
+    assert rs[3] == 0,"create /fiofile fail:%s"%rs[2]
     cmd = "curve create --filename /vdbenchfile --length 10 --user test"
     rs = shell_operator.ssh_exec(ssh, cmd)
-    assert rs[3] == 0,"创建卷/vdbenchfile 失败,失败原因:%s"%rs[2]
+    assert rs[3] == 0,"create /vdbenchfile fail:%s"%rs[2]
     time.sleep(3)
     cmd = "sudo curve-nbd map cbd:pool1//fiofile_test_ >/dev/null 2>&1"
     rs = shell_operator.ssh_exec(ssh, cmd)
-    assert rs[3] == 0,"map fiofile 失败,失败原因:%s"%rs[2]
+    assert rs[3] == 0,"map fiofile fail:%s"%rs[2]
     cmd = "sudo curve-nbd map cbd:pool1//vdbenchfile_test_ >/dev/null 2>&1"
     rs = shell_operator.ssh_exec(ssh, cmd)
-    assert rs[3] == 0,"map vdbenchfile 失败,失败原因:%s"%rs[2]
+    assert rs[3] == 0,"map vdbenchfile fail:%s"%rs[2]
 
 def delete_nbd():
     client_host = config.client_list[0]
     ssh = shell_operator.create_ssh_connect(client_host, 1046, config.abnormal_user)
     cmd = "curve delete --filename /fiofile --user test"
     rs = shell_operator.ssh_exec(ssh, cmd)
-    assert rs[3] == 0,"删除卷/fiofile 失败,失败原因:%s"%rs[2]
+    assert rs[3] == 0,"delete /fiofile fail:%s"%rs[2]
     cmd = "curve delete --filename /vdbenchfile --user test"
     rs = shell_operator.ssh_exec(ssh, cmd)
-    assert rs[3] == 0,"删除卷/vdbenchfile 失败,失败原因:%s"%rs[2]
+    assert rs[3] == 0,"delete /vdbenchfile fail:%s"%rs[2]
 
 def check_host_connect(ip):
     cmd = "ping %s -w3"%ip
@@ -751,7 +729,6 @@ def rapid_leader_schedule():
     ori_cmd = "curve_ops_tool rapid-leader-schedule"
     rs = shell_operator.ssh_exec(ssh, ori_cmd)
     assert rs[3] == 0,"rapid leader schedule not ok"
-    # 等待rapid leader schedule执行完成
     ori_cmd = "curve_ops_tool check-operator -opName=transfer_leader -leaderOpInterval=1| grep \"Operator num is\""
     starttime = time.time()
     while time.time() - starttime < 60:
@@ -764,7 +741,6 @@
 
 def wait_cluster_healthy(limit_iops=8000):
     check_chunkserver_online()
-    #检测集群整体状态
     host = random.choice(config.mds_list)
     ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user)
     ori_cmd = "curve_ops_tool status | grep \"cluster is\""
@@ -789,7 +765,6 @@ def wait_cluster_healthy(limit_iops=8000):
     logger.debug("copysets status is %s"%copysets_status)
     assert check == 1,"cluster is not healthy in %d s,cluster status is:\n %s,copysets status is:\n %s"%(config.recover_time,cluster_status,copysets_status)
     rapid_leader_schedule()
-    #检测nbd iops
     ssh = shell_operator.create_ssh_connect(config.client_list[0], 1046, config.abnormal_user)
     i = 0
     while i < 300:
@@ -824,7 +799,6 @@ def check_io_error():
     ssh.close()
 
 def check_copies_consistency():
-    # 快速leader均衡
     host = random.choice(config.mds_list)
     ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user)
     ori_cmdpri = "curve_ops_tool check-consistency -filename=/fiofile \
@@ -1738,7 +1712,6 @@ def perf_test():
     assert rs[3] == 0,"cp fiodata fail,error is %s"%rs[1]
     analysis_data(ssh)
 
-#增加数据盘
 def add_data_disk():
     ori_cmd = "bash attach_thrash.sh"
     ssh = shell_operator.create_ssh_connect(config.nova_host, 1046, config.nova_user)
@@ -1818,7 +1791,6 @@ def get_all_curvevm_active_num(num):
         assert status == "up","get vm status fail,not up.is %s,current vm id is %s"%(status,uuid)
     return active_num
 
-# 用于初始化创建50个curve系统盘的云主机。先从镜像创建一个curve云主机,再从该云主机创建自定义镜像和克隆50个云主机。每个云主机会自动拉起1000 iops的io
 def init_create_curve_vm(num):
     image_id = config.image_id
     salt = ''.join(random.sample(string.ascii_letters + string.digits, 8))
@@ -1885,7 +1857,6 @@ def do_thrasher(action):
     logger.debug("开始启动故障XXXXXXXXXXXXXXXXXXX %s,%s XXXXXXXXXXXXXXXXXXXXXX"%(action[0],str(action[1])))
     globals()[action[0]](action[1])
 
-#加回所有retired的chunkserver
 def start_retired_and_down_chunkservers():
     for host in config.chunkserver_list:
         ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user)
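The replacement pattern for the deleted SQL lookups is visible in get_chunkserver_id and get_cs_copyset_num above: run curve_ops_tool on a client host over SSH and parse the single grep'd line instead of querying the MDS database. Below is a rough sketch of that flow using the config and shell_operator calls shown in the diff; the exact wording of the "total copysets" output line is an assumption, and the helper itself is illustrative rather than repository code:

import re

from config import config
from lib import shell_operator

def count_copysets_on_chunkserver(host, cs_id):
    # Ask curve_ops_tool instead of the MDS DB, as get_cs_copyset_num now does.
    client_host = config.client_list[0]
    ssh = shell_operator.create_ssh_connect(client_host, 1046, config.abnormal_user)
    cs_port = int(cs_id) + 8200   # chunkserver ports start at 8200, per the diff above
    cmd = "curve_ops_tool check-chunkserver -chunkserverAddr=%s:%d |grep 'total copysets'" % (host, cs_port)
    rs = shell_operator.ssh_exec(ssh, cmd)
    assert rs[3] == 0, "check-chunkserver fail, error is %s" % rs[2]
    line = "".join(rs[1]).strip()
    # Assumed output shape: a line containing "total copysets: <N>"; pull out the first integer.
    match = re.search(r"total copysets\D*(\d+)", line)
    assert match is not None, "unexpected check-chunkserver output: %s" % line
    return int(match.group(1))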
