 from lib import shell_operator
 import random
 import time
-from lib import db_operator
 import threading
 import time
 import mythread
@@ -188,19 +187,19 @@ def clear_RecycleBin():
     ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user)
     ori_cmd = "curve_ops_tool clean-recycle --isTest"
     rs = shell_operator.ssh_exec(ssh, ori_cmd)
-    assert rs[3] == 0,"clean RecyclenBin失败, msg is %s" % rs[1]
+    assert rs[3] == 0,"clean RecycleBin fail, msg is %s" % rs[1]
     starttime = time.time()
     ori_cmd = "curve_ops_tool list -fileName=/RecycleBin |grep Total"
     while time.time() - starttime < 180:
         rs = shell_operator.ssh_exec(ssh, ori_cmd)
         if "".join(rs[1]).strip() == "Total file number: 0" and rs[3] == 0:
             break
         else:
-            logger.debug("删除中")
+            logger.debug("deleting")
         if rs[3] != 0:
-            logger.debug("list /RecycleBin 失败,error is %s" % rs[1])
+            logger.debug("list /RecycleBin fail,error is %s" % rs[1])
         time.sleep(3)
-    assert rs[3] == 0,"删除 /RecycleBin 失败, error is %s" % rs[1]
+    assert rs[3] == 0,"delete /RecycleBin fail, error is %s" % rs[1]

 def loop_map_unmap_file():
     thread = []
@@ -274,15 +273,6 @@ def write_full_disk(fio_size):
     assert rs[3] == 0,"write fio fail"

 def get_chunkserver_id(host,cs_id):
-    # conn = db_operator.conn_db(config.abnormal_db_host, config.db_port, config.db_user, config.db_pass, config.mds_db_name)
-    # sql = R"select * from curve_chunkserver where `internalHostIP` like '%s' and `mountPoint` like 'local:///data/chunkserver%d/' and `rwstatus` like 0;;"%(host,cs_id)
-    # chunkserver = db_operator.query_db(conn, sql)
-    # if chunkserver["rowcount"] == 1:
-    #     chunkserver_id = chunkserver["data"][0]["chunkServerID"]
-    #     logger.info("operator chunkserver id is %d"%chunkserver_id)
-    # else:
-    #     assert False,"get chunkserver id fail,retun is %s"%(chunkserver)
-    #     return -1
     client_host = config.client_list[0]
     logger.info("|------begin get chunkserver %s id %d------|" % (host,cs_id))
     cmd = "curve_ops_tool chunkserver-list | grep %s |grep -w chunkserver%d" % (host,cs_id)
@@ -296,18 +286,6 @@ def get_chunkserver_id(host,cs_id):
         return -1

 def get_cs_copyset_num(host,cs_id):
-    # conn = db_operator.conn_db(config.abnormal_db_host, config.db_port, config.db_user, config.db_pass, config.mds_db_name)
-    # try:
-    #     sql = R"select * from curve_copyset where chunkServerIDList REGEXP '\n\t%d,|,\n\t%d,|,\n\t%d\n';"\
-    #           %(chunkserver_id,chunkserver_id,chunkserver_id)
-    #     cs_copyset_info = db_operator.query_db(conn, sql)
-    #     logger.debug("get table row is %s"%cs_copyset_info["rowcount"])
-    #     logger.debug("get table %s" %(cs_copyset_info))
-    # except Exception:
-    #     logger.error("get db fail.")
-    #     raise
-    # logger.info("chunkserver id %d have %s copysets"%(chunkserver_id,cs_copyset_info["rowcount"]))
-    # return int(cs_copyset_info["rowcount"])
     client_host = config.client_list[0]
     cs_number = int(cs_id) + 8200
     cmd = "curve_ops_tool check-chunkserver -chunkserverAddr=%s:%d |grep 'total copysets'" % (host,cs_number)
@@ -342,27 +320,27 @@ def map_nbd():
     ssh = shell_operator.create_ssh_connect(client_host, 1046, config.abnormal_user)
     cmd = "curve create --filename /fiofile --length 10 --user test"
     rs = shell_operator.ssh_exec(ssh, cmd)
-    assert rs[3] == 0,"创建卷 /fiofile 失败,失败原因:%s" % rs[2]
+    assert rs[3] == 0,"create /fiofile fail:%s" % rs[2]
     cmd = "curve create --filename /vdbenchfile --length 10 --user test"
     rs = shell_operator.ssh_exec(ssh, cmd)
-    assert rs[3] == 0,"创建卷 /vdbenchfile 失败,失败原因:%s" % rs[2]
+    assert rs[3] == 0,"create /vdbenchfile fail:%s" % rs[2]
     time.sleep(3)
     cmd = "sudo curve-nbd map cbd:pool1//fiofile_test_ >/dev/null 2>&1"
     rs = shell_operator.ssh_exec(ssh, cmd)
-    assert rs[3] == 0,"map fiofile 失败,失败原因:%s" % rs[2]
+    assert rs[3] == 0,"map fiofile fail:%s" % rs[2]
     cmd = "sudo curve-nbd map cbd:pool1//vdbenchfile_test_ >/dev/null 2>&1"
     rs = shell_operator.ssh_exec(ssh, cmd)
-    assert rs[3] == 0,"map vdbenchfile 失败,失败原因:%s" % rs[2]
+    assert rs[3] == 0,"map vdbenchfile fail:%s" % rs[2]

 def delete_nbd():
     client_host = config.client_list[0]
     ssh = shell_operator.create_ssh_connect(client_host, 1046, config.abnormal_user)
     cmd = "curve delete --filename /fiofile --user test"
     rs = shell_operator.ssh_exec(ssh, cmd)
-    assert rs[3] == 0,"删除卷 /fiofile 失败,失败原因:%s" % rs[2]
+    assert rs[3] == 0,"delete /fiofile fail:%s" % rs[2]
     cmd = "curve delete --filename /vdbenchfile --user test"
     rs = shell_operator.ssh_exec(ssh, cmd)
-    assert rs[3] == 0,"删除卷 /vdbenchfile 失败,失败原因:%s" % rs[2]
+    assert rs[3] == 0,"delete /vdbenchfile fail:%s" % rs[2]

 def check_host_connect(ip):
     cmd = "ping %s -w3" % ip
@@ -751,7 +729,6 @@ def rapid_leader_schedule():
     ori_cmd = "curve_ops_tool rapid-leader-schedule"
     rs = shell_operator.ssh_exec(ssh, ori_cmd)
     assert rs[3] == 0,"rapid leader schedule not ok"
-    # 等待rapid leader schedule执行完成
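+    # wait for the rapid leader schedule to finish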
     ori_cmd = "curve_ops_tool check-operator -opName=transfer_leader -leaderOpInterval=1| grep \"Operator num is\""
     starttime = time.time()
     while time.time() - starttime < 60:
@@ -764,7 +741,6 @@ def rapid_leader_schedule():

 def wait_cluster_healthy(limit_iops=8000):
     check_chunkserver_online()
-    #检测集群整体状态
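+    # check the overall cluster status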
     host = random.choice(config.mds_list)
     ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user)
     ori_cmd = "curve_ops_tool status | grep \"cluster is\""
@@ -789,7 +765,6 @@ def wait_cluster_healthy(limit_iops=8000):
     logger.debug("copysets status is %s" % copysets_status)
     assert check == 1,"cluster is not healthy in %d s,cluster status is:\n %s,copysets status is:\n %s" % (config.recover_time,cluster_status,copysets_status)
     rapid_leader_schedule()
-    #检测nbd iops
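+    # check nbd iops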
     ssh = shell_operator.create_ssh_connect(config.client_list[0], 1046, config.abnormal_user)
     i = 0
     while i < 300:
@@ -824,7 +799,6 @@ def check_io_error():
     ssh.close()

 def check_copies_consistency():
-    # 快速leader均衡
     host = random.choice(config.mds_list)
     ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user)
     ori_cmdpri = "curve_ops_tool check-consistency -filename=/fiofile \
@@ -1738,7 +1712,6 @@ def perf_test():
     assert rs[3] == 0,"cp fiodata fail,error is %s" % rs[1]
     analysis_data(ssh)

-#增加数据盘
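+# add a data disk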
 def add_data_disk():
     ori_cmd = "bash attach_thrash.sh"
     ssh = shell_operator.create_ssh_connect(config.nova_host, 1046, config.nova_user)
@@ -1818,7 +1791,6 @@ def get_all_curvevm_active_num(num):
     assert status == "up","get vm status fail,not up.is %s,current vm id is %s" % (status,uuid)
     return active_num

-# 用于初始化创建50个curve系统盘的云主机。先从镜像创建一个curve云主机,再从该云主机创建自定义镜像和克隆50个云主机。每个云主机会自动拉起1000 iops的io
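+# Initialize 50 VMs whose system disks live on curve: first create one curve VM from an image, then build a custom image from it and clone 50 VMs; each VM automatically starts 1000-iops IO.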
 def init_create_curve_vm(num):
     image_id = config.image_id
     salt = ''.join(random.sample(string.ascii_letters + string.digits, 8))
@@ -1885,7 +1857,6 @@ def do_thrasher(action):
     logger.debug("开始启动故障XXXXXXXXXXXXXXXXXXX %s,%s XXXXXXXXXXXXXXXXXXXXXX" % (action[0],str(action[1])))
     globals()[action[0]](action[1])

-#加回所有retired的chunkserver
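+# add back all retired chunkservers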
 def start_retired_and_down_chunkservers():
     for host in config.chunkserver_list:
         ssh = shell_operator.create_ssh_connect(host, 1046, config.abnormal_user)