diff --git a/0chain/artifacts/grafana-portainer.zip b/0chain/artifacts/grafana-portainer.zip index 00dd5afe..02b242fb 100644 Binary files a/0chain/artifacts/grafana-portainer.zip and b/0chain/artifacts/grafana-portainer.zip differ diff --git a/0chain/artifacts/miner-files.zip b/0chain/artifacts/miner-files.zip index 4e22d105..9cf0b820 100644 Binary files a/0chain/artifacts/miner-files.zip and b/0chain/artifacts/miner-files.zip differ diff --git a/0chain/artifacts/sharder-files.zip b/0chain/artifacts/sharder-files.zip index 427cb3ba..2a441417 100644 Binary files a/0chain/artifacts/sharder-files.zip and b/0chain/artifacts/sharder-files.zip differ diff --git a/0chain/artifacts/zwallet-binary.zip b/0chain/artifacts/zwallet-binary.zip index 867946d6..b7d64050 100644 Binary files a/0chain/artifacts/zwallet-binary.zip and b/0chain/artifacts/zwallet-binary.zip differ diff --git a/0chain/deploy_miner.sh b/0chain/deploy_miner.sh index 27e6550c..c7babe92 100644 --- a/0chain/deploy_miner.sh +++ b/0chain/deploy_miner.sh @@ -1,6 +1,13 @@ #!/bin/bash set -e +echo -e "\n\e[93m=============================================================================================================================================================================== + Updating initial-states.yaml file on your server +=============================================================================================================================================================================== \e[39m" +cd ~ +rm /var/0chain/initial_states.yaml || true +wget -N https://raw.githubusercontent.com/0chain/zcnwebappscripts/as-deploy/0chain/others/initial_states.yaml +mv initial_states.yaml /var/0chain/ echo -e "\n\e[93m=============================================================================================================================================================================== Installing yq on your server @@ -30,7 +37,7 @@ pushd ${PROJECT_ROOT} > /dev/null; #Miner Delegate wallet if [[ -f del_wal_id.txt ]] ; then echo -e "\e[32m Miner delegate wallet id present \e[23m \e[0;37m" - MINER_DEL=$(cat del_wal_id.txt) + export MINER_DEL=$(cat del_wal_id.txt) else echo "Unable to find miner delegate wallet" exit 1 @@ -65,7 +72,7 @@ pushd ${PROJECT_ROOT} > /dev/null; sudo cp -rf dkgSummary-* miner/ssd/docker.local/config sudo cp -f nodes.yaml miner/ssd/docker.local/config/nodes.yaml sudo cp -f b0magicBlock.json miner/ssd/docker.local/config/b0magicBlock.json - sudo cp -f initial_states.yaml miner/ssd/docker.local/config/initial_state.yaml + sudo cp -f initial_states.yaml miner/ssd/docker.local/config/initial_states.yaml fi popd > /dev/null; @@ -83,10 +90,19 @@ echo -e "\n\e[93m=============================================================== Updating for delegate wallet in 0chain.yaml =============================================================================================================================================================================== \e[39m" pushd ${PROJECT_ROOT}/miner/ssd > /dev/null; - yq e -i '.delegate_wallet = "${MINER_DEL}"' ./docker.local/config/0chain.yaml + yq e -i '.delegate_wallet = env(MINER_DEL)' ./docker.local/config/0chain.yaml echo -e "\e[32m Successfully Updated \e[23m \e[0;37m" popd > /dev/null; +echo -e "\n\e[93m=============================================================================================================================================================================== + Backing up keys and configs for miner. 
+=============================================================================================================================================================================== \e[39m" +pushd ${PROJECT_ROOT} > /dev/null; + timestamp=archive-miner-$(date +"%Y-%m-%d-%T") + zip -r $timestamp.zip . + cp $timestamp.zip ~ +popd + echo -e "\n\e[93m=============================================================================================================================================================================== Starting miners =============================================================================================================================================================================== \e[39m" @@ -98,3 +114,6 @@ pushd ${PROJECT_ROOT}/miner/ssd/docker.local > /dev/null; #/miner/ssd cd ../ done popd > /dev/null; + +echo +echo "Please backup the $HOME/$timestamp.zip file to your local or to another server." diff --git a/0chain/deploy_miner_ssls.sh b/0chain/deploy_miner_ssls.sh index 39d41623..15668811 100644 --- a/0chain/deploy_miner_ssls.sh +++ b/0chain/deploy_miner_ssls.sh @@ -160,7 +160,7 @@ echo -e "\n\e[93m=============================================================== Deploying grafana and portainer =============================================================================================================================================================================== \e[39m" pushd ${PROJECT_ROOT}/grafana-portainer > /dev/null; #/miner/ssd - sudo chown 10001:10001 ./loki + sudo chown 1001:1001 ./loki bash ./start.p0monitor.sh ${HOST} admin ${PASSWORD} popd > /dev/null; @@ -183,12 +183,12 @@ pushd ${PROJECT_ROOT}/grafana-portainer/grafana > /dev/null; sleep 20s curl -X POST -H "Content-Type: application/json" \ - -d "@./server.json" \ + -d "{\"dashboard\":$(cat ./docker_system_monitoring.json)}" \ "https://admin:${PASSWORD}@${HOST}/grafana/api/dashboards/import" - # curl -X POST -H "Content-Type: application/json" \ - # -d "@./docker_system_monitoring.json" \ - # "https://admin:${PASSWORD}@${HOST}/grafana/api/dashboards/import" + curl -X POST -H "Content-Type: application/json" \ + -d "@./server.json" \ + "https://admin:${PASSWORD}@${HOST}/grafana/api/dashboards/import" if [[ ${MINER} -gt 0 ]] ; then curl -X POST -H "Content-Type: application/json" \ @@ -213,3 +213,17 @@ echo "Grafana Username --> admin" echo "Grafana Password --> ${PASSWORD}" echo -e "\nPortainer Username --> admin" echo "Portainer Password --> ${PASSWORD}" + +echo -e "\n\e[93m=============================================================================================================================================================================== + Loki logs cleanup +=============================================================================================================================================================================== \e[39m" +pushd ${PROJECT_ROOT} > /dev/null; +cat <<EOF >loki-logs-cleanup-job.sh +docker stop loki +rm -rf /var/0chain/grafana-portainer/loki/chunks +docker start loki +EOF +sudo chmod +x loki-logs-cleanup-job.sh +echo "0 0 */3 * * ${PROJECT_ROOT}/loki-logs-cleanup-job.sh" > crontab_loki +crontab crontab_loki +popd > /dev/null; diff --git a/0chain/deploy_sharder.sh b/0chain/deploy_sharder.sh index b697fc86..46cfee1e 100644 --- a/0chain/deploy_sharder.sh +++ b/0chain/deploy_sharder.sh @@ -1,6 +1,13 @@ #!/bin/bash set -e +echo -e 
"\n\e[93m=============================================================================================================================================================================== + Updating initial-states.yaml file on your server +=============================================================================================================================================================================== \e[39m" +cd ~ +rm /var/0chain/initial_states.yaml || true +wget -N https://raw.githubusercontent.com/0chain/zcnwebappscripts/as-deploy/0chain/others/initial_states.yaml +mv initial_states.yaml /var/0chain/ echo -e "\n\e[93m=============================================================================================================================================================================== Installing yq on your server @@ -29,7 +36,7 @@ pushd ${PROJECT_ROOT} > /dev/null; #Sharder Delegate wallet if [[ -f del_wal_id.txt ]] ; then echo -e "\e[32m Sharders delegate wallet id present \e[23m \e[0;37m" - SHARDER_DEL=$(cat del_wal_id.txt) + export SHARDER_DEL=$(cat del_wal_id.txt) else echo "Unable to find sharder delegate wallet" exit 1 @@ -61,7 +68,7 @@ pushd ${PROJECT_ROOT} > /dev/null; sudo cp -rf keys/b0s* sharder/ssd/docker.local/config # sharder/ssd/docker.local/config sudo cp -f nodes.yaml sharder/ssd/docker.local/config/nodes.yaml sudo cp -f b0magicBlock.json sharder/ssd/docker.local/config/b0magicBlock.json - sudo cp -f initial_states.yaml sharder/ssd/docker.local/config/initial_state.yaml + sudo cp -f initial_states.yaml sharder/ssd/docker.local/config/initial_states.yaml fi popd > /dev/null; @@ -86,7 +93,7 @@ pushd ${PROJECT_ROOT}/sharder/ssd > /dev/null; PG_PASSWORD=$(cat sharder_pg_password) fi echo -e "\e[32m Successfully Created the password\e[23m \e[0;37m" - yq e -i '.delegate_wallet = "${SHARDER_DEL}"' ./docker.local/config/0chain.yaml + yq e -i '.delegate_wallet = env(SHARDER_DEL)' ./docker.local/config/0chain.yaml sed -i "s/zchian/${PG_PASSWORD}/g" ./docker.local/sql_script/00-create-user.sql sed -i "s/zchian/${PG_PASSWORD}/g" ./docker.local/build.sharder/p0docker-compose.yaml echo -e "\e[32m Successfully Updated the configs\e[23m \e[0;37m" @@ -104,6 +111,15 @@ pushd ${PROJECT_ROOT}/sharder/hdd/docker.local > /dev/null; done popd > /dev/null; +echo -e "\n\e[93m=============================================================================================================================================================================== + Backing up keys and configs for sharder. +=============================================================================================================================================================================== \e[39m" +pushd ${PROJECT_ROOT} > /dev/null; + export timestamp=archive-sharder-$(date +"%Y-%m-%d-%T") + zip -r $timestamp.zip . + cp $timestamp.zip ~ +popd + echo -e "\n\e[93m=============================================================================================================================================================================== Starting sharders =============================================================================================================================================================================== \e[39m" @@ -115,3 +131,6 @@ pushd ${PROJECT_ROOT}/sharder/ssd/docker.local > /dev/null; #/sharder/ssd cd ../ done popd > /dev/null; + +echo +echo "Please backup the $HOME/$timestamp.zip file to your local or to another server." 
diff --git a/0chain/deploy_sharder_ssls.sh b/0chain/deploy_sharder_ssls.sh index aaeb4409..3549a5e3 100644 --- a/0chain/deploy_sharder_ssls.sh +++ b/0chain/deploy_sharder_ssls.sh @@ -160,7 +160,7 @@ echo -e "\n\e[93m=============================================================== Deploying grafana and portainer =============================================================================================================================================================================== \e[39m" pushd ${PROJECT_ROOT}/grafana-portainer > /dev/null; #/sharder/ssd - sudo chown 10001:10001 ./loki + sudo chown 1001:1001 ./loki bash ./start.p0monitor.sh ${HOST} admin ${PASSWORD} popd > /dev/null; @@ -183,12 +183,12 @@ pushd ${PROJECT_ROOT}/grafana-portainer/grafana > /dev/null; sleep 20s curl -X POST -H "Content-Type: application/json" \ - -d "@./server.json" \ + -d "{\"dashboard\":$(cat ./docker_system_monitoring.json)}" \ "https://admin:${PASSWORD}@${HOST}/grafana/api/dashboards/import" - # curl -X POST -H "Content-Type: application/json" \ - # -d "@./docker_system_monitoring.json" \ - # "https://admin:${PASSWORD}@${HOST}/grafana/api/dashboards/import" + curl -X POST -H "Content-Type: application/json" \ + -d "@./server.json" \ + "https://admin:${PASSWORD}@${HOST}/grafana/api/dashboards/import" if [[ ${SHARDER} -gt 0 ]] ; then curl -X POST -H "Content-Type: application/json" \ @@ -213,3 +213,17 @@ echo "Grafana Username --> admin" echo "Grafana Password --> ${PASSWORD}" echo -e "\nPortainer Username --> admin" echo "Portainer Password --> ${PASSWORD}" + +echo -e "\n\e[93m=============================================================================================================================================================================== + Loki logs cleanup +=============================================================================================================================================================================== \e[39m" +pushd ${PROJECT_ROOT} > /dev/null; +cat <<EOF >loki-logs-cleanup-job.sh +docker stop loki +rm -rf /var/0chain/grafana-portainer/loki/chunks +docker start loki +EOF +sudo chmod +x loki-logs-cleanup-job.sh +echo "0 0 */3 * * ${PROJECT_ROOT}/loki-logs-cleanup-job.sh" > crontab_loki +crontab crontab_loki +popd > /dev/null; diff --git a/0chain/fix3_loki_cleanup.sh b/0chain/fix3_loki_cleanup.sh new file mode 100644 index 00000000..d60a8c75 --- /dev/null +++ b/0chain/fix3_loki_cleanup.sh @@ -0,0 +1,24 @@ +#!/bin/bash + +set -e + +echo -e "\n\e[93m=============================================================================================================================================================================== + setup variables +=============================================================================================================================================================================== \e[39m" +export PROJECT_ROOT=/var/0chain # /var/0chain +echo -e "\e[32m Successfully set \e[23m \e[0;37m" + +echo -e "\n\e[93m=============================================================================================================================================================================== + updating loki logs cleanup on both sharder and miner. 
+=============================================================================================================================================================================== \e[39m" +pushd ${PROJECT_ROOT} > /dev/null; + if [[ -f loki-logs-cleanup-job.sh ]] ; then + echo -e "\e[32m loki-logs-cleanup-job.sh file is present \e[23m \e[0;37m" + sudo rm loki-logs-cleanup-job.sh + wget -N https://raw.githubusercontent.com/0chain/zcnwebappscripts/as-deploy/0chain/loki-logs-cleanup-job.sh + sudo chmod +x loki-logs-cleanup-job.sh + else + echo -e "\e[31m loki-logs-cleanup-job.sh file is not present. Please contact zus team \e[13m \e[0;37m" + exit 1 + fi +popd > /dev/null; diff --git a/0chain/fix_2_num_del.sh b/0chain/fix_2_num_del.sh new file mode 100644 index 00000000..6ab7cad5 --- /dev/null +++ b/0chain/fix_2_num_del.sh @@ -0,0 +1,33 @@ +#!/bin/bash + +set -e + +echo -e "\n\e[93m=============================================================================================================================================================================== + setup variables +=============================================================================================================================================================================== \e[39m" +export PROJECT_ROOT=/var/0chain # /var/0chain +echo -e "\e[32m Successfully set \e[23m \e[0;37m" + +echo -e "\n\e[93m=============================================================================================================================================================================== + updating --num_delegates to 1 on both sharder and miner. +=============================================================================================================================================================================== \e[39m" +pushd ${PROJECT_ROOT} > /dev/null; + if [[ -f delegate_wallet.json ]] ; then + if [[ -f keys/b0mnode1_keys.json ]] ; then + echo -e "\e[32m Updating miner \e[23m \e[0;37m" + prov_min_id=$(jq -r .client_id keys/b0mnode1_keys.json) + ./bin/zwallet mn-update-settings --id ${prov_min_id} --num_delegates 1 --configDir . --config ./config.yaml --wallet delegate_wallet.json + elif [[ -f keys/b0snode1_keys.json ]] ; then + echo -e "\e[32m Updating sharder \e[23m \e[0;37m" + prov_shar_id=$(jq -r .client_id keys/b0snode1_keys.json) + ./bin/zwallet mn-update-settings --sharder true --id ${prov_shar_id} --num_delegates 1 --configDir . --config ./config.yaml --wallet delegate_wallet.json + else + echo -e "\e[31m didn't find sharder/miner keys on the server. Please connect with zus team. \e[13m \e[0;37m" + exit 1 + fi + else + echo -e "\e[31m ##### Delegate wallet not present on your server. Please run the below command manually, replacing <provider_id> with your provider id from delegate_wallet.json. ##### \e[13m \e[0;37m" + echo -e "\e[32m ./bin/zwallet mn-update-settings --id <provider_id> --num_delegates 1 --configDir . --config ./config.yaml --wallet delegate_wallet.json \e[23m \e[0;37m" + exit 1 + fi +popd > /dev/null; diff --git a/0chain/generate_delegate_wallet.sh b/0chain/generate_miner_del_wallet.sh similarity index 98% rename from 0chain/generate_delegate_wallet.sh rename to 0chain/generate_miner_del_wallet.sh index e186bbd2..8219e987 100644 --- a/0chain/generate_delegate_wallet.sh +++ b/0chain/generate_miner_del_wallet.sh @@ -16,8 +16,9 @@ sudo apt update echo -e "\e[32m 2. Installing jq, zip, unzip. \e[23m \e[0;37m" sudo apt install jq zip unzip -y echo -e "\e[32m 2. Installing build essentials and gcc. 
\e[23m \e[0;37m" -sudo apt install build-essential nghttp2 libnghttp2-dev libssl-dev -y -sudo apt install gcc-11 g++-11 -y +sudo apt install software-properties-common -y +echo -ne '\n' | sudo add-apt-repository ppa:ubuntu-toolchain-r/test +sudo apt install build-essential nghttp2 libnghttp2-dev libssl-dev gcc-11 g++-11 -y echo -e "\n\e[93m=============================================================================================================================================================================== Persisting Delegate wallet inputs. @@ -68,7 +69,7 @@ pushd ${PROJECT_ROOT} > /dev/null; elif [[ ${ubuntu_version} -eq 20 || ${ubuntu_version} -eq 22 ]]; then curl -L "https://github.com/0chain/zcnwebappscripts/raw/as-deploy/0chain/artifacts/zwallet-binary.zip" -o /tmp/zwallet-binary.zip sudo unzip -o /tmp/zwallet-binary.zip && rm -rf /tmp/zwallet-binary.zip - mkdir bin + mkdir bin || true sudo cp -rf zwallet-binary/* ${PROJECT_ROOT}/bin/ sudo rm -rf zwallet-binary echo "block_worker: https://beta.zus.network/dns" > config.yaml diff --git a/0chain/generate_sharder_del_wallet.sh b/0chain/generate_sharder_del_wallet.sh new file mode 100644 index 00000000..e10d98b9 --- /dev/null +++ b/0chain/generate_sharder_del_wallet.sh @@ -0,0 +1,154 @@ +#!/bin/bash + +set -e + +############################################################ +# setup variables +############################################################ +export PROJECT_ROOT="/var/0chain" # /var/0chain +mkdir -p $PROJECT_ROOT + +echo -e "\n\e[93m=============================================================================================================================================================================== + Installing some pre-requisite tools on your server +=============================================================================================================================================================================== \e[39m" +echo -e "\e[32m 1. Apt update. \e[23m \e[0;37m" +sudo apt update +echo -e "\e[32m 2. Installing jq, zip, unzip. \e[23m \e[0;37m" +sudo apt install jq zip unzip -y +echo -e "\e[32m 2. Installing build essentials and gcc. \e[23m \e[0;37m" +sudo apt install software-properties-common -y +echo -ne '\n' | sudo add-apt-repository ppa:ubuntu-toolchain-r/test +sudo apt install build-essential nghttp2 libnghttp2-dev libssl-dev gcc-11 g++-11 -y + +echo -e "\n\e[93m=============================================================================================================================================================================== + Persisting Delegate wallet inputs. +=============================================================================================================================================================================== \e[39m" +pushd ${PROJECT_ROOT} > /dev/null; + + #Delegate wallet input + if [[ -f del_wal_id.txt ]] ; then + CLIENTID=$(cat del_wal_id.txt) + echo "Delegate wallet id already exists i.e.: ${CLIENTID}" + else + while true; do + read -p "Do you wish to enter delegate wallet id as an input? Input yes or no. " yn + case $yn in + [Yy]* ) + read -p "Enter the pregenerated delegate wallet id : " CLIENTID + sudo sh -c "echo -n ${CLIENTID} > del_wal_id.txt" + break;; + [Nn]* ) + echo "You entered no. Will create a new delegate wallet for you." 
+ break;; + * ) + echo "Please answer yes or no.";; + esac + done + fi + +popd > /dev/null; + +echo -e "\n\e[93m=============================================================================================================================================================================== + Generating delegate wallet. +=============================================================================================================================================================================== \e[39m" +pushd ${PROJECT_ROOT} > /dev/null; + + #Delegate Wallet Input + if [[ -n ${CLIENTID} ]] ; then + echo "Delegate wallet id found." + CLIENTID=$(cat del_wal_id.txt) + else + echo -e "\e[32m Creating new delegate wallet. \e[23m \e[0;37m" + if [[ -f bin/zwallet ]] ; then + echo "zwallet binary already present" + else + ubuntu_version=$(lsb_release -rs | cut -f1 -d'.') + if [[ ${ubuntu_version} -eq 18 ]]; then + echo "Ubuntu 18 is not supported" + exit 1 + elif [[ ${ubuntu_version} -eq 20 || ${ubuntu_version} -eq 22 ]]; then + curl -L "https://github.com/0chain/zcnwebappscripts/raw/as-deploy/0chain/artifacts/zwallet-binary.zip" -o /tmp/zwallet-binary.zip + sudo unzip -o /tmp/zwallet-binary.zip && rm -rf /tmp/zwallet-binary.zip + mkdir bin || true + sudo cp -rf zwallet-binary/* ${PROJECT_ROOT}/bin/ + sudo rm -rf zwallet-binary + echo "block_worker: https://beta.zus.network/dns" > config.yaml + echo "signature_scheme: bls0chain" >> config.yaml + echo "min_submit: 50" >> config.yaml + echo "min_confirmation: 50" >> config.yaml + echo "confirmation_chain_length: 3" >> config.yaml + echo "max_txn_query: 5" >> config.yaml + echo "query_sleep_time: 5" >> config.yaml + else + echo "Didn't find Ubuntu version 20 or 22." + fi + fi + ./bin/zwallet create-wallet --wallet delegate_wallet.json --configDir . --config config.yaml --silent + CLIENTID=$( jq -r .client_id delegate_wallet.json ) + sudo sh -c "echo -n ${CLIENTID} > del_wal_id.txt" + fi +popd > /dev/null; + +echo -e "\n\e[93m=============================================================================================================================================================================== + Verifying wallets in initial states. +=============================================================================================================================================================================== \e[39m" +pushd ${PROJECT_ROOT} > /dev/null; + + if grep "6dba10422e368813802877a85039d3985d96760ed844092319743fb3a76712d3" initial_states.yaml; then + sed -i "s/10000000000/10000000000000/g" ./initial_states.yaml + wallet_line_no=$(grep -n 6dba10422e368813802877a85039d3985d96760ed844092319743fb3a76712d3 initial_states.yaml | awk -F : '{print $1}') + sed -i "$wallet_line_no,$((++wallet_line_no))d" initial_states.yaml + # head -n -2 initial_states.yaml + fi + if ! 
grep "f1d14699ccad97ca893a635e68e128b0717f8a1aab1a071db6b40935cbfce90c" initial_states.yaml; then + cat <>initial_states.yaml + - id: f1d14699ccad97ca893a635e68e128b0717f8a1aab1a071db6b40935cbfce90c + tokens: 100000000000000 + - id: 7051ca0cf6f6157a54fa91570d2bb8ab8723b1050381b3d95b66debfdbcf5416 + tokens: 100000000000000 + - id: 4de1553b44e4942593b96ca2ee86d543967762929bf6db9c7c65a7446984e6f1 + tokens: 100000000000000 + - id: c7cd30d15f713068e65c6469df38d84ec128267bba2c4067360b4d69f208c75e + tokens: 100000000000000 + - id: 37567fc630d9b747364678158df4fcae8d5da2c077681a592ff406c143b5c664 + tokens: 100000000000000 + - id: ee1a04d880f03c8f9df25f825a27526a34626dcf9bcffd5c7c182919315e899e + tokens: 100000000000000 + - id: 07aebb92690d3946e5f66b8088ca1fd5e8049dbf203167fc000e22a0a9ea9071 + tokens: 100000000000000 + - id: 14b16712cc0d3d2573299a474c0e297616aaea2413709fa8d3d6fda698609142 + tokens: 100000000000000 + - id: 5d6bb641dac8fd6d78efe64436ec4b096e2c67ba43386084fe7bce48389a8394 + tokens: 100000000000000 + - id: 120501bbbf5f1cbcfb939952e37ef7ff85bf0282031e2ec81edaa5f424242ae8 + tokens: 100000000000000 + - id: 1746b06bb09f55ee01b33b5e2e055d6cc7a900cb57c0a3a5eaabb8a0e7745802 + tokens: 100000000000000 + - id: 65b32a635cffb6b6f3c73f09da617c29569a5f690662b5be57ed0d994f234335 + tokens: 100000000000000000 +EOF + else + echo "Wallet's already added." + fi + + if [[ $(wc -l initial_states.yaml | awk '{print $1}') != 285 ]]; then + echo "Initial stats file corrupted, Please contact zus team." + exit 1 + fi +popd > /dev/null; + +echo -e "\n\e[93m=============================================================================================================================================================================== + Ouput Json that will be shared with zus team. +=============================================================================================================================================================================== \e[39m" +pushd ${PROJECT_ROOT} > /dev/null; + if [[ ! $(curl -I --silent -o /dev/null -w %{http_code} https://del-wal.0chain.net/getOutput) =~ 2[0-9][0-9] ]]; then + echo "Delegate wallet address server is down. Please contact zus team." + exit 1 + fi + echo "Details need to be send to ZUS Team." 
+ echo "{\"provider_id\":\"$( jq -r .client_id keys/b0snode1_keys.json )\",\"domain\":\"$(cat sharder/url.txt)\",\"client_id\":\"$(cat del_wal_id.txt)\"}" > send_tokens.json + echo + cat send_tokens.json + curl -X POST -F "file=@send_tokens.json" https://del-wal.0chain.net/addData + echo +pushd > /dev/null; diff --git a/0chain/grafana-portainer/grafana/docker_system_monitoring.json b/0chain/grafana-portainer/grafana/docker_system_monitoring.json index a2bb8af1..77e1fe74 100644 --- a/0chain/grafana-portainer/grafana/docker_system_monitoring.json +++ b/0chain/grafana-portainer/grafana/docker_system_monitoring.json @@ -26,7 +26,7 @@ "fiscalYearStartMonth": 0, "gnetId": 893, "graphTooltip": 1, - "id": 1, + "id": null, "links": [], "liveNow": false, "panels": [ @@ -2087,7 +2087,7 @@ }, "datasource": { "type": "prometheus", - "uid": "P86337B8C7A1323D2" + "uid": "${Datasource}" }, "definition": "label_values(node_boot_time_seconds, instance)", "hide": 0, @@ -2160,4 +2160,4 @@ "uid": "docker-overview", "version": 1, "weekStart": "" - } \ No newline at end of file + } diff --git a/0chain/grafana-portainer/loki/loki-config.yaml b/0chain/grafana-portainer/loki/loki-config.yaml index 47bfffe7..ca5d2452 100644 --- a/0chain/grafana-portainer/loki/loki-config.yaml +++ b/0chain/grafana-portainer/loki/loki-config.yaml @@ -51,6 +51,10 @@ storage_config: compactor: working_directory: /data/compactor # directory where marked chunks and temporary tables will be saved shared_store: filesystem + retention_enabled: true + compaction_interval: 24h + retention_delete_delay: 1m + retention_delete_worker_count: 150 limits_config: reject_old_samples: true diff --git a/0chain/loki-logs-cleanup-job.sh b/0chain/loki-logs-cleanup-job.sh new file mode 100644 index 00000000..935c0863 --- /dev/null +++ b/0chain/loki-logs-cleanup-job.sh @@ -0,0 +1,3 @@ +docker stop loki +rm -rf /var/0chain/grafana-portainer/loki/chunks +docker start loki diff --git a/0chain/miner-files/docker.local/bin/init.setup.sh b/0chain/miner-files/docker.local/bin/init.setup.sh index cef497f4..c1015101 100644 --- a/0chain/miner-files/docker.local/bin/init.setup.sh +++ b/0chain/miner-files/docker.local/bin/init.setup.sh @@ -10,5 +10,5 @@ do mkdir -p $SSD_PATH/docker.local/miner"$i"/data/redis/transactions mkdir -p $SSD_PATH/docker.local/miner"$i"/data/rocksdb mkdir -p $SSD_PATH/docker.local/miner"$i"/log - chmod 755 -R $SSD_PATH/docker.local/miner"$i" + chmod 777 -R $SSD_PATH/docker.local/miner"$i" done \ No newline at end of file diff --git a/0chain/miner-files/docker.local/build.miner/p0docker-compose.yaml b/0chain/miner-files/docker.local/build.miner/p0docker-compose.yaml index 1accf0b2..f5bd3e6e 100644 --- a/0chain/miner-files/docker.local/build.miner/p0docker-compose.yaml +++ b/0chain/miner-files/docker.local/build.miner/p0docker-compose.yaml @@ -34,7 +34,7 @@ services: miner: container_name: miner-${MINER} - image: 0chaindev/miner:v1.10.0 + image: 0chaindev/miner:v1.11.0 environment: - DOCKER=true - REDIS_HOST=miner-redis-${MINER} @@ -51,7 +51,7 @@ services: - ${PROJECT_ROOT_SSD}/miner${MINER}/log:/0chain/log ports: - "707${MINER}:707${MINER}" - command: ./bin/miner --deployment_mode 0 --keys_file config/b0mnode${MINER}_keys.txt --dkg_file config/dkgSummary-${MINER}_dkg.json + command: ./bin/miner --deployment_mode 2 --keys_file config/b0mnode${MINER}_keys.txt --dkg_file config/dkgSummary-${MINER}_dkg.json restart: unless-stopped networks: testnet0: diff --git a/0chain/miner-files/docker.local/config/0chain.yaml 
b/0chain/miner-files/docker.local/config/0chain.yaml index e37f708e..18575177 100644 --- a/0chain/miner-files/docker.local/config/0chain.yaml +++ b/0chain/miner-files/docker.local/config/0chain.yaml @@ -1,29 +1,15 @@ version: 1.0 logging: - level: "debug" - verbose: true + level: "error" + verbose: false console: false # printing log to console is only supported in development mode goroutines: false memlog: false -development: - smart_contract: - zrc20: true - txn_generation: - wallets: 50 - max_transactions: 0 - max_txn_fee: 10000 - min_txn_fee: 0 - max_txn_value: 10000000000 - min_txn_value: 100 - faucet: - refill_amount: 1000000000000000 - pprof: true - server_chain: id: "0afc093ffb509f059c55478bc1a60351cef7b4e9c008a53a6cc8241ca8617dfe" - owner: "edb90b850f2e7e7cbd0a1fa370fdcc5cd378ffbec95363a7bc0e5a98b8ba5759" + owner: "ac112d5620eb7918e08086506b4c5fba9419be09b3dd97e47e02778b0927dcc4" decimals: 10 tokens: 200000000 genesis_block: @@ -33,13 +19,13 @@ server_chain: max_block_cost: 10000 #equal to 100ms max_byte_size: 1638400 min_generators: 2 - generators_percent: 0.15 + generators_percent: 0.1 replicators: 0 generation: timeout: 15 retry_wait_time: 5 #milliseconds proposal: - max_wait_time: 180ms + max_wait_time: 250ms wait_mode: static # static or dynamic consensus: threshold_by_count: 66 # percentage (registration) @@ -66,16 +52,15 @@ server_chain: payload: max_size: 98304 # bytes timeout: 1800 #30 minutes - min_fee: 0 - max_fee: 0.000000001 # 10 SAS - cost_fee_coeff: 100000 # 1000000 costs represents 1 ZCN, 1000 cost represents 1 mZCN + min_fee: 0.0001 + max_fee: 0.2 # max fee per txn would be 1 ZCN, adjust later if needed transfer_cost: 10 + cost_fee_coeff: 100000 # 100000 unit cost per 1 ZCN future_nonce: 100 # allow 100 nonce ahead of current client state exempt: - contributeMpk - shareSignsOrShares - wait - - mint - pour client: signature_scheme: bls0chain # ed25519 or bls0chain @@ -95,7 +80,7 @@ server_chain: setting_update_period: 200 #rounds timeout: 8000ms storage: true - faucet: true + faucet: false miner: true multisig: false vesting: false @@ -135,10 +120,10 @@ server_chain: max_idle_conns: 100 max_open_conns: 200 conn_max_lifetime: 20s - slowtablespace: hddtablespace + slowtablespace: hdd_tablespace settings: # event database settings blockchain - debug: true + debug: false aggregate_period: 4000 partition_change_period: 10000 partition_keep_count: 10 @@ -146,7 +131,7 @@ server_chain: network: magic_block_file: config/b0magicBlock.json - initial_states: config/initial_state.yaml + initial_states: config/initial_states.yaml genesis_dkg: 0 dns_url: "" # http://198.18.0.98:9091 relay_time: 200 # milliseconds @@ -156,27 +141,25 @@ network: large_message: 3000 # milliseconds large_message_th_size: 5120 # anything greater than this size in bytes user_handlers: - rate_limit: 100000000 # 100000000 per second + rate_limit: 1000 # 1000 per second n2n_handlers: - rate_limit: 10000000000 # 10000000000 per second + rate_limit: 1000 # 10000 per second # delegate wallet is wallet that used to configure node in Miner SC; if its # empty, then node ID used delegate_wallet: "" # % of fees and rewards for generator -service_charge: 0.10 # [0; 1) of all fees +service_charge: 0 # [0; 1) of all fees # max number of delegate pools allowed by a node in miner SC -number_of_delegates: 10 # max number of delegate pools - -cassandra: - connection: - delay: 10 # in seconds - retries: 10 -# host: cassandra -# port: 9042 +number_of_delegates: 100 # max number of delegate pools +# There's a TODO comment 
in fs_store.go. Please check this while we go into production. storage: -# Uncomment the following lines to enable cache. +# cache is optional. It should be SSD drive. Having HDD drive as cache is not effective. +# Cache is effective when blocks are stored in HDD. Cache stores uncompressed blocks so that +# accessing and unmarshalling is faster than with compressed block in HDD. +# +# Uncomment the following lines to enable cache. # cache: # path: "/mnt/ssd/sharder1" # total_blocks: 1000 # Total number of blocks this cache will store diff --git a/0chain/miner-files/docker.local/config/sc.yaml b/0chain/miner-files/docker.local/config/sc.yaml index 90e155b4..db60ec4c 100644 --- a/0chain/miner-files/docker.local/config/sc.yaml +++ b/0chain/miner-files/docker.local/config/sc.yaml @@ -1,36 +1,37 @@ smart_contracts: faucetsc: - owner_id: 1746b06bb09f55ee01b33b5e2e055d6cc7a900cb57c0a3a5eaabb8a0e7745802 + owner_id: ac112d5620eb7918e08086506b4c5fba9419be09b3dd97e47e02778b0927dcc4 pour_limit: 1 pour_amount: 1 - max_pour_amount: 100000 - periodic_limit: 100000000 - global_limit: 100000000 - individual_reset: 10m # in hours - global_reset: 10m # in hours + max_pour_amount: 10 + periodic_limit: 1000 + global_limit: 100000 + individual_reset: 3h # in hours + global_reset: 48h # in hours cost: update-settings: 100 - pour: 226 + pour: 100 refill: 100 minersc: - owner_id: 1746b06bb09f55ee01b33b5e2e055d6cc7a900cb57c0a3a5eaabb8a0e7745802 + owner_id: ac112d5620eb7918e08086506b4c5fba9419be09b3dd97e47e02778b0927dcc4 # miners max_n: 103 # 100 min_n: 103 # 3 # sharders - max_s: 28 # 30 + max_s: 27 # 30 min_s: 1 # 1 # max delegates allowed by SC - max_delegates: 200 # + max_delegates: 100 # # DKG t_percent: .66 # of active k_percent: .75 # of registered x_percent: 0.70 # percentage of prev mb miners required to be part of next mb # etc - min_stake: 0.0 # min stake can be set by a node (boundary for all nodes) - max_stake: 20000.0 # max stake can be set by a node (boundary for all nodes) + min_stake: 1000.0 # min stake can be set by a node (boundary for all nodes) + max_stake: 2000000.0 # max stake can be set by a node (boundary for all nodes) + min_stake_per_delegate: 50000.0 start_rounds: 50 contribute_rounds: 50 share_rounds: 50 @@ -40,13 +41,13 @@ smart_contracts: reward_rate: 1.0 # [0; 1) # share ratio is miner/block sharders rewards ratio, for example 0.1 # gives 10% for miner and rest for block sharders - share_ratio: 0.16 # [0; 1) + share_ratio: 0.79 # [0; 1) # reward for a block - block_reward: 0.068 # tokens + block_reward: 0.09 # tokens # max service charge can be set by a generator max_charge: 0.5 # % # epoch is number of rounds before rewards and interest are decreased - epoch: 125000000 # rounds + epoch: 95000000 # rounds # decline rewards every new epoch by this value (the block_reward) reward_decline_rate: 0.1 # [0; 1), 0.1 = 10% # no mints after miner SC total mints reaches this boundary @@ -62,49 +63,50 @@ smart_contracts: health_check_period: 90m cooldown_period: 100 cost: - add_miner: 318 + add_miner: 361 add_sharder: 331 - delete_miner: 435 - delete_sharder: 308 - miner_health_check: 137 - sharder_health_check: 141 - contributeMpk: 1347 - shareSignsOrShares: 495 + delete_miner: 484 + delete_sharder: 335 + miner_health_check: 149 + sharder_health_check: 145 + contributeMpk: 1433 + shareSignsOrShares: 509 wait: 100 #todo - update_globals: 250 - update_settings: 120 - update_miner_settings: 125 - update_sharder_settings: 125 - payFees: 1230 + update_globals: 269 + update_settings: 136 + 
update_miner_settings: 137 + update_sharder_settings: 134 + payFees: 1356 feesPaid: 100 #todo mintedTokens: 100 #todo - addToDelegatePool: 175 - deleteFromDelegatePool: 139 - sharder_keep: 197 - collect_reward: 208 - kill_miner: 133 - kill_sharder: 138 + addToDelegatePool: 186 + deleteFromDelegatePool: 150 + sharder_keep: 211 + collect_reward: 230 + kill_miner: 146 + kill_sharder: 140 storagesc: - owner_id: 1746b06bb09f55ee01b33b5e2e055d6cc7a900cb57c0a3a5eaabb8a0e7745802 + owner_id: ac112d5620eb7918e08086506b4c5fba9419be09b3dd97e47e02778b0927dcc4 # the time_unit is a duration used as divider for a write price; a write # price measured in tok / GB / time_unit, where the time_unit is this # configuration; for example 1h, 24h (a day), 720h (a month -- 30 days); - time_unit: "720h" - min_stake: 0.01 # min stake can be set by a node (boundary for all nodes) - max_stake: 20000.0 # max stake can be set by a node (boundary for all nodes) + time_unit: "8760h" + min_stake: 1.0 # min stake can be set by a node (boundary for all nodes) + max_stake: 2000000.0 # max stake can be set by a node (boundary for all nodes) # max_mint max_mint: 75000000.0 # tokens, max amount of tokens can be minted by SC + min_stake_per_delegate: 10.0 # min possible allocations size in bytes allowed by the SC - min_alloc_size: 1024 - # max challenge completion time of a blobber allowed by the SC - max_challenge_completion_time: "10m" + min_alloc_size: 1073741824 + # max challenge completion round of a blobber allowed by the SC + max_challenge_completion_rounds: 1200 # min blobber's offer duration allowed by the SC - min_offer_duration: "10h" #todo based on timeunit + min_offer_duration: "8760h" #todo based on timeunit # min blobber capacity allowed by the SC - min_blobber_capacity: 1024 + min_blobber_capacity: 10995116277760 # fraction of the allocation cost that is locked in the cancellation charge cancellation_charge: 0.2 - min_lock_demand: 0.1 + min_lock_demand: 1 # users' read pool related configurations readpool: min_lock: 0.0 # tokens @@ -114,18 +116,18 @@ smart_contracts: # stake pool configurations stakepool: # minimal lock for a delegate pool - min_lock: 0.01 # tokens + min_lock: 0.1 # tokens kill_slash: 0.5 # following settings are for free storage rewards # # summarized amount for all assigner's lifetime - max_total_free_allocation: 100000000000000000 #todo figure out how it works + max_total_free_allocation: 10000000 # the limit of tokens can be minted on each free_allocation_request - max_individual_free_allocation: 1000000 + max_individual_free_allocation: 1 # allocation settings for free storage # these values are applied to all free allocations free_allocation_settings: - data_shards: 2 + data_shards: 6 parity_shards: 3 read_pool_fraction: 0 read_price_range: @@ -133,7 +135,7 @@ smart_contracts: min: 0 size: 2147483648 write_price_range: - max: 1 + max: 0.025 min: 0 validator_reward: 0.025 # blobber_slash represents blobber's stake penalty when a challenge not @@ -142,28 +144,32 @@ smart_contracts: # duration between health check after which a blobber or validator is considered inactive health_check_period: 90m # max prices for blobbers (tokens per GB) - max_read_price: 100.0 - max_write_price: 100.0 + max_read_price: 0.0 + max_write_price: 0.025 min_write_price: 0.001 - max_blobbers_per_allocation: 40 + # max file size on blobber + max_file_size: 549755813888 # 512GB + max_blobbers_per_allocation: 30 # # challenges # # enable challenges challenge_enabled: true + challenge_generation_gap : 1 # number of 
validators per challenge - validators_per_challenge: 2 + validators_per_challenge: 10 num_validators_rewarded: 10 + max_blobber_select_for_challenge: 5 # max delegates per stake pool allowed by SC - max_delegates: 200 + max_delegates: 100 # max_charge allowed for blobbers; the charge is part of blobber rewards # goes to blobber's delegate wallets, other part goes to related stake # holders max_charge: 0.50 # reward paid out every block block_reward: - block_reward: 0.06 - block_reward_change_period: 125000000 + block_reward: 2.37 + block_reward_change_period: 95000000 block_reward_change_ratio: 0.1 qualifying_stake: 1 trigger_period: 30 @@ -176,43 +182,39 @@ smart_contracts: k: 0.9 mu: 0.2 cost: - update_settings: 135 - read_redeem: 606 - commit_connection: 670 - new_allocation_request: 3000 - update_allocation_request: 2500 - finalize_allocation: 993 - cancel_allocation: 5000 - add_free_storage_assigner: 115 - free_allocation_request: 2417 - free_update_allocation: 2500 - blobber_health_check: 88 - validator_health_check: 87 - update_blobber_settings: 309 - update_validator_settings: 214 - pay_blobber_block_rewards: 807 - challenge_request: 100 #todo - challenge_response: 684 - add_validator: 443 - add_blobber: 240 - new_read_pool: 94 - read_pool_lock: 154 - read_pool_unlock: 93 - write_pool_lock: 167 - write_pool_unlock: 103 - stake_pool_lock: 167 - stake_pool_unlock: 103 - stake_pool_pay_interests: 100 #todo - commit_settings_changes: 52 - generate_challenge: 694 - blobber_block_rewards: 806 - collect_reward: 158 - kill_blobber: 669 - kill_validator: 350 - shutdown_blobber: 100 - shutdown_validator: 100 + update_settings: 143 + read_redeem: 664 + commit_connection: 743 + new_allocation_request: 1919 + update_allocation_request: 2692 + finalize_allocation: 1091 + cancel_allocation: 1163 + add_free_storage_assigner: 124 + free_allocation_request: 2132 + free_update_allocation: 1468 + blobber_health_check: 97 + validator_health_check: 109 + update_blobber_settings: 338 + update_validator_settings: 247 + pay_blobber_block_rewards: 100 #todo + challenge_response: 728 + add_validator: 348 + add_blobber: 266 + read_pool_lock: 170 + read_pool_unlock: 104 + write_pool_lock: 186 + stake_pool_lock: 187 + stake_pool_unlock: 119 + commit_settings_changes: 56 + generate_challenge: 600 + blobber_block_rewards: 794 + collect_reward: 181 + kill_blobber: 651 + kill_validator: 277 + shutdown_blobber: 597 + shutdown_validator: 227 vestingsc: - owner_id: 1746b06bb09f55ee01b33b5e2e055d6cc7a900cb57c0a3a5eaabb8a0e7745802 + owner_id: ac112d5620eb7918e08086506b4c5fba9419be09b3dd97e47e02778b0927dcc4 min_lock: 0.01 min_duration: "2m" max_duration: "2h" @@ -226,14 +228,17 @@ smart_contracts: delete: 100 vestingsc-update-settings: 100 zcnsc: - owner_id: 1746b06bb09f55ee01b33b5e2e055d6cc7a900cb57c0a3a5eaabb8a0e7745802 + owner_id: ac112d5620eb7918e08086506b4c5fba9419be09b3dd97e47e02778b0927dcc4 min_mint: 1 min_burn: 1 - min_stake: 0 + min_stake: 1000 + max_stake: 20000.0 # max stake can be set by a node (boundary for all nodes) + min_stake_per_delegate: 10000 min_authorizers: 1 percent_authorizers: 0.7 + max_mint: 200000000 # ZCN max_delegates: 10 - max_fee: 100 #todo change the wording + max_fee: 10000000 #todo change the wording burn_address: "0000000000000000000000000000000000000000000000000000000000000000" #todo maybe we should use sc address health_check_period: 90m cost: @@ -242,3 +247,4 @@ smart_contracts: add-authorizer: 100 authorizer-health-check: 100 delete-authorizer: 100 + update-global-config: 100 diff 
--git a/0chain/miner_migration_init_setup.sh b/0chain/miner_migration_init_setup.sh new file mode 100644 index 00000000..95955872 --- /dev/null +++ b/0chain/miner_migration_init_setup.sh @@ -0,0 +1,63 @@ +#!/bin/bash + +set -e + +############################################################ +# setup variables +############################################################ +export PROJECT_ROOT="/var/0chain" # /var/0chain +export PROJECT_ROOT_SSD=/var/0chain/miner/ssd # /var/0chain/miner/ssd +export PROJECT_ROOT_HDD=/var/0chain/miner/hdd # /var/0chain/miner/hdd + +mkdir -p ${PROJECT_ROOT}/miner/ssd +mkdir -p ${PROJECT_ROOT}/miner/hdd + +echo -e "\n\e[93m=============================================================================================================================================================================== + Installing some pre-requisite tools on your server +=============================================================================================================================================================================== \e[39m" +echo -e "\e[32m 1. Apt update. \e[23m \e[0;37m" +sudo apt update +echo -e "\e[32m 2. Installing qq. \e[23m \e[0;37m" +sudo apt install -qq -y +echo -e "\e[32m 3. Installing unzip, dnsutils, ufw, ntp, ntpdate. \e[23m \e[0;37m" +sudo apt install unzip dnsutils ufw ntp ntpdate -y +echo -e "\e[32m 4. Installing docker & docker-compose. \e[23m \e[0;37m" +DOCKERCOMPOSEVER=v2.2.3 ; sudo apt install docker.io -y; sudo systemctl enable --now docker ; docker --version ; sudo curl -L "https://github.com/docker/compose/releases/download/$DOCKERCOMPOSEVER/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose; sudo chmod +x /usr/local/bin/docker-compose ; docker-compose --version +sudo chmod 777 /var/run/docker.sock &> /dev/null +sudo wget -qO /usr/local/bin/yq https://github.com/mikefarah/yq/releases/latest/download/yq_linux_amd64 || true +sudo chmod a+x /usr/local/bin/yq || true +yq --version || true + +echo -e "\n\e[93m=============================================================================================================================================================================== + Setting up ntp +=============================================================================================================================================================================== \e[39m" +sudo ufw disable +sudo ufw allow 123/udp +sudo ufw allow out to any port 123 +sudo systemctl stop ntp +sudo ntpdate pool.ntp.org +sudo systemctl start ntp +sudo systemctl enable ntp + +echo -e "\n\e[93m=============================================================================================================================================================================== + Checking docker service running or not +=============================================================================================================================================================================== \e[39m" +echo -e "\e[32m 1. Docker status. \e[23m" +if (systemctl is-active --quiet docker) ; then + echo -e "\e[32m docker is running fine. \e[23m \n" +else + echo -e "\e[31m $REQUIRED_PKG is failing to run. Please check and resolve it first. You can connect with team for support too. \e[13m \n" + exit 1 +fi + +# echo -e "\n\e[93m=============================================================================================================================================================================== +# Checking URL entered is resolving or not. 
+# =============================================================================================================================================================================== \e[39m" +# ipaddr=$(curl api.ipify.org) +# myip=$(dig +short $PUBLIC_ENDPOINT) +# if [[ "$myip" != "$ipaddr" ]]; then +# echo "$PUBLIC_ENDPOINT IP resolution mistmatch $myip vs $ipaddr" +# exit 1 +# else +# echo "SUCCESS $PUBLIC_ENDPOINT resolves to $myip" +# fi diff --git a/0chain/miner_snapshot_build_restic.sh b/0chain/miner_snapshot_build_restic.sh new file mode 100644 index 00000000..1ab00cb2 --- /dev/null +++ b/0chain/miner_snapshot_build_restic.sh @@ -0,0 +1,55 @@ +#!/bin/bash + +export MINER_SNAP=$1 +export SNAP_VERSION=$2 #date + +# Stop miner and redis container on the server +echo -e "\n\e[93m=============================================================================================================================================================================== + Stop miner and postgres container on the server. +=============================================================================================================================================================================== \e[39m" +docker stop miner-1 miner-redis-txns-1 miner-redis-1 + +# Creating snapshot folder +echo -e "\n\e[93m=============================================================================================================================================================================== + Creating snapshot folder to store snapshot. +=============================================================================================================================================================================== \e[39m" +cd ~ +mkdir snapshots + +echo -e "\n\e[93m=============================================================================================================================================================================== + Creating snapshot files. +=============================================================================================================================================================================== \e[39m" +# Downloading snapshot +cd ~/snapshots +rm -rf miner-rocksdb-${SNAP_VERSION}.tar.gz + +echo "Creating tar file for miner-rocksdb-${SNAP_VERSION}.tar.gz" +tar -cvf - /var/0chain/miner/ssd/docker.local/miner1/data/rocksdb | pigz -p 10 > miner-rocksdb-${SNAP_VERSION}.tar.gz + +echo -e "\n\e[93m=============================================================================================================================================================================== + Start miner and postgres container on the server. +=============================================================================================================================================================================== \e[39m" +docker start miner-1 miner-redis-txns-1 miner-redis-1 + +echo -e "\n\e[93m=============================================================================================================================================================================== + Moving snapshot files to zus storage using restic. 
+=============================================================================================================================================================================== \e[39m" +echo "Set environment variable to zs3server" +export AWS_ACCESS_KEY_ID=rootroot +export AWS_SECRET_ACCESS_KEY=rootroot +export RESTIC_REPOSITORY="s3:http://65.109.152.43:9004/miner/" +export RESTIC_PASSWORD="resticroot" + +restic -r s3:http://65.109.152.43:9004/miner/ --verbose backup ./* + +if [ $? -eq 0 ]; then + echo "Snapshot stored to zus successfully." +else + echo "Snapshot upload failed." + exit +fi + +echo -e "\n\e[93m=============================================================================================================================================================================== + Link to docs to deploy miner snapshot. +=============================================================================================================================================================================== \e[39m" +echo "Follow docs to deploy snapshot to bad miner --> https://0chaindocs.gitbook.io/as-onboarding/recovery-from-snapshots/steps-to-apply-snapshot" diff --git a/0chain/miner_snapshot_recovery_restic.sh b/0chain/miner_snapshot_recovery_restic.sh new file mode 100644 index 00000000..37264859 --- /dev/null +++ b/0chain/miner_snapshot_recovery_restic.sh @@ -0,0 +1,65 @@ +#!/bin/bash + +export TAG=$1 #image_tag +export SNAP_ID=$2 #latest +export SNAP_VERSION=$3 #date + +# Stopping existing/running miner and postgres +echo -e "\n\e[93m=============================================================================================================================================================================== + Stop miner and postgres container on the server. +=============================================================================================================================================================================== \e[39m" +docker rm -f miner-1 miner-redis-txns-1 miner-redis-1 + +# Removing and Backup old data +echo -e "\n\e[93m=============================================================================================================================================================================== + Backing up and Removing miner data from the server. +=============================================================================================================================================================================== \e[39m" +cd /var/0chain/miner/ssd/docker.local/miner1/data/ +if [ -d "./rocksdb" ]; then + echo "Removing older ssd /rocksdb" + rm -rf rocksdb +fi + +echo -e "\n\e[93m=============================================================================================================================================================================== + Downloading latest snapshot from the zus storage. 
+=============================================================================================================================================================================== \e[39m" +# Downloading snapshots +cd ~ +mkdir snapshots || true +cd snapshots +echo "Removing previous pulled snapshots if exists" +rm -rf miner* + +echo "Installing Restic tool on the server" +sudo apt update -y +sudo apt install restic -y + +echo "Set environment variable to zs3server" +export AWS_ACCESS_KEY_ID=rootroot +export AWS_SECRET_ACCESS_KEY=rootroot +export RESTIC_REPOSITORY="s3:http://65.109.152.43:9004/miner/" +export RESTIC_PASSWORD="resticroot" + +restic cache --cleanup +restic restore ${SNAP_ID} --target ./ --verbose + +# if [ $? -eq 0 ]; then +# echo "snapshots downloaded from zus successfully." +# else +# echo "snapshots download failed. Please contact zus team" +# exit +# fi + +echo -e "\n\e[93m=============================================================================================================================================================================== + Extracting snapshot files into destination folder. +=============================================================================================================================================================================== \e[39m" +cd ~/snapshots +# extract miner-rocksdb.tar.gz to path /var/0chain/miner/ssd/docker.local/miner1/data/ +echo "extract miner-rocksdb-${SNAP_VERSION}.tar.gz to path /var/0chain/miner/ssd/docker.local/miner1/data/rocksdb" +tar -zxvf miner-rocksdb-${SNAP_VERSION}.tar.gz -C / +chmod 777 -R /var/0chain/miner/ssd/docker.local/miner1 + +# Starting miner with snapshot data +yq e -i ".services.miner.image = \"0chaindev/miner:${TAG}\"" /var/0chain/miner/ssd/docker.local/build.miner/p0docker-compose.yaml +cd /var/0chain/miner/ssd/docker.local/miner1/ +sudo bash ../bin/start.p0miner.sh /var/0chain/miner/ssd /var/0chain/miner/hdd diff --git a/0chain/others/del_wallets.json b/0chain/others/del_wallets.json new file mode 100644 index 00000000..bfc31c31 --- /dev/null +++ b/0chain/others/del_wallets.json @@ -0,0 +1,552 @@ +[ + { + "client_id": "c5ef5c4fb82f2aba0f74c9b832a2860ad5aa7af90ffa8547c640bd3076322e72", + "domain": "sharder.bitjuice.xyz", + "provider_id": "5ccda6733db5e582ec67f537288f9be4c28d1518242210ad4322cab1f74670c3" + }, + { + "client_id": "568975d6c7d21078c017f5224607e2facb342433eeae21aaebd942087770f21d", + "domain": "miner-2.pandora-vault.com", + "provider_id": "5b888875a8dc93be1ea20fbe98a3bef04634830b089e9cf8b6bbdc0b633cf943" + }, + { + "client_id": "608515dbe81907951bd537ec516b79e23d21eb51a9da5b5faee439609f2071f9", + "domain": "shard.zus-network.com", + "provider_id": "df2d7ad41bd12b55018c1e139d4aca680a66e4fa2d55d6c6b0156070793780eb" + }, + { + "client_id": "4ed96e4c29b165542402b1feb83114b0a97a0f4970ce3e21af67284238edc353", + "domain": "mnr.zus-storage.com", + "provider_id": "67b1447d5483dcabd86a680400f27a2e99132b879181bdeb3549b768386e258e" + }, + { + "client_id": "38f23c8e7459d6df01a1aded8e31b37c7f32c793331c81c9534cb848c692a138", + "domain": "mining.zus-network.com", + "provider_id": "f90efe8bfe60ff66bf560d3a795d5d8970148b3e939de1bfc1fec35c5b6b88c2" + }, + { + "client_id": "e363abbb7281d08d778266d18d3c5e0f8985077a9d3e8101bd6bbd2cc0555a66", + "domain": "miner.bitjuice.xyz", + "provider_id": "4fd355848e3e3efa9c87f4627db1395262a3900402ef7712a347573ae58b1697" + }, + { + "client_id": "8343f1eeac0eab7d378d4a64689e3180622eb7c1659303a5925c9e9585a63150", + "domain": "0.fra.zcn.zeroservices.eu", + 
"provider_id": "34b3b8c4d5bf1690f29072123c33195407b130000d1a1f0ec55050ead77ba9da" + }, + { + "client_id": "ba79493a70c86c6c252639c7ade3cd2c6abb26fbfe0e98ff387fb60a83c665b6", + "domain": "gus.badminers.com", + "provider_id": "6cea31163d04c3f77e27f4803201863c468cd9c8697fb0b39fbf6d89e20962d9" + }, + { + "client_id": "936575e3a0100551dd52d9085306ce3f5cdbb1b3a60def60a0e105473a01e4f5", + "domain": "jesse.badminers.com", + "provider_id": "00175a3bcfeff85551e7348346286d9971e7f23678fb681ccba91e836a411ef8" + }, + { + "client_id": "25eb7929339b304e3f0c007d7f7331a0939e5957d0d69ba17c59f6ccb4649525", + "domain": "walt.badminers.com", + "provider_id": "b2fd91df13c013586e911979a401dc624e61735185655b48bf391e9e37472cf6" + }, + { + "client_id": "6dfe821e4f32ae2be88c0efebf9a72af052ab3b97efbf30861c112cf0c35fc20", + "domain": "viewpoint.ddns.net", + "provider_id": "907b45f6c9125f050b5e8029b33f434cecf25270acb67c6695c80ae622dfd119" + }, + { + "client_id": "0073afe9bbf59dfa33d044c2b62512c85caf83e6aa6ba4bcb7f7aa6151ed0f31", + "domain": "saul.badminers.com", + "provider_id": "4d7ae4c40ed17cb49d6e6a5c7b1c3e4a8cca871844f7e4709673ff30ee858ab7" + }, + { + "client_id": "8756fe773055753ed01fcb678730149910f1d4a6958e0dbf59d6efbc7c70f792", + "domain": "donkeykongs1.arturocapital.com", + "provider_id": "0781a11c2733b048130c89884dd283b795ad0beb97502c70bab302a5712d6426" + }, + { + "client_id": "334fb997ae693271a5412f10c2c302f1fd95c587a70ef257b3c9f37432c4210b", + "domain": "miner.pandora-vault.com", + "provider_id": "6b7f2dd806f4edc410fcf2502b790aa0c5e1b3c3bb1979a26b3504f18f62ea2a" + }, + { + "client_id": "183a34c1f6fdc4ff49886d145206d8bb9f3152e04ae6dbdaa8429e5825d061d9", + "domain": "sharder.pandora-vault.com", + "provider_id": "0ef0d1f77ade53681e4a393020094ad4a7e34dd2c7003ef173ef8071d2c5672a" + }, + { + "client_id": "68314a298431ca6690e2aa2fa65cb08f4ceb2a39501283ecf55b70b6df08975b", + "domain": "mb01.0chainstaking.net", + "provider_id": "7b64976ab54e38165ce6049fe6e2f4fe34a6c0929e486cb8e5e535c436ed1853" + }, + { + "client_id": "50a07fc73a73148f1de02e90e75bd109239bbad87b07f016a12ee9354911c13c", + "domain": "s01.0chainstaking.net", + "provider_id": "5e6d077f09291e0a2fcdb21f5415f40d22766b71ca9218ffb51db8100a3e6ce9" + }, + { + "client_id": "1e8fe997db90ed6475fad44d0751a697c58315194b57d63a456efeb4c565aa08", + "domain": "miner6.nodely.store", + "provider_id": "76c296511da5de03cebf5a491834a363c45f331285330a4aaf5cc2df4c87bbe3" + }, + { + "client_id": "0d64a96dd4a4112057399f78c26462a251a5d49ca9f0a0674447678403d90db4", + "domain": "miner5.nodely.store", + "provider_id": "574e3b6e88b57771d2947aee62b170c4f71cca3dc7c6b24a1e4a23fdb27209a4" + }, + { + "client_id": "ba6ca0a7b7e2b41b2df2f7bf0e8ad859a8c3ade83f28c0f368a38dc474185c8d", + "domain": "nl.zusvault.com", + "provider_id": "59e5211ee0f0f2c3ba146d0f4be3bf57dff67b706ee79ec6a62c36d6ae5a2889" + }, + { + "client_id": "d7e40f8875d7070e5e8c7a965de201bd74abff33722791bbe7289dd3863dee50", + "domain": "miner-nl.zcn-network.eu", + "provider_id": "dea18859f12a805ec66a23a0b55dab47cf6f5d34e74833bbe59149558b84a83d" + }, + { + "client_id": "fa44cbf5aaf69d181c329683194c83443a61cf798cf6e18eb4be5372b1f89dbb", + "domain": "sharder-nl.zcn-network.eu", + "provider_id": "f12418ad14ef532978573adfb44c2158aa9ad344684a13de1066ea9bbfe0492b" + }, + { + "client_id": "523ad20e24a5daf8336c1067dd32bb8c1b625010b29274cb79778b61067050bb", + "domain": "miner3.nodely.store", + "provider_id": "90c91fbd753afbe72be83a9da1dc3012606f76b0da172687e237dc46375ff349" + }, + { + "client_id": 
"3139fc81a9a61b40fed72bf5c99e0fe284fa77c2a571aee2463b65f886ccaf03", + "domain": "miner4.nodely.store", + "provider_id": "6cb765b21a45180ee37250a5f2dbc52980fdc73b1c5383f6e82a90a2b90eab88" + }, + { + "client_id": "d401757bc7eead3f9dc593ebdd196c9357c1e2aa00583376b54683d20e142e05", + "domain": "miner1.nodely.store", + "provider_id": "c8197eb5fac7d565bb54e6c8f952426f472bbe0c045db38ea3f2482171c4d0bc" + }, + { + "client_id": "eb0a40cdaca8d310ea3adfc97ed77f4cfec8191a567882ab15f1422a7f9a50b5", + "domain": "hel.sdredfox.com", + "provider_id": "4840c06f0645beb06700b2f667e7d618645ccab91f28e8441ec96240fb01c8e5" + }, + { + "client_id": "b575983ba2305c99a31e83961dfb49e6f4780cfefe8e26f3695665508c8564dc", + "domain": "gagr1.cust-zcn.zeroservices.eu", + "provider_id": "2e8076fb7791576930b0d66f30d7d7a19fc306f0d125bebe2d07dd0e1bf7a697" + }, + { + "client_id": "cc0b8325117f5ea975b711614e2b16ab3787e6f2dd37ca9304354add97b184ff", + "domain": "zcn-helsinki.qs.capital", + "provider_id": "edc42f9b3eda8ab912d016d677aa667e426d45f9fe487c28037e6c53ff7d7019" + }, + { + "client_id": "ad244318fa80c674fe69b02cfdb022084fb70ddd4552a24e806e99b7c5f4fe83", + "domain": "sharder.qs.capital", + "provider_id": "67c9758fb9f12aa7108dc04f615b61494df99d485b4ac32b25985a506e5c2866" + }, + { + "client_id": "db6157231f9a7a080ec43e6054fd3af80269463a654ce60c38048a635c8025e3", + "domain": "miner8.nodely.store", + "provider_id": "d99db249703d8dac5376dc209394ed27a0d7f417ca962b97545976a879fa82e2" + }, + { + "client_id": "d2f0089f99d0bc627b9bbb99d7cb7620a22bd6058260fb51efa3fb569700f98e", + "domain": "fi-m.th0r.eu", + "provider_id": "632c68e3d0851443f3f2efa35dfd41926027a0ce08860578aea2ec040e39561a" + }, + { + "client_id": "4cb0731470c03aa6df302e6b6d21d47b8d43d18b8035174faa237f46bbbef878", + "domain": "es-m.th0r.eu", + "provider_id": "1a1162071af9e9933061a2c87086b5c0596b23845dd2fe6989423b13b9dcb623" + }, + { + "client_id": "aa73d84504182953a8627a4338c3c4e310c9baa559792ed225dd4a3e3302db53", + "domain": "es-s.th0r.eu", + "provider_id": "15504a30d83e3bf20e47c6c391d557cdff2131aeef4d5253a60026d087e8e2a0" + }, + { + "client_id": "dc272d8c6ccf095ae304fab5697578782965a1cf86992a99cd6e6f2ad64e59ea", + "domain": "tasmaniandevil1.cust-zcn.zeroservices.eu", + "provider_id": "e3f0db128c1fa55ac6f6a2b3f18cd338eda37d16c14135377eb2d10daa8daec8" + }, + { + "client_id": "11424c7c32edd2fa013ea4485926ead86e7deb6e43069b91a4678873310e7962", + "domain": "msb01.safestor.net", + "provider_id": "745bda9ccc5cca6860cb41edb56f0fcd44225097eff87c6cf94eba965e74fe05" + }, + { + "client_id": "972991387c27b44d788ae9491ac8f9064270fecfcc05a9c01c7e57cacfae72a6", + "domain": "zcn-sharder.safestor.net", + "provider_id": "13aa7c01ff3fdb3b91d9186beb85b1e0ecc111fa6d58091646159ac40e344bf9" + }, + { + "client_id": "fd0527ba18541dc10f42b05091182068a66c15c9539b25ad8ff656bc76545c08", + "domain": "msb02.datauber.net", + "provider_id": "97f9ab3dc61851794d384bf2b00acb1152fbdb83da0bdc42cd44ea4a8a16224a" + }, + { + "client_id": "67d899e7f45ba9dd2d8c6d6daa9e75894bb5c752416b910c25b72308ee50875d", + "domain": "msb01.datauber.net", + "provider_id": "51c783094c96720dc36bf83eca8c01797eff92a9212e894ff2afa564e740308f" + }, + { + "client_id": "15188171e7c5f7ee7de597da71b2960ee30caa921726c0abe16f5aaca397fc94", + "domain": "victor1.cust-zcn.zeroservices.eu", + "provider_id": "f188bda64f1dee8e0703af05818c6c64212700125018e38898226756b8f9301a" + }, + { + "client_id": "625d6d28aee71d3c470c424dd173c0dcc90735c4ca123edb8a68fc8decdd85c7", + "domain": "m1.msb4me.eu", + "provider_id": 
"1f292beb8097e124b277edf03c8e7bbdb8d8c59b8715144a1320677bb438055e" + }, + { + "client_id": "490c2c9efb3e67e1baa6b320d1b122c0bfb6625605ed226d58e5509d8b03358e", + "domain": "s1.msb4me.eu", + "provider_id": "6f6ddc77e2c439a291a41a76c5888337303bc1c2a5c8325806eb8910112c41a9" + }, + { + "client_id": "2ef62fce52607017abf3c2493f681fbd6f7147f19f0bde0d593315157a5f387b", + "domain": "m2.msb4me.eu", + "provider_id": "120f8381d801fa86a39548780474430e5e0ced73c2b0631dfbb33d2d7afd4d33" + }, + { + "client_id": "875f3cd17fbdad01a2b7d402734582b9a9f76b7795d74869849d9b753c620043", + "domain": "eggplants1.arturocapital.com", + "provider_id": "9b680da4232ca479309f6334b7ec190226b37d66c29258bd93563abe9fc14113" + }, + { + "client_id": "76a77518aceec768fb5699ec91942633976223cebd696098af9a14718ecd63a8", + "domain": "gongshows1.bnocs.com", + "provider_id": "d7cce80fc05c09fe5019a39f264c8ddea18139b2de12b31e351b985a961263f2" + }, + { + "client_id": "e398e3b2626f13aa782e952cfa1ed93a56143b5c834921abe8bded9cb3dca825", + "domain": "ketos1.bnocs.com", + "provider_id": "9e498ea78295d03bd7e07d8bea7a28b0e35ebbf2b622e4c058bc349385062372" + }, + { + "client_id": "f20d07d8eb65f21215d8de73862fa024e2db73fa03718bcf187879d376cc3dd8", + "domain": "legolass1.bnocs.com", + "provider_id": "d1548cda259cd3b76621567255b8f4f4681d1613d20294a9e766e0e4fac41c64" + }, + { + "client_id": "2c6db250cc6598f3fcb3e195cc258649f3d459612a0027c016e6005bdf17c146", + "domain": "ironmans1.fixx.biz", + "provider_id": "400313f3b71bac55427e64de9a23f735ddb121d18b41b95a449e1a1e2541f27a" + }, + { + "client_id": "511b495000afafda540437b567f893818cd598880aee0c2ed9fe3a04db271d2e", + "domain": "donkeykongm1.arturocapital.com", + "provider_id": "5fea0897dabc032eef31f9068ce2c0bd6d6d5adcb924b1407aa73c0bf31ddacd" + }, + { + "client_id": "451f6dc57117365b86bb6af7c13e485996487d870829657ea162254643795320", + "domain": "donkeykongm2.arturocapital.com", + "provider_id": "20279b4ce6266324ee0b9449c32ee38af95ede0f6a1caac3e50939957666c02d" + }, + { + "client_id": "9a1dad868b0a861042e1e968f407a5b3f6e5598fd90f35cf784b54bb3d899f11", + "domain": "eggplantm1.arturocapital.com", + "provider_id": "48eb589ce93a40e1734a7dea15e6e28a5a608636bb92268e891f1aa1b5a4e9c1" + }, + { + "client_id": "ae563ea7750209c5a04dfca537e357b4757349d7b7edcfc5b147247b4eb9ef0b", + "domain": "eggplantm2.arturocapital.com", + "provider_id": "742d178c8eb7bea73601c76609bf6a5ec07c8036f434bfe2aa24336bd4fe71b2" + }, + { + "client_id": "771dd550688abd2b71b227df96f1ef034db09dbecbc7accd1383dc6871dd5735", + "domain": "interstellerm1.arturocapital.com", + "provider_id": "8ad5244bcc2676887d7040377c3a3310951199c17c7222d9d1538781221c4174" + }, + { + "client_id": "6ed2fe7b2c3ecaf0b9b51d287739017be905baed60302067b7c092de6257061e", + "domain": "interstellerm2.arturocapital.com", + "provider_id": "0ce1d095a87b0731ed637c824388271115295007d6ac47178b5cd42c535c952b" + }, + { + "client_id": "5e474ae6ccf7d3868ad759a9a60177a475a96787f6e543379a524400ee7278ec", + "domain": "olympiam1.arturocapital.com", + "provider_id": "7d95b9a124035758593b5330143563f7f7d092874d4587701a4f5fa45e04faf0" + }, + { + "client_id": "4826d8ac40f256f17937a39d8f8d4c57cea80cc9439d379e066c377c9da8d2a4", + "domain": "olympiam2.arturocapital.com", + "provider_id": "acebe0dce47ea9924ec3a917a457e5d5fcd55848354c71da7913aad25e7216a3" + }, + { + "client_id": "dfdf61a0dd385514591c8f5ea5f05b1d40360b79cb208845dbd2f4ad3e47fbf5", + "domain": "mintym2.arturocapital.com", + "provider_id": "edf8ca21552efeb8ff9a7235d9c9928206a667d273ea27e0292a587855df821c" + }, + { + "client_id": 
"a0b48430dcd87465d3db6fe5201c92e49862b72ff9eaea162aef83311ee910d7", + "domain": "mintym1.arturocapital.com", + "provider_id": "81cca31e154e550b2fc7cfd6370f0fabb41e49b1626e62e6eee81844276ac5a0" + }, + { + "client_id": "2b2099464f7bf233e9194a8d42bdc923712ae30e0d35f3e03e46885abb33141a", + "domain": "niftym1.arturocapital.com", + "provider_id": "46c2b0464821a73bb572fe42a5a77b44437ddea7601ba0e0288f1307dc053652" + }, + { + "client_id": "b06a2a4f0b41de03c5bb3e5f15845d29f7245335880d24745974c99fe36647a5", + "domain": "niftym2.arturocapital.com", + "provider_id": "abe89eacd2d9340c00dfceb364122272438d8871c707bcfe4c48b40b76f4ffe8" + }, + { + "client_id": "cb6f21160288fa1de4ba44fa427eca014a2f65b086b7f117bc2c99933e5e3fd9", + "domain": "gongshowm1.bnocs.com", + "provider_id": "96a91b1652706dc5faecee4edbf200e5d8b8e3d51767b770e58e83f221d48dd8" + }, + { + "client_id": "2479bff9b1082896228f6dc6264ccd650af98c89ec1b603f116c04b9f076a061", + "domain": "gongshowm2.bnocs.com", + "provider_id": "dc30af2941530930afb04bc7a61734395cf2906b9f6a8bb9bc77367d95848df9" + }, + { + "client_id": "4009ac2a04bf01fac07b586fa6cc95a5219f99183b3747cdea98ba4e3468aebf", + "domain": "ketom1.bnocs.com", + "provider_id": "18663cad133baac6e11cea6f9d5e0cb45e1692539a22ca45e73cfebf844951c7" + }, + { + "client_id": "3aa30a46854c8ca8038d8e9e72c9b95431b828d4cf5b7082a5a945d74a7b8d06", + "domain": "legolasm1.bnocs.com", + "provider_id": "1c70d930f04e484e46d9d09f4bfffde7354970c50f64551351698cf3895a7826" + }, + { + "client_id": "8327c24989d1d2872cba5f41915fe5a7e17b93ca540746ca90807c4504565791", + "domain": "legolasm2.bnocs.com", + "provider_id": "5157ccfa24ad2cf79801a69b9dc95e8ffc03a1b65b1278c2c3487e8f362052bf" + }, + { + "client_id": "ea5afccec5e6c21860d205e49755101e0618c7222fb35816f75d0e86fe1b842f", + "domain": "hightowerm1.bnocs.com", + "provider_id": "6d1d838a85731d853ee273210919daeaa50844d7e04bdfae0fbf873cbe984fa1" + }, + { + "client_id": "21c9df3a7989b07d72372fcf87f875bf5a207f8a56f66aa28feaa876d2a4f798", + "domain": "napoleonm1.bnocs.com", + "provider_id": "8dc49e65b363f6f730f90b28cfe0969518f9bf94b83306f5ad9cc5cca459ec0b" + }, + { + "client_id": "528728f1974e332fa3695e14b328c2438ad65c9c1ee1381c09b3fbf6e62b082e", + "domain": "napoleonm2.bnocs.com", + "provider_id": "cea0aea675acd952800ba4ad28ccd690b200d41357a32b47b4744210cf421a60" + }, + { + "client_id": "068437ab2d00b0b66b5a10cca3d25728e14fc36e74d6d62ecd8f8355ae387f03", + "domain": "ironmanm1.fixx.biz", + "provider_id": "3f270b9f5029b14b32dcbe167b33865a2ecdfa752ad10277cfdfa45384c0bd4a" + }, + { + "client_id": "b289389d4e80c9a0e7ee8e90daa50711db0c6029016d023fa2c2834ffd7382a6", + "domain": "ironmanm2.fixx.biz", + "provider_id": "9a1679b06d44fc01fd8bc2019adda93f92a155e5d66f48f1123da97b78d33744" + }, + { + "client_id": "b2a996f11aabe30cb8fe50ceebb955bf644ca4ccc921d977e1f8643db18383bf", + "domain": "enemym1.fixx.biz", + "provider_id": "8b5b51672168988e964bcab5945df6b0e9d6f9af6b6e1c79aa2a5cc8aa0e451b" + }, + { + "client_id": "7009c406cd26a490c388b66a118fd4cbec6512992ac3667dce83e3cdf38e9089", + "domain": "enemym2.fixx.biz", + "provider_id": "46ea4695d967926e4c71e4203ef98cc8e8007526632ce6ce7b31d998450dfdb6" + }, + { + "client_id": "1b3c7abe9b953716d5485dd352d6913d13926918e2a3e63a8d54b54250d20a2d", + "domain": "miner.spideyzus.com", + "provider_id": "4f5dc2ddf98b4e5c8012234de86dfd66446e05b79916a9d7c6233c12e9f84f20" + }, + { + "client_id": "6ff01f128cae47c32cc7fe9a2b93724b3c3706678055d65315a380d517030d78", + "domain": "sharder1.bytepatch.io", + "provider_id": 
"3409d86fb6b26d2d966c7e00561f7c3b9e337b5908c3989b34414deae5efb8c6" + }, + { + "client_id": "7adf1727b3aaa53a9ecbb2bae04ea36e39a26ccdbc2dc1383a1a5a33e5d1e6d1", + "domain": "miner1.bytepatch.io", + "provider_id": "9bd0142648151c98c88523fc96656c368386fc8e92027315a8543603af681562" + }, + { + "client_id": "211b19ed9889b78a4b5f10e0447a819b8f18c44588598d9daa3a74769991f933", + "domain": "zusminer1.s-togawa.com", + "provider_id": "8e4de505346380887ce01da5838526adde727a4d569b991dfa4ff716b34387e3" + }, + { + "client_id": "f0533852cc0fca839409773262886cb94fd3668fe1afa162c80002503438cec9", + "domain": "zusminer2.s-togawa.com", + "provider_id": "f74514b0398e9e911deca5a68404b9fb5add40fd37427f480af1162fd9172920" + }, + { + "client_id": "f8170a06945fbe363d55daf364b5c91e05a7868aeacb35186b33e9d7cc0a6cb2", + "domain": "kngrorig.tanode.net", + "provider_id": "9ad5587ee158b8cee9917ba152840b88a6f4a9ee452ad37ce95f4a90e0555449" + }, + { + "client_id": "4033e8cb4d152f7a21b6ae1dec8ebbe0a23c77c09fb99b0f351bfb7842564f61", + "domain": "bourbon.rustictrain.com", + "provider_id": "5e56de39b47adb53ddee1bb742c5e3a2c85f3048e3d026344efc81305593d624" + }, + { + "client_id": "8c495fcbe1da7bcf84278032793eb1429b74c04061224300f87f06940e1093c5", + "domain": "sharder1.moonboysmining.com", + "provider_id": "f32e7fc63a3f264b92c59885ed07866b6c4d0f2dffb0a72690ecb1e96cfcaa59" + }, + { + "client_id": "4698c3f59c7c1f784c8b4009a5ef2b7afe9cce4c91c82088fe08e070215cc6fb", + "domain": "sharder2.moonboysmining.com", + "provider_id": "1038626bd039b9fa33f4a956e3d30a558f7007e6f99634b3724afe2c290554bf" + }, + { + "client_id": "33126eed842c32c72519f9114fc74d89d5213cd1ef9452fc606e6c364f4f0825", + "domain": "miner1.moonboysmining.com", + "provider_id": "86886b87042cca2a5d9662e46163b6c18e7d889a2a45f2e8182e5bd80fb314ae" + }, + { + "client_id": "9b4cf1be8518217d12310eb0cc8326e4346a8ec6e0aac6eb87dc33c66a892e12", + "domain": "miner2.moonboysmining.com", + "provider_id": "71fee18d7da67b5b4706840ffff86c2958a6569e98b9f738c3bfe85bde316a01" + }, + { + "client_id": "1103baaa1af52f75214969dd87a558429bdc7e874d9d7a63814de88686cf6cfe", + "domain": "m.sdredfox.com", + "provider_id": "0323b5985936d73ad496979accaa4e18058e34ac7fc8539ab6f6607011c4dc8e" + }, + { + "client_id": "8668cc85b54d26d75872a30f3987f5eb944e8d5b0c28f3c495e37b7ada149fb3", + "domain": "s.sdredfox.com", + "provider_id": "9c7db5abd2716c57ac3f60f6d10f04a1fc350ddd778dcbb37ebefefb713f2987" + }, + { + "client_id": "9d62ea206d966db4c0b8eee0ed5a6f06ca77e479c953d53896a59a5bd97431f7", + "domain": "miner2.nodely.store", + "provider_id": "ac59d556c8bff3ccaeca40239866d66c63bfaffb9af4c212816d4de4f75fbb92" + }, + { + "client_id": "6459a4db22bdde08795bf2177ec5fc5d995053f66b6b1249405103c26ae33960", + "domain": "sharder.nodely.store", + "provider_id": "66d8276e467d0fb2c3c49bca51e631c24871784cf9739d3cccb1f9d2a932e7ac" + }, + { + "client_id": "12d90cf9fe4a39589ad6d844d97c3717ba385632c1a1d7a3e04105bc789a92f5", + "domain": "miner7.nodely.store", + "provider_id": "277f5d2798a39ef3219586e23e6b41846f749a724e46e3e281bdfc281e623ab5" + }, + { + "client_id": "e05ab6465fb8bd15f1ff378112097a2f8407a029a12ac610915f19d7f636c215", + "domain": "jc7.jczcncloud.com", + "provider_id": "dad2c08eac4b36f6d03e3ca1902eeb95351cd810fbd0ca06ceec2d0070f13240" + }, + { + "client_id": "c91e6bae707288aef8bd2c5894512944dbf43da58f65e02f0f7c77c02e63c9ba", + "domain": "jc4.jczcncloud.com", + "provider_id": "1e197eb63c5bba3344b0109694b41b4a271c51086837a9b83416a50121816d7a" + }, + { + "client_id": 
"8f60100499ebd4a67f4cbb6d6765aa86733fd75da686fcda04af2269f8c6db1b", + "domain": "jc3.jczcncloud.com", + "provider_id": "02dd397fbf5774982284f4d555f6bf63e4dd708f995a797de3526fa4bdff7791" + }, + { + "client_id": "688f5716085b8929271dd62e7bc7963464e4d56826ec7b28d19506418dd029bf", + "domain": "jc1.jczcncloud.com", + "provider_id": "10b380afea93a03baddb80d396ee8968342c3dced9111811e72c37a46233efd8" + }, + { + "client_id": "d2389a8402d3321c55ee317490ad7b6628b1e6b43ca70af7ef1b332026ab1705", + "domain": "jc6.jczcncloud.com", + "provider_id": "3bceeb8b00f00a3208b460545fffe0566ae12d893b3651406ee454d5005e45d7" + }, + { + "client_id": "aef900cfe57e33975180313568ee2c164d3c59cd9cc17c04701c34f98216bab7", + "domain": "jc5.jczcncloud.com", + "provider_id": "8aec4bef5f19a907f402ba7027979d29d5e34cc426704388308efe08eed2bbd1" + }, + { + "client_id": "eba98830881187e5ef5c8529877d3a738362699a6b0b215cc196090cfc36f3e3", + "domain": "pointer.phatgiga.com", + "provider_id": "a4edf923651857f0dde722c4fca169390dbdb943a1ba7aa4a4695092cbd64048" + }, + { + "client_id": "eba98830881187e5ef5c8529877d3a738362699a6b0b215cc196090cfc36f3e3", + "domain": "reaper.phatgiga.com", + "provider_id": "0b5c9a8ef85fee70ed275b860a6a62ac1337f93825b446f4fd74791963b85e2f" + }, + { + "client_id": "eba98830881187e5ef5c8529877d3a738362699a6b0b215cc196090cfc36f3e3", + "domain": "stinger.phatgiga.com", + "provider_id": "b138ab1e4eb6dad58eac988365bfe6343e06d6233887a8327365a7d151c2faee" + }, + { + "client_id": "eba98830881187e5ef5c8529877d3a738362699a6b0b215cc196090cfc36f3e3", + "domain": "hunter.phatgiga.com", + "provider_id": "0acfae0a6c71a05ecaf2fcd36382e3505298b31a444c4f13e562473c8436e0cb" + }, + { + "client_id": "eba98830881187e5ef5c8529877d3a738362699a6b0b215cc196090cfc36f3e3", + "domain": "panther.phatgiga.com", + "provider_id": "7eb88c6e8677fb7a267aaefbfae982a5e9f91f43d54d171d537e5245f4d8ca73" + }, + { + "client_id": "eba98830881187e5ef5c8529877d3a738362699a6b0b215cc196090cfc36f3e3", + "domain": "predator.phatgiga.com", + "provider_id": "55e52a68e5dab66942e6681425685c954174ad16c1d54cf30306ee4fa498cb3d" + }, + { + "client_id": "eba98830881187e5ef5c8529877d3a738362699a6b0b215cc196090cfc36f3e3", + "domain": "falcon.phatgiga.com", + "provider_id": "82180ed64eced70141b7524b0f37cc8c0ef952241429240e7eda26d78520b9a7" + }, + { + "client_id": "eba98830881187e5ef5c8529877d3a738362699a6b0b215cc196090cfc36f3e3", + "domain": "eagle.phatgiga.com", + "provider_id": "445b6def7718a5ad4bab2798f7bc386e49689644da9ffe7d7654e6455740e5f6" + }, + { + "client_id": "eba98830881187e5ef5c8529877d3a738362699a6b0b215cc196090cfc36f3e3", + "domain": "hawk.phatgiga.com", + "provider_id": "641fa7f306ebb7b4b75f4221423ce065e9b1e72547c775e85fcf2e607f0182e7" + }, + { + "client_id": "eba98830881187e5ef5c8529877d3a738362699a6b0b215cc196090cfc36f3e3", + "domain": "hornet.phatgiga.com", + "provider_id": "6afa5c7670a6a0304b7f7738cbff981fb3e6f3c4f272f3faa8e22c038cd17f28" + }, + { + "client_id": "eba98830881187e5ef5c8529877d3a738362699a6b0b215cc196090cfc36f3e3", + "domain": "wasp.phatgiga.com", + "provider_id": "316104852971c9a9a8b17ae8ba19851766311fa94c158023a7c4c2f561cd9f36" + }, + { + "client_id": "9232ba8bae473f1ed4550b9a2b556b1ed131fda4dfe8723567a45ec235e8f7dd", + "domain": "jc2.jczcncloud.com", + "provider_id": "3e6069913b384720ad3f0cc320ffd07f5d0b7018f6806ed81e78ea7d5d0d83f8" + }, + { + "client_id": "dce8facb60cd16d9395f31c4300bcd7915ea620e241e6210f817917591f260cf", + "domain": "quant.zusfiver.com", + "provider_id": 
"42af9b62466e290640ce7c347384abed80618559ba59bba16577de85989dc7ab" + }, + { + "client_id": "6bb8a578d6230f49da0065d6cf9c30512f5bc3bd8f0dafb2d4730aca3c290aca", + "domain": "zussharder.thecoolchase.com", + "provider_id": "36532a46e5fff9e856f2c0de2e2456905b03b850f3dde3d0c905d5b5991b0881" + }, + { + "client_id": "e6069f8626ad31b55a0d3ea4f3f63493ae205e3266e38d7d0b8d2d2872a8c875", + "domain": "zusminer.thecoolchase.com", + "provider_id": "012765c29ad56e422e8404f8c99c69c1721c49f0a2e5a3645ecfce806e298f5f" + }, + { + "client_id": "d8e7f5642d2433aaa64785bf0c6de5f4322284456cf74248ab4fff908e0de0b2", + "domain": "zus-miner-1.krates.ai", + "provider_id": "6b285d1442ce9ebbbfed43056de097dd477a8505f0bae189ef50f34eb18f95ac" + }, + { + "client_id": "baffe59a432b8159d0ec2a1f6236fdeb3d776c1b6c355851161c64d93b3cdd50", + "domain": "zus-miner-2.krates.ai", + "provider_id": "fa74867c22e4e7cdac265dd2c3226712814db38a151146538d9a7e915bfa1f41" + } +] \ No newline at end of file diff --git a/0chain/others/initial_states.yaml b/0chain/others/initial_states.yaml new file mode 100644 index 00000000..c36d5cc2 --- /dev/null +++ b/0chain/others/initial_states.yaml @@ -0,0 +1,368 @@ +initialStates: +# minersc +- id: 6dba10422e368813802877a85039d3985d96760ed844092319743fb3a76712d9 + tokens: 850000000000000000 + state: + # miners and sharders + - id: 6cea31163d04c3f77e27f4803201863c468cd9c8697fb0b39fbf6d89e20962d9 + tokens: 10000000000000 + - id: 742d178c8eb7bea73601c76609bf6a5ec07c8036f434bfe2aa24336bd4fe71b2 + tokens: 10000000000000 + - id: 277f5d2798a39ef3219586e23e6b41846f749a724e46e3e281bdfc281e623ab5 + tokens: 10000000000000 + - id: e7d0e6d410a2090194d2d7e35e46619fdf0779f904c878e266be3f11b3d67d11 + tokens: 10000000000000 + - id: 9ad5587ee158b8cee9917ba152840b88a6f4a9ee452ad37ce95f4a90e0555449 + tokens: 10000000000000 + - id: 7eb88c6e8677fb7a267aaefbfae982a5e9f91f43d54d171d537e5245f4d8ca73 + tokens: 10000000000000 + - id: 82180ed64eced70141b7524b0f37cc8c0ef952241429240e7eda26d78520b9a7 + tokens: 10000000000000 + - id: 48eb589ce93a40e1734a7dea15e6e28a5a608636bb92268e891f1aa1b5a4e9c1 + tokens: 10000000000000 + - id: dc30af2941530930afb04bc7a61734395cf2906b9f6a8bb9bc77367d95848df9 + tokens: 10000000000000 + - id: 445b6def7718a5ad4bab2798f7bc386e49689644da9ffe7d7654e6455740e5f6 + tokens: 10000000000000 + - id: 76c296511da5de03cebf5a491834a363c45f331285330a4aaf5cc2df4c87bbe3 + tokens: 10000000000000 + - id: 90c91fbd753afbe72be83a9da1dc3012606f76b0da172687e237dc46375ff349 + tokens: 10000000000000 + - id: 690fd892b76798305ec33e196d8e2f8ba17a15eb4a913b83d9edfcc49a475772 + tokens: 10000000000000 + - id: 9bd0142648151c98c88523fc96656c368386fc8e92027315a8543603af681562 + tokens: 10000000000000 + - id: 6cb765b21a45180ee37250a5f2dbc52980fdc73b1c5383f6e82a90a2b90eab88 + tokens: 10000000000000 + - id: d99db249703d8dac5376dc209394ed27a0d7f417ca962b97545976a879fa82e2 + tokens: 10000000000000 + - id: 0323b5985936d73ad496979accaa4e18058e34ac7fc8539ab6f6607011c4dc8e + tokens: 10000000000000 + - id: d8cb128e8a0bcf2ae8eaec44600ee5a9c888c3603a9693206535dd376c6ff3ad + tokens: 10000000000000 + - id: 6ecf4b024ad22026e60faedaff19314cc6e54fac3563cd91be5984607de02205 + tokens: 10000000000000 + - id: c8197eb5fac7d565bb54e6c8f952426f472bbe0c045db38ea3f2482171c4d0bc + tokens: 10000000000000 + - id: 96a91b1652706dc5faecee4edbf200e5d8b8e3d51767b770e58e83f221d48dd8 + tokens: 10000000000000 + - id: 4065bddb8e112ed6111883b4bd0791cae4a6389cf9df3abd1d617cf5ffba0053 + tokens: 10000000000000 + - id: 10b380afea93a03baddb80d396ee8968342c3dced9111811e72c37a46233efd8 
+ tokens: 10000000000000 + - id: 6b285d1442ce9ebbbfed43056de097dd477a8505f0bae189ef50f34eb18f95ac + tokens: 10000000000000 + - id: 67b1447d5483dcabd86a680400f27a2e99132b879181bdeb3549b768386e258e + tokens: 10000000000000 + - id: 316104852971c9a9a8b17ae8ba19851766311fa94c158023a7c4c2f561cd9f36 + tokens: 10000000000000 + - id: ac59d556c8bff3ccaeca40239866d66c63bfaffb9af4c212816d4de4f75fbb92 + tokens: 10000000000000 + - id: 1cd49a599196c129981a7fc760e614f56280a8619c850c2655d9263f3ddc778e + tokens: 10000000000000 + - id: a4edf923651857f0dde722c4fca169390dbdb943a1ba7aa4a4695092cbd64048 + tokens: 10000000000000 + - id: 18663cad133baac6e11cea6f9d5e0cb45e1692539a22ca45e73cfebf844951c7 + tokens: 10000000000000 + - id: 120f8381d801fa86a39548780474430e5e0ced73c2b0631dfbb33d2d7afd4d33 + tokens: 10000000000000 + - id: 0d9583ef583a495b62d2ef6f6dfc3613c4f7fdbb9aa5cf9f152966a7cc910b67 + tokens: 10000000000000 + - id: 4d7ae4c40ed17cb49d6e6a5c7b1c3e4a8cca871844f7e4709673ff30ee858ab7 + tokens: 10000000000000 + - id: edf8ca21552efeb8ff9a7235d9c9928206a667d273ea27e0292a587855df821c + tokens: 10000000000000 + - id: 3bceeb8b00f00a3208b460545fffe0566ae12d893b3651406ee454d5005e45d7 + tokens: 10000000000000 + - id: cea0aea675acd952800ba4ad28ccd690b200d41357a32b47b4744210cf421a60 + tokens: 10000000000000 + - id: 9a1679b06d44fc01fd8bc2019adda93f92a155e5d66f48f1123da97b78d33744 + tokens: 10000000000000 + - id: 0acfae0a6c71a05ecaf2fcd36382e3505298b31a444c4f13e562473c8436e0cb + tokens: 10000000000000 + - id: 59e5211ee0f0f2c3ba146d0f4be3bf57dff67b706ee79ec6a62c36d6ae5a2889 + tokens: 10000000000000 + - id: 86886b87042cca2a5d9662e46163b6c18e7d889a2a45f2e8182e5bd80fb314ae + tokens: 10000000000000 + - id: c0a37285a95fb8b7c186444aabaa1641beaf1be9e25ed183dc8b22d55313fd62 + tokens: 10000000000000 + - id: 1e197eb63c5bba3344b0109694b41b4a271c51086837a9b83416a50121816d7a + tokens: 10000000000000 + - id: 907b45f6c9125f050b5e8029b33f434cecf25270acb67c6695c80ae622dfd119 + tokens: 10000000000000 + - id: f74514b0398e9e911deca5a68404b9fb5add40fd37427f480af1162fd9172920 + tokens: 10000000000000 + - id: 8b5b51672168988e964bcab5945df6b0e9d6f9af6b6e1c79aa2a5cc8aa0e451b + tokens: 10000000000000 + - id: 9b7de13ce1e8bb98bddae69d2163711592a07dc0a5b47ce2d84c9810cfe348f7 + tokens: 10000000000000 + - id: 7d95b9a124035758593b5330143563f7f7d092874d4587701a4f5fa45e04faf0 + tokens: 10000000000000 + - id: e3f0db128c1fa55ac6f6a2b3f18cd338eda37d16c14135377eb2d10daa8daec8 + tokens: 10000000000000 + - id: f188bda64f1dee8e0703af05818c6c64212700125018e38898226756b8f9301a + tokens: 10000000000000 + - id: 4fd355848e3e3efa9c87f4627db1395262a3900402ef7712a347573ae58b1697 + tokens: 10000000000000 + - id: 6afa5c7670a6a0304b7f7738cbff981fb3e6f3c4f272f3faa8e22c038cd17f28 + tokens: 10000000000000 + - id: 46ea4695d967926e4c71e4203ef98cc8e8007526632ce6ce7b31d998450dfdb6 + tokens: 10000000000000 + - id: 6b7f2dd806f4edc410fcf2502b790aa0c5e1b3c3bb1979a26b3504f18f62ea2a + tokens: 10000000000000 + - id: 3f270b9f5029b14b32dcbe167b33865a2ecdfa752ad10277cfdfa45384c0bd4a + tokens: 10000000000000 + - id: 8b56a1ce13d7eb00c0c7dc0a0e3e840883a63faecc7f4ec6e6a518eaf112c755 + tokens: 10000000000000 + - id: f90efe8bfe60ff66bf560d3a795d5d8970148b3e939de1bfc1fec35c5b6b88c2 + tokens: 10000000000000 + - id: 71fee18d7da67b5b4706840ffff86c2958a6569e98b9f738c3bfe85bde316a01 + tokens: 10000000000000 + - id: 012765c29ad56e422e8404f8c99c69c1721c49f0a2e5a3645ecfce806e298f5f + tokens: 10000000000000 + - id: 3e6069913b384720ad3f0cc320ffd07f5d0b7018f6806ed81e78ea7d5d0d83f8 + tokens: 10000000000000 + 
- id: b138ab1e4eb6dad58eac988365bfe6343e06d6233887a8327365a7d151c2faee + tokens: 10000000000000 + - id: 4f5dc2ddf98b4e5c8012234de86dfd66446e05b79916a9d7c6233c12e9f84f20 + tokens: 10000000000000 + - id: fc7438aa82af628a51ad856116bf4befe3edbfc9e28575ed55065057aa4cf5cd + tokens: 10000000000000 + - id: 92457f56e1e429e0a69d268269d5e5e941c972efbdb2f1f9daecbca62b42f1d5 + tokens: 10000000000000 + - id: 42af9b62466e290640ce7c347384abed80618559ba59bba16577de85989dc7ab + tokens: 10000000000000 + - id: 0ce1d095a87b0731ed637c824388271115295007d6ac47178b5cd42c535c952b + tokens: 10000000000000 + - id: 5b888875a8dc93be1ea20fbe98a3bef04634830b089e9cf8b6bbdc0b633cf943 + tokens: 10000000000000 + - id: 4e1108ccd44a1e84b7a43b3ecfddfc7afda8ae762c323b98ff70935e943d472d + tokens: 10000000000000 + - id: 54cb54ac71a0739005cc1332cd4cdfc3422a5a62f9c983ea97811d019b6c30e3 + tokens: 10000000000000 + - id: 745bda9ccc5cca6860cb41edb56f0fcd44225097eff87c6cf94eba965e74fe05 + tokens: 10000000000000 + - id: 02dd397fbf5774982284f4d555f6bf63e4dd708f995a797de3526fa4bdff7791 + tokens: 10000000000000 + - id: 641fa7f306ebb7b4b75f4221423ce065e9b1e72547c775e85fcf2e607f0182e7 + tokens: 10000000000000 + - id: 4840c06f0645beb06700b2f667e7d618645ccab91f28e8441ec96240fb01c8e5 + tokens: 10000000000000 + - id: 46c2b0464821a73bb572fe42a5a77b44437ddea7601ba0e0288f1307dc053652 + tokens: 10000000000000 + - id: 8ad5244bcc2676887d7040377c3a3310951199c17c7222d9d1538781221c4174 + tokens: 10000000000000 + - id: 51c783094c96720dc36bf83eca8c01797eff92a9212e894ff2afa564e740308f + tokens: 10000000000000 + - id: 1c70d930f04e484e46d9d09f4bfffde7354970c50f64551351698cf3895a7826 + tokens: 10000000000000 + - id: 1f292beb8097e124b277edf03c8e7bbdb8d8c59b8715144a1320677bb438055e + tokens: 10000000000000 + - id: 0b5c9a8ef85fee70ed275b860a6a62ac1337f93825b446f4fd74791963b85e2f + tokens: 10000000000000 + - id: 574e3b6e88b57771d2947aee62b170c4f71cca3dc7c6b24a1e4a23fdb27209a4 + tokens: 10000000000000 + - id: 632c68e3d0851443f3f2efa35dfd41926027a0ce08860578aea2ec040e39561a + tokens: 10000000000000 + - id: acebe0dce47ea9924ec3a917a457e5d5fcd55848354c71da7913aad25e7216a3 + tokens: 10000000000000 + - id: 8aec4bef5f19a907f402ba7027979d29d5e34cc426704388308efe08eed2bbd1 + tokens: 10000000000000 + - id: 34b3b8c4d5bf1690f29072123c33195407b130000d1a1f0ec55050ead77ba9da + tokens: 10000000000000 + - id: 5fea0897dabc032eef31f9068ce2c0bd6d6d5adcb924b1407aa73c0bf31ddacd + tokens: 10000000000000 + - id: 20279b4ce6266324ee0b9449c32ee38af95ede0f6a1caac3e50939957666c02d + tokens: 10000000000000 + - id: edc42f9b3eda8ab912d016d677aa667e426d45f9fe487c28037e6c53ff7d7019 + tokens: 10000000000000 + - id: 97f9ab3dc61851794d384bf2b00acb1152fbdb83da0bdc42cd44ea4a8a16224a + tokens: 10000000000000 + - id: 6d1d838a85731d853ee273210919daeaa50844d7e04bdfae0fbf873cbe984fa1 + tokens: 10000000000000 + - id: ff716e8475cfed98c55ac2d9c09c15b5bd90da68620eea6eb3209610a7516cff + tokens: 10000000000000 + - id: 00175a3bcfeff85551e7348346286d9971e7f23678fb681ccba91e836a411ef8 + tokens: 10000000000000 + - id: b2fd91df13c013586e911979a401dc624e61735185655b48bf391e9e37472cf6 + tokens: 10000000000000 + - id: 8dc49e65b363f6f730f90b28cfe0969518f9bf94b83306f5ad9cc5cca459ec0b + tokens: 10000000000000 + - id: 1a1162071af9e9933061a2c87086b5c0596b23845dd2fe6989423b13b9dcb623 + tokens: 10000000000000 + - id: 81cca31e154e550b2fc7cfd6370f0fabb41e49b1626e62e6eee81844276ac5a0 + tokens: 10000000000000 + - id: abe89eacd2d9340c00dfceb364122272438d8871c707bcfe4c48b40b76f4ffe8 + tokens: 10000000000000 + - id: 
5e56de39b47adb53ddee1bb742c5e3a2c85f3048e3d026344efc81305593d624 + tokens: 10000000000000 + - id: 55e52a68e5dab66942e6681425685c954174ad16c1d54cf30306ee4fa498cb3d + tokens: 10000000000000 + - id: 7b64976ab54e38165ce6049fe6e2f4fe34a6c0929e486cb8e5e535c436ed1853 + tokens: 10000000000000 + - id: 5157ccfa24ad2cf79801a69b9dc95e8ffc03a1b65b1278c2c3487e8f362052bf + tokens: 10000000000000 + - id: fa74867c22e4e7cdac265dd2c3226712814db38a151146538d9a7e915bfa1f41 + tokens: 10000000000000 + - id: 8e4de505346380887ce01da5838526adde727a4d569b991dfa4ff716b34387e3 + tokens: 10000000000000 + - id: dea18859f12a805ec66a23a0b55dab47cf6f5d34e74833bbe59149558b84a83d + tokens: 10000000000000 + - id: 2e8076fb7791576930b0d66f30d7d7a19fc306f0d125bebe2d07dd0e1bf7a697 + tokens: 10000000000000 + - id: 13aa7c01ff3fdb3b91d9186beb85b1e0ecc111fa6d58091646159ac40e344bf9 + tokens: 10000000000000 + - id: 9e498ea78295d03bd7e07d8bea7a28b0e35ebbf2b622e4c058bc349385062372 + tokens: 10000000000000 + - id: 400313f3b71bac55427e64de9a23f735ddb121d18b41b95a449e1a1e2541f27a + tokens: 10000000000000 + - id: f659faad3a0d31e971b83fffcfc47ee9f3b4ea2dbc3234684a0abcd5d6c05d27 + tokens: 10000000000000 + - id: 3409d86fb6b26d2d966c7e00561f7c3b9e337b5908c3989b34414deae5efb8c6 + tokens: 10000000000000 + - id: d7cce80fc05c09fe5019a39f264c8ddea18139b2de12b31e351b985a961263f2 + tokens: 10000000000000 + - id: 6f6ddc77e2c439a291a41a76c5888337303bc1c2a5c8325806eb8910112c41a9 + tokens: 10000000000000 + - id: 1038626bd039b9fa33f4a956e3d30a558f7007e6f99634b3724afe2c290554bf + tokens: 10000000000000 + - id: 5ccda6733db5e582ec67f537288f9be4c28d1518242210ad4322cab1f74670c3 + tokens: 10000000000000 + - id: 9c7db5abd2716c57ac3f60f6d10f04a1fc350ddd778dcbb37ebefefb713f2987 + tokens: 10000000000000 + - id: 081f5ead92b48b62209ae59b8fdfe841fbd73b7767246d6d8ec6877a9d3de75c + tokens: 10000000000000 + - id: 5e6d077f09291e0a2fcdb21f5415f40d22766b71ca9218ffb51db8100a3e6ce9 + tokens: 10000000000000 + - id: 15504a30d83e3bf20e47c6c391d557cdff2131aeef4d5253a60026d087e8e2a0 + tokens: 10000000000000 + - id: f32e7fc63a3f264b92c59885ed07866b6c4d0f2dffb0a72690ecb1e96cfcaa59 + tokens: 10000000000000 + - id: cfeb0be57fb31cae74fc752a02d5824e43d2e5cb4fffd930aa4fdfe4e0633afa + tokens: 10000000000000 + - id: df2d7ad41bd12b55018c1e139d4aca680a66e4fa2d55d6c6b0156070793780eb + tokens: 10000000000000 + - id: 0781a11c2733b048130c89884dd283b795ad0beb97502c70bab302a5712d6426 + tokens: 10000000000000 + - id: 66d8276e467d0fb2c3c49bca51e631c24871784cf9739d3cccb1f9d2a932e7ac + tokens: 10000000000000 + - id: b5080b81e4b5475fd2bbb8bb5eb83ea00ce0dbf8b45756cd9468a1c9be7e01b4 + tokens: 10000000000000 + - id: d1548cda259cd3b76621567255b8f4f4681d1613d20294a9e766e0e4fac41c64 + tokens: 10000000000000 + - id: 7889075ac354dd79b1358d9203ecdab4f9a6217464aa273b38b34d54ee60db70 + tokens: 10000000000000 + - id: f12418ad14ef532978573adfb44c2158aa9ad344684a13de1066ea9bbfe0492b + tokens: 10000000000000 + - id: 0ef0d1f77ade53681e4a393020094ad4a7e34dd2c7003ef173ef8071d2c5672a + tokens: 10000000000000 + - id: 9b680da4232ca479309f6334b7ec190226b37d66c29258bd93563abe9fc14113 + tokens: 10000000000000 + - id: 36532a46e5fff9e856f2c0de2e2456905b03b850f3dde3d0c905d5b5991b0881 + tokens: 10000000000000 + - id: dad2c08eac4b36f6d03e3ca1902eeb95351cd810fbd0ca06ceec2d0070f13240 + tokens: 10000000000000 + - id: 67c9758fb9f12aa7108dc04f615b61494df99d485b4ac32b25985a506e5c2866 + tokens: 10000000000000 + +# storagesc +- id: 6dba10422e368813802877a85039d3985d96760ed844092319743fb3a76712d7 + tokens: 750000000000000000 + state: + 
#blobber + - id: f3adfff7b10461bec2f9fc7a38e5dabba6b7d81ea07bfd246f3a56a45f4576e4 + tokens: 10000000000000 + - id: 1014259bfaee2872b12fcd2212b3caa654ce0e1170759b59e2778282651f5beb + tokens: 10000000000000 + - id: dee43fa3b19fd98e0760d2c7c836cf014a3fc80e70aa42b879d49a640cb6c698 + tokens: 10000000000000 + - id: aaa1d41627662df025fe6e2609df873737dab5e6ee074a885a839e9eda8aa7e4 + tokens: 10000000000000 + - id: 2d37bb1793957654587b300043d57bc209e43aa4d76d981ca19681657c0ab7ee + tokens: 10000000000000 + - id: 9d860b8d6802bf85787e14efde3d730d0bd8cd9fc5021e9b4292cffe67d439d3 + tokens: 10000000000000 + - id: 4474af98002b293336087a3d687dcbc41c50ac0cd793e40944a42417489903b1 + tokens: 10000000000000 + - id: 8b1a8be5b81abb3230b33ceb04c7ab3ad60d388773be05f300db926e1376797b + tokens: 10000000000000 + - id: 2e039798edb1cd4f6bd3048ab70d9bb776adc5b5de999a104c1929fc224d2256 + tokens: 10000000000000 + - id: 84aee0f7024645cf0ad60a3fc25a67b13788c592e36b27435ee0d40e5b7e4fc9 + tokens: 10000000000000 + - id: 0c8a75bae8e2744f4eba7662937a811db1c7d076dd2f8cd0823b32b289788446 + tokens: 10000000000000 + - id: 6fddc9c78eaf2605a80547897128a6353db8b688f2616aed1efc2af3d24b7636 + tokens: 10000000000000 + - id: fec034be4b0c61b80a4cfcf46d8df9a4f9f77d6cd6e606ffb8f21214dbdd561c + tokens: 10000000000000 + - id: 20258e4536256a16bdaadc847eb88daa02da7c69279ed55e2c9c6e28bd099bde + tokens: 10000000000000 + - id: 6185ed7d2d800168afa29399d5696dfcd19a619d6c6de08030abe272c2f11ab0 + tokens: 10000000000000 + #validators + - id: 35d01bb33087c62ae634aebd3d7242de39d238b4106c87e441879eb666b580c5 + tokens: 10000000000000 + - id: ae97c1c77ba13689df4087b5b0a0e166eb69e768396b410a79d8887206a44455 + tokens: 10000000000000 + - id: 5d3edbbb053f318e7bfa8e7c3170193eb0c8c01f5edc524c215318fcc9c44cc2 + tokens: 10000000000000 + - id: 42b4494686e2db1f275502c4a752a7695d766a623bbaf0f0e0ca126e265771fc + tokens: 10000000000000 + - id: d0491283e1e7455aff5fb21e181d8968056452883a69e7c180c41f1c328795aa + tokens: 10000000000000 + - id: f040a75a423461b989216d0aeacb0559a919a5d9441d2e93b13e48c5b43bf331 + tokens: 10000000000000 + - id: 08f6b04616ef5b71cbfedff7b340a76b3d2a905aedd5c32400ba627367ab185a + tokens: 10000000000000 + - id: 73ebd5bac2f986368197e84fe47c25ad73df8e1c7cf9e397e702291cb5001879 + tokens: 10000000000000 + - id: 970b717c1da70eec8e77dd72df984e06429642f8371b2f5c8ab441a33d7f7663 + tokens: 10000000000000 + - id: b27eb2e48a26882085fe02ce4cd6202319cf48451691e85fa774fce541239dca + tokens: 10000000000000 + - id: dbedb47da7531ad2d332f93bdde9ae91663bfacdd0d5031a5d448568e1767579 + tokens: 10000000000000 + - id: 7e00877bc248bcd0e73a127b2278decc3da9114183cc52afe49957e856e3a513 + tokens: 10000000000000 + - id: 4f015f05917bee666407e587b0a1335bdf7b3ca299ffdaabd53f247dc0ca5639 + tokens: 10000000000000 + - id: ecc9d20ea988d444520a027fcfa0caae19e17f6387ae79c891b5d4744939db7b + tokens: 10000000000000 + - id: b788f192f1f57a7500ade68933bfd98dcbdde2f38483aad1e799275c44305e93 + tokens: 10000000000000 + +# zcnsc +- id: 6dba10422e368813802877a85039d3985d96760ed844092319743fb3a76712e0 + tokens: 2400000000000000000 + state: + #authorizer + - id: eb6c7c5400fb0b598dc60a2087099edde4046441e3c6c05a094db68ed01bcf1f + tokens: 10000000000000 + - id: 48b427870be072087a25e109be30381dcf77a985941f717bdf1961ee99c8bef2 + tokens: 10000000000000 + - id: 30f3b7baf625a816271383e2fa552bf88ffa92ae35aff3b4b85477b9a8c0cf6a + tokens: 10000000000000 + - id: be8175d8a20a6196da2c5f53ea283ab9d9b1f9cc66473f31ab18518ba15054a1 + tokens: 10000000000000 + - id: 
3ad2da188c31b1516778a96e3124829a1e7b6a63fa67aeed103fdae67727bdc0 + tokens: 10000000000000 + - id: 9e75f3fb28bea5d5d4de4e97416570e785d8f8b9876320646078e434511465b8 + tokens: 10000000000000 + - id: c09d3d6b15f0a036deac1ac572fdfa7fc4c2e5548e18a75aec9c3077d4a302df + tokens: 10000000000000 + - id: 43d1df38c9cfbf6c4a576b14eaa4b1c9c4f6c890c2de853404b0b3f1b3f47b85 + tokens: 10000000000000 + - id: d5b514d328a4788bf27d2c03ec837dfa2bd73411caf0144fdd51fbf824184d07 + tokens: 10000000000000 + - id: f49615973fb975b5286db84f8c38f0b72204391fa9d0f9ad5d45c6b618fdb8fc + tokens: 10000000000000 + #team_wallet + - id: a4e6999add55dd7ac050904d2af2d248dd3329cdde953021bfa9ed9ef677f942 + tokens: 100000000000000000 + #owner_wallet + - id: ac112d5620eb7918e08086506b4c5fba9419be09b3dd97e47e02778b0927dcc4 + tokens: 200000000000000000 + # reserved_for_future + - id: dc10a6c023e958c5f78ec25f5d670cdbcdb57beecd05d6876cf2f57567edcae6 + tokens: 200000000000000000 diff --git a/0chain/release_1.13.6.sh b/0chain/release_1.13.6.sh new file mode 100644 index 00000000..6343eed5 --- /dev/null +++ b/0chain/release_1.13.6.sh @@ -0,0 +1,131 @@ +#!/bin/bash + +# Installing yq +echo -e "\n\e[93m=============================================================================================================================================================================== + Installing yaml query on the server. +=============================================================================================================================================================================== \e[39m" +sudo wget -qO /usr/local/bin/yq https://github.com/mikefarah/yq/releases/latest/download/yq_linux_amd64 || true +sudo chmod a+x /usr/local/bin/yq || true +yq --version || true + +# Stopping sharder container +echo -e "\n\e[93m=============================================================================================================================================================================== + Stop sharder container on the server. +=============================================================================================================================================================================== \e[39m" +docker stop sharder-1 + +# Stopping 0chain.yaml config +echo -e "\n\e[93m=============================================================================================================================================================================== + updating 0chain.yaml config. +=============================================================================================================================================================================== \e[39m" +yq e -i '.server_chain.dbs.settings.permanent_partition_change_period = 2000000' /var/0chain/sharder/ssd/docker.local/config/0chain.yaml +yq e -i '.server_chain.dbs.settings.permanent_partition_keep_count = 1' /var/0chain/sharder/ssd/docker.local/config/0chain.yaml + +# Creating script to be executed on the sharder postgres +echo -e "\n\e[93m=============================================================================================================================================================================== + Creating script for postgres. 
+=============================================================================================================================================================================== \e[39m" +docker exec sharder-postgres-1 psql -U zchain_user -d events_db -c """CREATE OR REPLACE FUNCTION public.create_partition_tables( + schema_name text, + base_table_name text, + partition_column text, + interval_size integer) + RETURNS void + LANGUAGE 'plpgsql' + COST 100 + VOLATILE PARALLEL UNSAFE +AS \$BODY\$ +DECLARE + min_value int; + max_value int; + start_value int; + end_value int; + new_table_name text; + old_table_name text; + create_table_stmt text; + insert_data_stmt text; + rename_table_stmt text; + reatach_seq_stmt text; + drop_base_table_stmt text; +BEGIN + -- Get the minimum and maximum values from the base table + EXECUTE 'SELECT MIN(' || partition_column || '), MAX(' || partition_column || ') FROM ' || schema_name || '.' || base_table_name INTO min_value, max_value; + + start_value := 0; + + -- Generate new partitioned table name + new_table_name := base_table_name || '_new'; + + -- Create new partitioned table based on the existing partitioned table structure + create_table_stmt := 'CREATE TABLE ' || schema_name || '.' || new_table_name || ' (LIKE ' || schema_name || '.' || base_table_name || ' INCLUDING ALL) PARTITION BY RANGE (' || partition_column || ');'; + EXECUTE create_table_stmt; + + -- Rename the old table + old_table_name := base_table_name || '_old'; + rename_table_stmt := 'ALTER TABLE ' || schema_name || '.' || base_table_name || ' RENAME TO ' || old_table_name || ';'; + EXECUTE rename_table_stmt; + + -- Loop through each partition, create it, and attach it to the new table + WHILE start_value <= max_value LOOP + end_value := start_value + interval_size; + + -- Check if the calculated end_value exceeds the max_value + IF end_value > max_value THEN + end_value := max_value; + END IF; + + IF end_value = max_value THEN + end_value := start_value + interval_size ; + END IF; + -- Generate partition name + DECLARE + partition_name text := base_table_name || '_part_' || start_value || '_' || end_value ; + BEGIN + -- Create the partition as a partition of the new table + create_table_stmt := 'CREATE TABLE ' || schema_name || '.' || partition_name || ' PARTITION OF ' || schema_name || '.' || new_table_name || ' FOR VALUES FROM (' || start_value || ') TO (' || end_value || ');'; + EXECUTE create_table_stmt; + + -- Insert data into the partition from the old table + insert_data_stmt := 'INSERT INTO ' || schema_name || '.' || partition_name || ' SELECT * FROM ' || schema_name || '.' || old_table_name || ' WHERE ' || partition_column || ' BETWEEN ' || start_value || ' AND ' || end_value -1 || ';'; + EXECUTE insert_data_stmt; + END; + + start_value := end_value; + END LOOP; + + -- Rename the new table to match the original table name + rename_table_stmt := 'ALTER TABLE ' || schema_name || '.' || new_table_name || ' RENAME TO ' || base_table_name || ';'; + EXECUTE rename_table_stmt; + + reatach_seq_stmt := 'ALTER SEQUENCE ' || schema_name || '.' || base_table_name || '_id_seq' || ' OWNED BY ' ||schema_name || '.' || base_table_name || '.id' || ';'; + EXECUTE reatach_seq_stmt; + + drop_base_table_stmt := 'DROP TABLE ' || schema_name || '.' 
|| old_table_name || ';'; + EXECUTE drop_base_table_stmt; + +END; +\$BODY\$; + +ALTER FUNCTION public.create_partition_tables(text, text, text, integer) + OWNER TO zchain_user;""" + +# Executing script on the sharder postgres +echo -e "\n\e[93m=============================================================================================================================================================================== + Executing script on postgres. +=============================================================================================================================================================================== \e[39m" +docker exec sharder-postgres-1 psql -U zchain_user -d events_db -c """BEGIN; SELECT create_partition_tables('public', 'transactions', 'round', 2000000); SELECT create_partition_tables('public', 'blocks', 'round', 2000000); COMMIT;""" + +# Executing next partition script on the sharder postgres +echo -e "\n\e[93m=============================================================================================================================================================================== + Executing next partition script on the sharder postgres. +=============================================================================================================================================================================== \e[39m" +docker exec sharder-postgres-1 psql -U zchain_user -d events_db -c """CREATE TABLE IF NOT EXISTS blocks_part_24000000_26000000 PARTITION OF blocks FOR VALUES FROM (24000000) TO (26000000); CREATE TABLE IF NOT EXISTS blocks_part_26000000_28000000 PARTITION OF blocks FOR VALUES FROM (26000000) TO (28000000); CREATE TABLE IF NOT EXISTS transactions_part_24000000_26000000 PARTITION OF transactions FOR VALUES FROM (24000000) TO (26000000); CREATE TABLE IF NOT EXISTS transactions_part_26000000_28000000 PARTITION OF transactions FOR VALUES FROM (26000000) TO (28000000);""" + +# Deploying new release tag on sharder +echo -e "\n\e[93m=============================================================================================================================================================================== + Deploying new release tag v1.13.6 on sharder. 
+=============================================================================================================================================================================== \e[39m" +export TAG=v1.13.6 +yq e -i ".services.sharder.image = \"0chaindev/sharder:${TAG}\"" /var/0chain/sharder/ssd/docker.local/build.sharder/p0docker-compose.yaml +cd /var/0chain/sharder/ssd/docker.local/sharder1/ +sudo bash ../bin/start.p0sharder.sh /var/0chain/sharder/ssd /var/0chain/sharder/hdd diff --git a/0chain/sharder-files/docker.local/build.sharder/p0docker-compose.yaml b/0chain/sharder-files/docker.local/build.sharder/p0docker-compose.yaml index b6f77406..221f5deb 100644 --- a/0chain/sharder-files/docker.local/build.sharder/p0docker-compose.yaml +++ b/0chain/sharder-files/docker.local/build.sharder/p0docker-compose.yaml @@ -10,14 +10,14 @@ services: POSTGRES_HOST_AUTH_METHOD: trust POSTGRES_PASSWORD: zchian SLOW_TABLESPACE_PATH: /var/lib/postgresql/hdd_ts - SLOW_TABLESPACE: hddtablespace # this should match with the dbs.events.slowtablespace in 0chain.yaml + SLOW_TABLESPACE: hdd_tablespace # this should match with the dbs.events.slowtablespace in 0chain.yaml # ports: # - 5432 volumes: - ../config/postgresql.conf:/etc/postgresql/postgresql.conf - - ${PROJECT_ROOT_SSD}/docker.local/sharder${SHARDER}/data/postgresql:/var/lib/postgresql/data + - ${PROJECT_ROOT_SSD}/sharder${SHARDER}/data/postgresql:/var/lib/postgresql/data - ../sql_script/:/docker-entrypoint-initdb.d/ - - ${PROJECT_ROOT_HDD}/docker.local/sharder${SHARDER}/data/postgresql2:/var/lib/postgresql/hdd_ts + - ${PROJECT_ROOT_HDD}/sharder${SHARDER}/data/postgresql2:/var/lib/postgresql/hdd_ts command: postgres -c config_file=/etc/postgresql/postgresql.conf restart: unless-stopped networks: @@ -26,7 +26,7 @@ services: sharder: container_name: sharder-${SHARDER} - image: 0chaindev/sharder:v1.10.0 + image: 0chaindev/sharder:v1.11.0 environment: - POSTGRES_HOST=sharder-postgres-${SHARDER} - DOCKER=true @@ -41,7 +41,7 @@ services: command: - /bin/sh - -c - - mkdir -p /0chain/data/blocks && mkdir -p /0chain/data/rocksdb && ./bin/sharder --deployment_mode 0 --keys_file config/b0snode${SHARDER}_keys.txt + - mkdir -p /0chain/data/blocks && mkdir -p /0chain/data/rocksdb && ./bin/sharder --deployment_mode 2 --keys_file config/b0snode${SHARDER}_keys.txt restart: unless-stopped networks: testnet0: diff --git a/0chain/sharder-files/docker.local/config/0chain.yaml b/0chain/sharder-files/docker.local/config/0chain.yaml index e37f708e..4b90378b 100644 --- a/0chain/sharder-files/docker.local/config/0chain.yaml +++ b/0chain/sharder-files/docker.local/config/0chain.yaml @@ -1,29 +1,15 @@ version: 1.0 logging: - level: "debug" - verbose: true + level: "error" + verbose: false console: false # printing log to console is only supported in development mode goroutines: false memlog: false -development: - smart_contract: - zrc20: true - txn_generation: - wallets: 50 - max_transactions: 0 - max_txn_fee: 10000 - min_txn_fee: 0 - max_txn_value: 10000000000 - min_txn_value: 100 - faucet: - refill_amount: 1000000000000000 - pprof: true - server_chain: id: "0afc093ffb509f059c55478bc1a60351cef7b4e9c008a53a6cc8241ca8617dfe" - owner: "edb90b850f2e7e7cbd0a1fa370fdcc5cd378ffbec95363a7bc0e5a98b8ba5759" + owner: "ac112d5620eb7918e08086506b4c5fba9419be09b3dd97e47e02778b0927dcc4" decimals: 10 tokens: 200000000 genesis_block: @@ -33,13 +19,13 @@ server_chain: max_block_cost: 10000 #equal to 100ms max_byte_size: 1638400 min_generators: 2 - generators_percent: 0.15 + generators_percent: 0.1 
replicators: 0 generation: timeout: 15 retry_wait_time: 5 #milliseconds proposal: - max_wait_time: 180ms + max_wait_time: 250ms wait_mode: static # static or dynamic consensus: threshold_by_count: 66 # percentage (registration) @@ -66,16 +52,15 @@ server_chain: payload: max_size: 98304 # bytes timeout: 1800 #30 minutes - min_fee: 0 - max_fee: 0.000000001 # 10 SAS - cost_fee_coeff: 100000 # 1000000 costs represents 1 ZCN, 1000 cost represents 1 mZCN + min_fee: 0.0001 + max_fee: 0.2 # max fee per txn would be 1 ZCN, adjust later if needed transfer_cost: 10 + cost_fee_coeff: 100000 # 100000 unit cost per 1 ZCN future_nonce: 100 # allow 100 nonce ahead of current client state exempt: - contributeMpk - shareSignsOrShares - wait - - mint - pour client: signature_scheme: bls0chain # ed25519 or bls0chain @@ -95,7 +80,7 @@ server_chain: setting_update_period: 200 #rounds timeout: 8000ms storage: true - faucet: true + faucet: false miner: true multisig: false vesting: false @@ -127,18 +112,18 @@ server_chain: events: # event database configuration local enabled: true - name: dummy - user: dummy + name: events_db + user: zchain_user password: dummy host: dummy #localhost port: 5432 max_idle_conns: 100 max_open_conns: 200 conn_max_lifetime: 20s - slowtablespace: hddtablespace + slowtablespace: hdd_tablespace settings: # event database settings blockchain - debug: true + debug: false aggregate_period: 4000 partition_change_period: 10000 partition_keep_count: 10 @@ -146,7 +131,7 @@ server_chain: network: magic_block_file: config/b0magicBlock.json - initial_states: config/initial_state.yaml + initial_states: config/initial_states.yaml genesis_dkg: 0 dns_url: "" # http://198.18.0.98:9091 relay_time: 200 # milliseconds @@ -156,27 +141,25 @@ network: large_message: 3000 # milliseconds large_message_th_size: 5120 # anything greater than this size in bytes user_handlers: - rate_limit: 100000000 # 100000000 per second + rate_limit: 1000 # 1000 per second n2n_handlers: - rate_limit: 10000000000 # 10000000000 per second + rate_limit: 1000 # 10000 per second # delegate wallet is wallet that used to configure node in Miner SC; if its # empty, then node ID used delegate_wallet: "" # % of fees and rewards for generator -service_charge: 0.10 # [0; 1) of all fees +service_charge: 0 # [0; 1) of all fees # max number of delegate pools allowed by a node in miner SC -number_of_delegates: 10 # max number of delegate pools - -cassandra: - connection: - delay: 10 # in seconds - retries: 10 -# host: cassandra -# port: 9042 +number_of_delegates: 100 # max number of delegate pools +# There's a TODO comment in fs_store.go. Please check this while we go into production. storage: -# Uncomment the following lines to enable cache. +# cache is optional. It should be SSD drive. Having HDD drive as cache is not effective. +# Cache is effective when blocks are stored in HDD. Cache stores uncompressed blocks so that +# accessing and unmarshalling is faster than with compressed block in HDD. +# +# Uncomment the following lines to enable cache. 
# cache: # path: "/mnt/ssd/sharder1" # total_blocks: 1000 # Total number of blocks this cache will store @@ -187,3 +170,11 @@ integration_tests: # lock_interval used by nodes to request server to connect to blockchain # after start lock_interval: 1s + +kafka: + enabled: false + host: "kafka-ip:9092" + username: "admin" + password: "password" + topic: "events" + write_timeout: 10s diff --git a/0chain/sharder-files/docker.local/config/postgresql.conf b/0chain/sharder-files/docker.local/config/postgresql.conf index 136b8e58..16b01a12 100644 --- a/0chain/sharder-files/docker.local/config/postgresql.conf +++ b/0chain/sharder-files/docker.local/config/postgresql.conf @@ -1,3 +1,55 @@ +# ----------------------------- +# PostgreSQL configuration file for 1024M RAM +# ----------------------------- +# +# This file consists of lines of the form: +# +# name = value +# +# (The "=" is optional.) Whitespace may be used. Comments are introduced with +# "#" anywhere on a line. The complete list of parameter names and allowed +# values can be found in the PostgreSQL documentation. +# +# The commented-out settings shown in this file represent the default values. +# Re-commenting a setting is NOT sufficient to revert it to the default value; +# you need to reload the server. +# +# This file is read on server startup and when the server receives a SIGHUP +# signal. If you edit the file on a running system, you have to SIGHUP the +# server for the changes to take effect, run "pg_ctl reload", or execute +# "SELECT pg_reload_conf()". Some parameters, which are marked below, +# require a server shutdown and restart to take effect. +# +# Any parameter can also be given as a command-line option to the server, e.g., +# "postgres -c log_connections=on". Some parameters can be changed at run time +# with the "SET" SQL command. +# +# Memory units: kB = kilobytes Time units: ms = milliseconds +# MB = megabytes s = seconds +# GB = gigabytes min = minutes +# TB = terabytes h = hours +# d = days + + +#------------------------------------------------------------------------------ +# FILE LOCATIONS +#------------------------------------------------------------------------------ + +# The default values of these variables are driven from the -D command-line +# option or PGDATA environment variable, represented here as ConfigDir. + +#data_directory = 'ConfigDir' # use data in another directory + # (change requires restart) +#hba_file = 'ConfigDir/pg_hba.conf' # host-based authentication file + # (change requires restart) +#ident_file = 'ConfigDir/pg_ident.conf' # ident configuration file + # (change requires restart) + +# If external_pid_file is not explicitly set, no extra PID file is written. 
+#external_pid_file = '' # write an extra PID file + # (change requires restart) + + #------------------------------------------------------------------------------ # CONNECTIONS AND AUTHENTICATION #------------------------------------------------------------------------------ @@ -9,7 +61,52 @@ listen_addresses = '*' # defaults to 'localhost'; use '*' for all # (change requires restart) #port = 5432 # (change requires restart) -max_connections = 1000 # (change requires restart) +max_connections = 300 # (change requires restart) +#superuser_reserved_connections = 3 # (change requires restart) +#unix_socket_directories = '/var/run/postgresql' # comma-separated list of directories + # (change requires restart) +#unix_socket_group = '' # (change requires restart) +#unix_socket_permissions = 0777 # begin with 0 to use octal notation + # (change requires restart) +#bonjour = off # advertise server via Bonjour + # (change requires restart) +#bonjour_name = '' # defaults to the computer name + # (change requires restart) + +# - TCP Keepalives - +# see "man 7 tcp" for details + +#tcp_keepalives_idle = 0 # TCP_KEEPIDLE, in seconds; + # 0 selects the system default +#tcp_keepalives_interval = 0 # TCP_KEEPINTVL, in seconds; + # 0 selects the system default +#tcp_keepalives_count = 0 # TCP_KEEPCNT; + # 0 selects the system default + +# - Authentication - + +#authentication_timeout = 1min # 1s-600s +#password_encryption = md5 # md5 or scram-sha-256 +#db_user_namespace = off + +# GSSAPI using Kerberos +#krb_server_keyfile = '' +#krb_caseins_users = off + +# - SSL - + +#ssl = off +#ssl_ca_file = '' +#ssl_cert_file = 'server.crt' +#ssl_crl_file = '' +#ssl_key_file = 'server.key' +#ssl_ciphers = 'HIGH:MEDIUM:+3DES:!aNULL' # allowed SSL ciphers +#ssl_prefer_server_ciphers = on +#ssl_ecdh_curve = 'prime256v1' +#ssl_dh_params_file = '' +#ssl_passphrase_command = '' +#ssl_passphrase_command_supports_reload = off + #------------------------------------------------------------------------------ # RESOURCE USAGE (except WAL) @@ -18,17 +115,17 @@ max_connections = 1000 # (change requires restart) # - Memory - #1/4 RAM -shared_buffers = 250MB # min 128kB 1/4 RAM +shared_buffers = 16GB # min 128kB 1/4 RAM # (change requires restart) -#huge_pages = try # on, off, or try +huge_pages = try # on, off, or try # (change requires restart) #temp_buffers = 8MB # min 800kB #max_prepared_transactions = 0 # zero disables the feature # (change requires restart) # Caution: it is not advisable to set max_prepared_transactions nonzero unless # you actively intend to use prepared transactions. 
-work_mem = 2MB # min 64kB -maintenance_work_mem = 2MB # min 1MB +work_mem = 13981kB # min 64kB +maintenance_work_mem = 2GB # min 1MB autovacuum_work_mem = -1 # min 1MB, or -1 to use maintenance_work_mem #max_stack_depth = 2MB # min 100kB dynamic_shared_memory_type = posix # the default is the first option @@ -40,6 +137,45 @@ dynamic_shared_memory_type = posix # the default is the first option # use none to disable dynamic shared memory # (change requires restart) +# - Disk - + +#temp_file_limit = -1 # limits per-process temp file space + # in kB, or -1 for no limit + +# - Kernel Resources - + +#max_files_per_process = 1000 # min 25 + # (change requires restart) + +# - Cost-Based Vacuum Delay - + +vacuum_cost_delay = 10 # 0-100 milliseconds +#vacuum_cost_page_hit = 1 # 0-10000 credits +#vacuum_cost_page_miss = 10 # 0-10000 credits +#vacuum_cost_page_dirty = 20 # 0-10000 credits +vacuum_cost_limit = 200 # 1-10000 credits + +# - Background Writer - + +#bgwriter_delay = 200ms # 10-10000ms between rounds +#bgwriter_lru_maxpages = 100 # max buffers written/round, 0 disables +#bgwriter_lru_multiplier = 2.0 # 0-10.0 multiplier on buffers scanned/round +#bgwriter_flush_after = 512kB # measured in pages, 0 disables + +# - Asynchronous Behavior - + +effective_io_concurrency = 200 # 1-1000; 0 disables prefetching +max_worker_processes = 12 # (change requires restart) +max_parallel_maintenance_workers = 4 # taken from max_parallel_workers +max_parallel_workers_per_gather = 4 # taken from max_parallel_workers +#parallel_leader_participation = on +max_parallel_workers = 12 # maximum number of max_worker_processes that + # can be used in parallel operations +#old_snapshot_threshold = -1 # 1min-60d; -1 disables; 0 is immediate + # (change requires restart) +#backend_flush_after = 0 # measured in pages, 0 disables + + #------------------------------------------------------------------------------ # WRITE-AHEAD LOG #------------------------------------------------------------------------------ @@ -51,30 +187,420 @@ dynamic_shared_memory_type = posix # the default is the first option fsync = off # flush data to disk for crash safety # (turning this off can cause # unrecoverable data corruption) +#synchronous_commit = on # synchronization level; + # off, local, remote_write, remote_apply, or on +#wal_sync_method = fsync # the default is the first option + # supported by the operating system: + # open_datasync + # fdatasync (default on Linux and FreeBSD) + # fsync + # fsync_writethrough + # open_sync +#full_page_writes = on # recover from partial page writes +#wal_compression = off # enable compression of full-page writes +#wal_log_hints = off # also do full page writes of non-critical updates + # (change requires restart) +wal_buffers = 16MB # min 32kB, -1 sets based on shared_buffers + # (change requires restart) +#wal_writer_delay = 200ms # 1-10000 milliseconds +#wal_writer_flush_after = 1MB # measured in pages, 0 disables + commit_delay = 500 # range 0-100000, in microseconds commit_siblings = 10 # range 1-1000 # - Checkpoints - #checkpoint_timeout = 5min # range 30s-1d -max_wal_size = 4GB +max_wal_size = 8GB min_wal_size = 2GB +checkpoint_completion_target = 0.9 # checkpoint target duration, 0.0 - 1.0 +#checkpoint_flush_after = 256kB # measured in pages, 0 disables +#checkpoint_warning = 30s # 0 disables + +# - Archiving - + +#archive_mode = off # enables archiving; off, on, or always + # (change requires restart) +#archive_command = '' # command to use to archive a logfile segment + # placeholders: %p = 
path of file to archive + # %f = file name only + # e.g. 'test ! -f /mnt/server/archivedir/%f && cp %p /mnt/server/archivedir/%f' +#archive_timeout = 0 # force a logfile segment switch after this + # number of seconds; 0 disables + + +#------------------------------------------------------------------------------ +# REPLICATION +#------------------------------------------------------------------------------ + +# - Sending Servers - + +# Set these on the master and on any standby that will send replication data. + +#max_wal_senders = 10 # max number of walsender processes + # (change requires restart) +#wal_keep_segments = 0 # in logfile segments; 0 disables +#wal_sender_timeout = 60s # in milliseconds; 0 disables + +#max_replication_slots = 10 # max number of replication slots + # (change requires restart) +#track_commit_timestamp = off # collect timestamp of transaction commit + # (change requires restart) + +# - Master Server - + +# These settings are ignored on a standby server. + +#synchronous_standby_names = '' # standby servers that provide sync rep + # method to choose sync standbys, number of sync standbys, + # and comma-separated list of application_name + # from standby(s); '*' = all +#vacuum_defer_cleanup_age = 0 # number of xacts by which cleanup is delayed + +# - Standby Servers - + +# These settings are ignored on a master server. + +#hot_standby = on # "off" disallows queries during recovery + # (change requires restart) +#max_standby_archive_delay = 30s # max delay before canceling queries + # when reading WAL from archive; + # -1 allows indefinite delay +#max_standby_streaming_delay = 30s # max delay before canceling queries + # when reading streaming WAL; + # -1 allows indefinite delay +#wal_receiver_status_interval = 10s # send replies at least this often + # 0 disables +#hot_standby_feedback = off # send info from standby to prevent + # query conflicts +#wal_receiver_timeout = 60s # time that receiver waits for + # communication from master + # in milliseconds; 0 disables +#wal_retrieve_retry_interval = 5s # time to wait before retrying to + # retrieve WAL after a failed attempt + +# - Subscribers - + +# These settings are ignored on a publisher. 
+ +#max_logical_replication_workers = 4 # taken from max_worker_processes + # (change requires restart) +#max_sync_workers_per_subscription = 2 # taken from max_logical_replication_workers + #------------------------------------------------------------------------------ # QUERY TUNING #------------------------------------------------------------------------------ +# - Planner Method Configuration - + +#enable_bitmapscan = on +#enable_hashagg = on +#enable_hashjoin = on +#enable_indexscan = on +#enable_indexonlyscan = on +#enable_material = on +#enable_mergejoin = on +#enable_nestloop = on +#enable_parallel_append = on +#enable_seqscan = on +#enable_sort = on +#enable_tidscan = on +#enable_partitionwise_join = off +#enable_partitionwise_aggregate = off +#enable_parallel_hash = on +#enable_partition_pruning = on + +# - Planner Cost Constants - + +#seq_page_cost = 1.0 # measured on an arbitrary scale +random_page_cost = 1.1 # same scale as above +#cpu_tuple_cost = 0.01 # same scale as above +#cpu_index_tuple_cost = 0.005 # same scale as above +#cpu_operator_cost = 0.0025 # same scale as above +#parallel_tuple_cost = 0.1 # same scale as above +#parallel_setup_cost = 1000.0 # same scale as above + +#jit_above_cost = 100000 # perform JIT compilation if available + # and query more expensive than this; + # -1 disables +#jit_inline_above_cost = 500000 # inline small functions if query is + # more expensive than this; -1 disables +#jit_optimize_above_cost = 500000 # use expensive JIT optimizations if + # query is more expensive than this; + # -1 disables + #min_parallel_table_scan_size = 8MB #min_parallel_index_scan_size = 512kB -effective_cache_size = 50MB +effective_cache_size = 48GB + +# - Genetic Query Optimizer - + +#geqo = on +#geqo_threshold = 12 +#geqo_effort = 5 # range 1-10 +#geqo_pool_size = 0 # selects default based on effort +#geqo_generations = 0 # selects default based on effort +#geqo_selection_bias = 2.0 # range 1.5-2.0 +#geqo_seed = 0.0 # range 0.0-1.0 + +# - Other Planner Options - + +default_statistics_target = 100 # range 1-10000 +#constraint_exclusion = partition # on, off, or partition +#cursor_tuple_fraction = 0.1 # range 0.0-1.0 +#from_collapse_limit = 8 +#join_collapse_limit = 8 # 1 disables collapsing of explicit + # JOIN clauses +#force_parallel_mode = off +#jit = off # allow JIT compilation + + +#------------------------------------------------------------------------------ +# REPORTING AND LOGGING +#------------------------------------------------------------------------------ + +# - Where to Log - + +#log_destination = 'stderr' # Valid values are combinations of + # stderr, csvlog, syslog, and eventlog, + # depending on platform. csvlog + # requires logging_collector to be on. + +# This is used when logging to stderr: +#logging_collector = off # Enable capturing of stderr and csvlog + # into log files. Required to be on for + # csvlogs. + # (change requires restart) + +# These are only used if logging_collector is on: +#log_directory = 'log' # directory where log files are written, + # can be absolute or relative to PGDATA +#log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log' # log file name pattern, + # can include strftime() escapes +#log_file_mode = 0600 # creation mode for log files, + # begin with 0 to use octal notation +#log_truncate_on_rotation = off # If on, an existing log file with the + # same name as the new log file will be + # truncated rather than appended to. 
+ # But such truncation only occurs on + # time-driven rotation, not on restarts + # or size-driven rotation. Default is + # off, meaning append to existing files + # in all cases. +#log_rotation_age = 1d # Automatic rotation of logfiles will + # happen after that time. 0 disables. +#log_rotation_size = 10MB # Automatic rotation of logfiles will + # happen after that much log output. + # 0 disables. + +# These are relevant when logging to syslog: +#syslog_facility = 'LOCAL0' +#syslog_ident = 'postgres' +#syslog_sequence_numbers = on +#syslog_split_messages = on + +# This is only relevant when logging to eventlog (win32): +# (change requires restart) +#event_source = 'PostgreSQL' + +# - When to Log - + +#log_min_messages = warning # values in order of decreasing detail: + # debug5 + # debug4 + # debug3 + # debug2 + # debug1 + # info + # notice + # warning + # error + # log + # fatal + # panic +#log_min_error_statement = error # values in order of decreasing detail: + # debug5 + # debug4 + # debug3 + # debug2 + # debug1 + # info + # notice + # warning + # error + # log + # fatal + # panic (effectively off) + +#log_min_duration_statement = -1 # -1 is disabled, 0 logs all statements + # and their durations, > 0 logs only + # statements running at least this number + # of milliseconds + + +# - What to Log - + +#debug_print_parse = off +#debug_print_rewritten = off +#debug_print_plan = off +#debug_pretty_print = on +#log_checkpoints = off +#log_connections = off +#log_disconnections = off +#log_duration = off +#log_error_verbosity = default # terse, default, or verbose messages +#log_hostname = off +#log_line_prefix = '%m [%p] ' # special values: + # %a = application name + # %u = user name + # %d = database name + # %r = remote host and port + # %h = remote host + # %p = process ID + # %t = timestamp without milliseconds + # %m = timestamp with milliseconds + # %n = timestamp with milliseconds (as a Unix epoch) + # %i = command tag + # %e = SQL state + # %c = session ID + # %l = session line number + # %s = session start timestamp + # %v = virtual transaction ID + # %x = transaction ID (0 if none) + # %q = stop here in non-session + # processes + # %% = '%' + # e.g. 
'<%u%%%d> ' +log_lock_waits = on # log lock waits >= deadlock_timeout +#log_statement = 'none' # none, ddl, mod, all +#log_replication_commands = off +#log_temp_files = -1 # log temporary files equal or larger + # than the specified size in kilobytes; + # -1 disables, 0 logs all temp files log_timezone = 'Etc/UTC' +#------------------------------------------------------------------------------ +# PROCESS TITLE +#------------------------------------------------------------------------------ + +#cluster_name = '' # added to process titles if nonempty + # (change requires restart) +#update_process_title = on + + +#------------------------------------------------------------------------------ +# STATISTICS +#------------------------------------------------------------------------------ + +# - Query and Index Statistics Collector - + +#track_activities = on +#track_counts = on +#track_io_timing = off +#track_functions = none # none, pl, all +#track_activity_query_size = 1024 # (change requires restart) +#stats_temp_directory = 'pg_stat_tmp' + + +# - Monitoring - + +#log_parser_stats = off +#log_planner_stats = off +#log_executor_stats = off +#log_statement_stats = off + + +#------------------------------------------------------------------------------ +# AUTOVACUUM +#------------------------------------------------------------------------------ + +autovacuum = on # Enable autovacuum subprocess? 'on' + # requires track_counts to also be on. +#log_autovacuum_min_duration = -1 # -1 disables, 0 logs all actions and + # their durations, > 0 logs only + # actions running at least this number + # of milliseconds. +#autovacuum_max_workers = 3 # max number of autovacuum subprocesses + # (change requires restart) +#autovacuum_naptime = 1min # time between autovacuum runs +#autovacuum_vacuum_threshold = 50 # min number of row updates before + # vacuum +#autovacuum_analyze_threshold = 50 # min number of row updates before + # analyze +#autovacuum_vacuum_scale_factor = 0.2 # fraction of table size before vacuum +#autovacuum_analyze_scale_factor = 0.1 # fraction of table size before analyze +#autovacuum_freeze_max_age = 200000000 # maximum XID age before forced vacuum + # (change requires restart) +#autovacuum_multixact_freeze_max_age = 400000000 # maximum multixact age + # before forced vacuum + # (change requires restart) +#autovacuum_vacuum_cost_delay = 20ms # default vacuum cost delay for + # autovacuum, in milliseconds; + # -1 means use vacuum_cost_delay +#autovacuum_vacuum_cost_limit = -1 # default vacuum cost limit for + # autovacuum, -1 means use + # vacuum_cost_limit + + +#------------------------------------------------------------------------------ +# CLIENT CONNECTION DEFAULTS +#------------------------------------------------------------------------------ + +# - Statement Behavior - + +#client_min_messages = notice # values in order of decreasing detail: + # debug5 + # debug4 + # debug3 + # debug2 + # debug1 + # log + # notice + # warning + # error +#search_path = '"$user", public' # schema names +#row_security = on +#default_tablespace = '' # a tablespace name, '' uses the default +#temp_tablespaces = '' # a list of tablespace names, '' uses + # only default tablespace +#check_function_bodies = on +#default_transaction_isolation = 'read committed' +#default_transaction_read_only = off +#default_transaction_deferrable = off +#session_replication_role = 'origin' +#statement_timeout = 0 # in milliseconds, 0 is disabled +#lock_timeout = 0 # in milliseconds, 0 is disabled 
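+# (Annotation, not part of the stock postgresql.conf sample): statement_timeout and
+# lock_timeout above are left at 0 (disabled) for the sharder's events_db; if a
+# runaway query ever needs a guard, it can be capped per session instead of
+# globally, e.g.:
+#   SET statement_timeout = '30s';
+#   SET lock_timeout = '5s';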
+#idle_in_transaction_session_timeout = 0 # in milliseconds, 0 is disabled +#vacuum_freeze_min_age = 50000000 +#vacuum_freeze_table_age = 150000000 +#vacuum_multixact_freeze_min_age = 5000000 +#vacuum_multixact_freeze_table_age = 150000000 +#vacuum_cleanup_index_scale_factor = 0.1 # fraction of total number of tuples + # before index cleanup, 0 always performs + # index cleanup +#bytea_output = 'hex' # hex, escape +#xmlbinary = 'base64' +#xmloption = 'content' +#gin_fuzzy_search_limit = 0 +#gin_pending_list_limit = 4MB + # - Locale and Formatting - datestyle = 'iso, mdy' #intervalstyle = 'postgres' timezone = 'Etc/UTC' +#timezone_abbreviations = 'Default' # Select the set of available time zone + # abbreviations. Currently, there are + # Default + # Australia (historical usage) + # India + # You can create your own file in + # share/timezonesets/. +#extra_float_digits = 0 # min -15, max 3 +#client_encoding = sql_ascii # actually, defaults to database + # encoding # These settings are initialized by initdb, but they can be changed. lc_messages = 'en_US.utf8' # locale for system error message @@ -91,3 +617,80 @@ default_text_search_config = 'pg_catalog.english' shared_preload_libraries = 'pg_stat_statements' # (change requires restart) pg_stat_statements.max = 10000 pg_stat_statements.track = all +#local_preload_libraries = '' +#session_preload_libraries = '' +#jit_provider = 'llvmjit' # JIT library to use + +# - Other Defaults - + +#dynamic_library_path = '$libdir' +#extension_destdir = '' # prepend path when loading extensions + # and shared objects (added by Debian) + + +#------------------------------------------------------------------------------ +# LOCK MANAGEMENT +#------------------------------------------------------------------------------ + +#deadlock_timeout = 1s +max_locks_per_transaction = 1024 # min 10 + # (change requires restart) +#max_pred_locks_per_transaction = 64 # min 10 + # (change requires restart) +#max_pred_locks_per_relation = -2 # negative values mean + # (max_pred_locks_per_transaction + # / -max_pred_locks_per_relation) - 1 +#max_pred_locks_per_page = 2 # min 0 + + +#------------------------------------------------------------------------------ +# VERSION AND PLATFORM COMPATIBILITY +#------------------------------------------------------------------------------ + +# - Previous PostgreSQL Versions - + +#array_nulls = on +#backslash_quote = safe_encoding # on, off, or safe_encoding +#default_with_oids = off +#escape_string_warning = on +#lo_compat_privileges = off +#operator_precedence_warning = off +#quote_all_identifiers = off +#standard_conforming_strings = on +#synchronize_seqscans = on + +# - Other Platforms and Clients - + +#transform_null_equals = off + + +#------------------------------------------------------------------------------ +# ERROR HANDLING +#------------------------------------------------------------------------------ + +#exit_on_error = off # terminate session on any error? +#restart_after_crash = on # reinitialize after backend crash? +#data_sync_retry = off # retry or panic on failure to fsync + # data? + # (change requires restart) + + +#------------------------------------------------------------------------------ +# CONFIG FILE INCLUDES +#------------------------------------------------------------------------------ + +# These options allow settings to be loaded from files other than the +# default postgresql.conf. Note that these are directives, not variable +# assignments, so they can usefully be given more than once. 
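+# (Annotation, not part of the stock postgresql.conf sample): with pg_stat_statements
+# preloaded above, slow events_db queries can be inspected once the extension is
+# created in that database (container, user and database names are the ones used by
+# the other scripts in this repo; on PostgreSQL 13+ the timing column is
+# total_exec_time rather than total_time):
+#   docker exec sharder-postgres-1 psql -U zchain_user -d events_db -c "CREATE EXTENSION IF NOT EXISTS pg_stat_statements;"
+#   docker exec sharder-postgres-1 psql -U zchain_user -d events_db -c "SELECT calls, total_time, query FROM pg_stat_statements ORDER BY total_time DESC LIMIT 5;"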
+ +#include_dir = '...' # include files ending in '.conf' from + # a directory, e.g., 'conf.d' +#include_if_exists = '...' # include file only if it exists +#include = '...' # include file + + +#------------------------------------------------------------------------------ +# CUSTOMIZED OPTIONS +#------------------------------------------------------------------------------ + +# Add settings for extensions here diff --git a/0chain/sharder-files/docker.local/config/sc.yaml b/0chain/sharder-files/docker.local/config/sc.yaml index 2b764164..aba26140 100644 --- a/0chain/sharder-files/docker.local/config/sc.yaml +++ b/0chain/sharder-files/docker.local/config/sc.yaml @@ -1,36 +1,37 @@ smart_contracts: faucetsc: - owner_id: 1746b06bb09f55ee01b33b5e2e055d6cc7a900cb57c0a3a5eaabb8a0e7745802 + owner_id: ac112d5620eb7918e08086506b4c5fba9419be09b3dd97e47e02778b0927dcc4 pour_limit: 1 pour_amount: 1 - max_pour_amount: 100000 - periodic_limit: 100000000 - global_limit: 100000000 - individual_reset: 10m # in hours - global_reset: 10m # in hours + max_pour_amount: 10 + periodic_limit: 1000 + global_limit: 100000 + individual_reset: 3h # in hours + global_reset: 48h # in hours cost: update-settings: 100 - pour: 226 + pour: 100 refill: 100 minersc: - owner_id: 1746b06bb09f55ee01b33b5e2e055d6cc7a900cb57c0a3a5eaabb8a0e7745802 + owner_id: ac112d5620eb7918e08086506b4c5fba9419be09b3dd97e47e02778b0927dcc4 # miners - max_n: 92 # 100 - min_n: 92 # 3 + max_n: 103 # 100 + min_n: 103 # 3 # sharders max_s: 27 # 30 min_s: 1 # 1 # max delegates allowed by SC - max_delegates: 200 # + max_delegates: 100 # # DKG t_percent: .66 # of active k_percent: .75 # of registered x_percent: 0.70 # percentage of prev mb miners required to be part of next mb # etc - min_stake: 0.0 # min stake can be set by a node (boundary for all nodes) - max_stake: 20000.0 # max stake can be set by a node (boundary for all nodes) + min_stake: 1000.0 # min stake can be set by a node (boundary for all nodes) + max_stake: 2000000.0 # max stake can be set by a node (boundary for all nodes) + min_stake_per_delegate: 50000.0 start_rounds: 50 contribute_rounds: 50 share_rounds: 50 @@ -40,13 +41,13 @@ smart_contracts: reward_rate: 1.0 # [0; 1) # share ratio is miner/block sharders rewards ratio, for example 0.1 # gives 10% for miner and rest for block sharders - share_ratio: 0.16 # [0; 1) + share_ratio: 0.79 # [0; 1) # reward for a block - block_reward: 0.068 # tokens + block_reward: 0.09 # tokens # max service charge can be set by a generator max_charge: 0.5 # % # epoch is number of rounds before rewards and interest are decreased - epoch: 125000000 # rounds + epoch: 95000000 # rounds # decline rewards every new epoch by this value (the block_reward) reward_decline_rate: 0.1 # [0; 1), 0.1 = 10% # no mints after miner SC total mints reaches this boundary @@ -62,49 +63,50 @@ smart_contracts: health_check_period: 90m cooldown_period: 100 cost: - add_miner: 318 + add_miner: 361 add_sharder: 331 - delete_miner: 435 - delete_sharder: 308 - miner_health_check: 137 - sharder_health_check: 141 - contributeMpk: 1347 - shareSignsOrShares: 495 + delete_miner: 484 + delete_sharder: 335 + miner_health_check: 149 + sharder_health_check: 145 + contributeMpk: 1433 + shareSignsOrShares: 509 wait: 100 #todo - update_globals: 250 - update_settings: 120 - update_miner_settings: 125 - update_sharder_settings: 125 - payFees: 1230 + update_globals: 269 + update_settings: 136 + update_miner_settings: 137 + update_sharder_settings: 134 + payFees: 1356 feesPaid: 100 #todo mintedTokens: 
100 #todo - addToDelegatePool: 175 - deleteFromDelegatePool: 139 - sharder_keep: 197 - collect_reward: 208 - kill_miner: 133 - kill_sharder: 138 + addToDelegatePool: 186 + deleteFromDelegatePool: 150 + sharder_keep: 211 + collect_reward: 230 + kill_miner: 146 + kill_sharder: 140 storagesc: - owner_id: 1746b06bb09f55ee01b33b5e2e055d6cc7a900cb57c0a3a5eaabb8a0e7745802 + owner_id: ac112d5620eb7918e08086506b4c5fba9419be09b3dd97e47e02778b0927dcc4 # the time_unit is a duration used as divider for a write price; a write # price measured in tok / GB / time_unit, where the time_unit is this # configuration; for example 1h, 24h (a day), 720h (a month -- 30 days); - time_unit: "720h" - min_stake: 0.01 # min stake can be set by a node (boundary for all nodes) - max_stake: 20000.0 # max stake can be set by a node (boundary for all nodes) + time_unit: "8760h" + min_stake: 1.0 # min stake can be set by a node (boundary for all nodes) + max_stake: 2000000.0 # max stake can be set by a node (boundary for all nodes) # max_mint max_mint: 75000000.0 # tokens, max amount of tokens can be minted by SC + min_stake_per_delegate: 10.0 # min possible allocations size in bytes allowed by the SC - min_alloc_size: 1024 - # max challenge completion time of a blobber allowed by the SC - max_challenge_completion_time: "10m" + min_alloc_size: 1073741824 + # max challenge completion round of a blobber allowed by the SC + max_challenge_completion_rounds: 1200 # min blobber's offer duration allowed by the SC - min_offer_duration: "10h" #todo based on timeunit + min_offer_duration: "8760h" #todo based on timeunit # min blobber capacity allowed by the SC - min_blobber_capacity: 1024 + min_blobber_capacity: 10995116277760 # fraction of the allocation cost that is locked in the cancellation charge cancellation_charge: 0.2 - min_lock_demand: 0.1 + min_lock_demand: 1 # users' read pool related configurations readpool: min_lock: 0.0 # tokens @@ -114,18 +116,18 @@ smart_contracts: # stake pool configurations stakepool: # minimal lock for a delegate pool - min_lock: 0.01 # tokens + min_lock: 0.1 # tokens kill_slash: 0.5 # following settings are for free storage rewards # # summarized amount for all assigner's lifetime - max_total_free_allocation: 100000000000000000 #todo figure out how it works + max_total_free_allocation: 10000000 # the limit of tokens can be minted on each free_allocation_request - max_individual_free_allocation: 1000000 + max_individual_free_allocation: 1 # allocation settings for free storage # these values are applied to all free allocations free_allocation_settings: - data_shards: 2 + data_shards: 6 parity_shards: 3 read_pool_fraction: 0 read_price_range: @@ -133,7 +135,7 @@ smart_contracts: min: 0 size: 2147483648 write_price_range: - max: 1 + max: 0.025 min: 0 validator_reward: 0.025 # blobber_slash represents blobber's stake penalty when a challenge not @@ -142,28 +144,32 @@ smart_contracts: # duration between health check after which a blobber or validator is considered inactive health_check_period: 90m # max prices for blobbers (tokens per GB) - max_read_price: 100.0 - max_write_price: 100.0 + max_read_price: 0.0 + max_write_price: 0.025 min_write_price: 0.001 - max_blobbers_per_allocation: 40 + # max file size on blobber + max_file_size: 549755813888 # 512GB + max_blobbers_per_allocation: 30 # # challenges # # enable challenges challenge_enabled: true + challenge_generation_gap : 1 # number of validators per challenge - validators_per_challenge: 2 + validators_per_challenge: 10 num_validators_rewarded: 10 
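+    # (Annotation, not part of the committed sc.yaml): time_unit above is "8760h",
+    # i.e. 365 days, so write prices in this file are tokens per GB per year;
+    # for example max_write_price: 0.025 below caps blobbers at 0.025 tokens/GB/year.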
+ max_blobber_select_for_challenge: 5 # max delegates per stake pool allowed by SC - max_delegates: 200 + max_delegates: 100 # max_charge allowed for blobbers; the charge is part of blobber rewards # goes to blobber's delegate wallets, other part goes to related stake # holders max_charge: 0.50 # reward paid out every block block_reward: - block_reward: 0.06 - block_reward_change_period: 125000000 + block_reward: 2.37 + block_reward_change_period: 95000000 block_reward_change_ratio: 0.1 qualifying_stake: 1 trigger_period: 30 @@ -176,43 +182,39 @@ smart_contracts: k: 0.9 mu: 0.2 cost: - update_settings: 135 - read_redeem: 606 - commit_connection: 670 - new_allocation_request: 3000 - update_allocation_request: 2500 - finalize_allocation: 993 - cancel_allocation: 5000 - add_free_storage_assigner: 115 - free_allocation_request: 2417 - free_update_allocation: 2500 - blobber_health_check: 88 - validator_health_check: 87 - update_blobber_settings: 309 - update_validator_settings: 214 - pay_blobber_block_rewards: 807 - challenge_request: 100 #todo - challenge_response: 684 - add_validator: 443 - add_blobber: 240 - new_read_pool: 94 - read_pool_lock: 154 - read_pool_unlock: 93 - write_pool_lock: 167 - write_pool_unlock: 103 - stake_pool_lock: 167 - stake_pool_unlock: 103 - stake_pool_pay_interests: 100 #todo - commit_settings_changes: 52 - generate_challenge: 694 - blobber_block_rewards: 806 - collect_reward: 158 - kill_blobber: 669 - kill_validator: 350 - shutdown_blobber: 100 - shutdown_validator: 100 + update_settings: 143 + read_redeem: 664 + commit_connection: 743 + new_allocation_request: 1919 + update_allocation_request: 2692 + finalize_allocation: 1091 + cancel_allocation: 1163 + add_free_storage_assigner: 124 + free_allocation_request: 2132 + free_update_allocation: 1468 + blobber_health_check: 97 + validator_health_check: 109 + update_blobber_settings: 338 + update_validator_settings: 247 + pay_blobber_block_rewards: 100 #todo + challenge_response: 728 + add_validator: 348 + add_blobber: 266 + read_pool_lock: 170 + read_pool_unlock: 104 + write_pool_lock: 186 + stake_pool_lock: 187 + stake_pool_unlock: 119 + commit_settings_changes: 56 + generate_challenge: 600 + blobber_block_rewards: 794 + collect_reward: 181 + kill_blobber: 651 + kill_validator: 277 + shutdown_blobber: 597 + shutdown_validator: 227 vestingsc: - owner_id: 1746b06bb09f55ee01b33b5e2e055d6cc7a900cb57c0a3a5eaabb8a0e7745802 + owner_id: ac112d5620eb7918e08086506b4c5fba9419be09b3dd97e47e02778b0927dcc4 min_lock: 0.01 min_duration: "2m" max_duration: "2h" @@ -226,14 +228,17 @@ smart_contracts: delete: 100 vestingsc-update-settings: 100 zcnsc: - owner_id: 1746b06bb09f55ee01b33b5e2e055d6cc7a900cb57c0a3a5eaabb8a0e7745802 + owner_id: ac112d5620eb7918e08086506b4c5fba9419be09b3dd97e47e02778b0927dcc4 min_mint: 1 min_burn: 1 - min_stake: 0 + min_stake: 1000 + max_stake: 20000.0 # max stake can be set by a node (boundary for all nodes) + min_stake_per_delegate: 10000 min_authorizers: 1 percent_authorizers: 0.7 + max_mint: 200000000 # ZCN max_delegates: 10 - max_fee: 100 #todo change the wording + max_fee: 10000000 #todo change the wording burn_address: "0000000000000000000000000000000000000000000000000000000000000000" #todo maybe we should use sc address health_check_period: 90m cost: @@ -242,3 +247,6 @@ smart_contracts: add-authorizer: 100 authorizer-health-check: 100 delete-authorizer: 100 + update-global-config: 100 + add-to-delegate-pool: 100 + delete-from-delegate-pool: 100 diff --git a/0chain/sharder-pg-conf.sh 
b/0chain/sharder-pg-conf.sh new file mode 100644 index 00000000..b3977e01 --- /dev/null +++ b/0chain/sharder-pg-conf.sh @@ -0,0 +1,32 @@ +#!/bin/bash + +set -e +echo -e "\n\e[93m=============================================================================================================================================================================== + Running script on DB +=============================================================================================================================================================================== \e[39m" +docker exec sharder-postgres-1 psql -U zchain_user -d events_db -c """UPDATE miners +SET total_stake = ( + SELECT SUM(balance) + FROM delegate_pools + WHERE status = 0 + AND provider_id = miners.id +);""" + +echo -e "\n\e[93m=============================================================================================================================================================================== + Stopping sharder +=============================================================================================================================================================================== \e[39m" +docker stop sharder-1 sharder-postgres-1 + +echo -e "\n\e[93m=============================================================================================================================================================================== + Updating postgres.conf file on your server +=============================================================================================================================================================================== \e[39m" +cd ~ +wget -N https://github.com/0chain/zcnwebappscripts/raw/as-deploy/0chain/sharder-files/docker.local/config/postgresql.conf +cp -f postgresql.conf /var/0chain/sharder/ssd/docker.local/config/postgresql.conf + +echo -e "\n\e[93m=============================================================================================================================================================================== + Starting postgres and sharder +=============================================================================================================================================================================== \e[39m" +docker start sharder-postgres-1 +sleep 5s +docker start sharder-1 diff --git a/0chain/sharder_migration_init_setup.sh b/0chain/sharder_migration_init_setup.sh new file mode 100644 index 00000000..ddd7516e --- /dev/null +++ b/0chain/sharder_migration_init_setup.sh @@ -0,0 +1,59 @@ +#!/bin/bash + +set -e + +############################################################ +# setup variables +############################################################ +export PROJECT_ROOT="/var/0chain" # /var/0chain +export PROJECT_ROOT_SSD=/var/0chain/sharder/ssd # /var/0chain/sharder/ssd +export PROJECT_ROOT_HDD=/var/0chain/sharder/hdd # /var/0chain/sharder/hdd +mkdir -p $PROJECT_ROOT_SSD +mkdir -p $PROJECT_ROOT_HDD + +echo -e "\n\e[93m=============================================================================================================================================================================== + Installing some pre-requisite tools on your server +=============================================================================================================================================================================== \e[39m" +echo -e "\e[32m 1. Apt update. \e[23m \e[0;37m" +sudo apt update +echo -e "\e[32m 2. Running apt install (quiet). 
\e[23m \e[0;37m" +sudo apt install -qq -y +echo -e "\e[32m 3. Installing unzip, dnsutils, ufw, ntp, ntpdate. \e[23m \e[0;37m" +sudo apt install unzip dnsutils ufw ntp ntpdate -y +echo -e "\e[32m 4. Installing docker & docker-compose. \e[23m \e[0;37m" +DOCKERCOMPOSEVER=v2.2.3 ; sudo apt install docker.io -y; sudo systemctl enable --now docker ; docker --version ; sudo curl -L "https://github.com/docker/compose/releases/download/$DOCKERCOMPOSEVER/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose; sudo chmod +x /usr/local/bin/docker-compose ; docker-compose --version +sudo chmod 777 /var/run/docker.sock &> /dev/null + +echo -e "\n\e[93m=============================================================================================================================================================================== + Setting up ntp +=============================================================================================================================================================================== \e[39m" +sudo ufw disable +sudo ufw allow 123/udp +sudo ufw allow out to any port 123 +sudo systemctl stop ntp +sudo ntpdate pool.ntp.org +sudo systemctl start ntp +sudo systemctl enable ntp + +echo -e "\n\e[93m=============================================================================================================================================================================== + Checking docker service running or not +=============================================================================================================================================================================== \e[39m" +echo -e "\e[32m 1. Docker status. \e[23m" +if (systemctl is-active --quiet docker) ; then + echo -e "\e[32m docker is running fine. \e[23m \n" +else + echo -e "\e[31m $REQUIRED_PKG is failing to run. Please check and resolve it first. You can connect with team for support too. \e[13m \n" + exit 1 +fi + +# echo -e "\n\e[93m=============================================================================================================================================================================== +# Checking URL entered is resolving or not. 
+# =============================================================================================================================================================================== \e[39m" +# ipaddr=$(curl api.ipify.org) +# myip=$(dig +short $PUBLIC_ENDPOINT) +# if [[ "$myip" != "$ipaddr" ]]; then +# echo "$PUBLIC_ENDPOINT IP resolution mistmatch $myip vs $ipaddr" +# exit 1 +# else +# echo "SUCCESS $PUBLIC_ENDPOINT resolves to $myip" +# fi diff --git a/0chain/sharder_miner_cleanup.sh b/0chain/sharder_miner_cleanup.sh new file mode 100644 index 00000000..eab45141 --- /dev/null +++ b/0chain/sharder_miner_cleanup.sh @@ -0,0 +1,23 @@ +#!/bin/bash + +echo -e "\n\e[93m=============================================================================================================================================================================== + setup variables +=============================================================================================================================================================================== \e[39m" +export PROJECT_ROOT=/var/0chain # /var/0chain +echo -e "\e[32m Successfully Created \e[23m \e[0;37m" + +echo -e "\n\e[93m=============================================================================================================================================================================== + cleaning up sharder/miner. +=============================================================================================================================================================================== \e[39m" +pushd ${PROJECT_ROOT} > /dev/null; + docker rm -f $(docker ps -a -q) + rm -rf miner/ssd/* || true + rm -rf miner/hdd/* || true + rm -rf grafana-portainer/* || true + rm -rf sharder/ssd/* || true + rm -rf sharder/hdd/* || true + rm -rf *.zip || true + rm -rf initial_states.yaml || true + echo 'y' | docker system prune -a || true + echo 'y' | docker volume prune -a || true +popd > /dev/null; diff --git a/0chain/sharder_snapshot_build.sh b/0chain/sharder_snapshot_build.sh new file mode 100644 index 00000000..b74da6a4 --- /dev/null +++ b/0chain/sharder_snapshot_build.sh @@ -0,0 +1,93 @@ +#!/bin/bash + +export SHARDER_SNAP=$1 +export SNAP_VERSION=$2 + +# Stop sharder and postgres container on the server +echo -e "\n\e[93m=============================================================================================================================================================================== + Stop sharder and postgres container on the server. +=============================================================================================================================================================================== \e[39m" +docker stop sharder-1 sharder-postgres-1 + +# Creating snapshot folder +echo -e "\n\e[93m=============================================================================================================================================================================== + Creating snapshot folder to store snapshot. +=============================================================================================================================================================================== \e[39m" +cd ~ +mkdir snapshots + +echo -e "\n\e[93m=============================================================================================================================================================================== + Creating snapshot files. 
+=============================================================================================================================================================================== \e[39m" +# Downloading snapshot +cd ~/snapshots +rm -rf sharder-sql2-${SNAP_VERSION}.tar.gz +rm -rf sharder-ssd-sql-${SNAP_VERSION}.tar.gz +rm -rf sharder-mpt-${SNAP_VERSION}.tar.gz + +echo "Creating tar file for sharder-sql2-${SNAP_VERSION}.tar.gz" +tar -cvf - /var/0chain/sharder/hdd/docker.local/sharder1/data/postgresql2 | pigz -p 10 > sharder-sql2-${SNAP_VERSION}.tar.gz + +echo "Creating tar file for sharder-ssd-sql-${SNAP_VERSION}.tar.gz" +tar -cvf - /var/0chain/sharder/ssd/docker.local/sharder1/data/postgresql | pigz -p 10 > sharder-ssd-sql-${SNAP_VERSION}.tar.gz + +echo "Creating tar file for sharder-mpt-${SNAP_VERSION}.tar.gz" +tar -cvf - /var/0chain/sharder/hdd/docker.local/sharder1/data/rocksdb | pigz -p 10 > sharder-mpt-${SNAP_VERSION}.tar.gz + +# echo "Creating tar files for sharder-blocks-${SNAP_VERSION} files" +# for ((idx=0; idx<=15; idx++)) +# do +# hex=$(printf "%x" $idx) +# echo "pack $hex" +# mkdir -p sharder-blocks-${SNAP_VERSION}/$hex +# packss -path /var/0chain/sharder/hdd/docker.local/sharder1/data/blocks/$hex -dest sharder-blocks-${SNAP_VERSION}/$hex -thread 36 +# # go run main.go -path /var/0chain/sharder/hdd/docker.local/sharder1/data/blocks/$hex -dest sharder-blocks-${SNAP_VERSION}/$hex --depth 2 --thread 25 +# done + +# echo "Creating tar file for sharder-blocks-${SNAP_VERSION}.tar.gz" +# tar -cvf - sharder-blocks-${SNAP_VERSION} | pigz -p 10 > sharder-blocks-${SNAP_VERSION}.tar.gz + +# Start sharder and postgres container on the server +echo -e "\n\e[93m=============================================================================================================================================================================== + Start sharder and postgres container on the server. +=============================================================================================================================================================================== \e[39m" +docker start sharder-1 sharder-postgres-1 + +# echo -e "\n\e[93m=============================================================================================================================================================================== +# Copy the snapshot wallet to location ~/snapshots/snapshot.json path. +# =============================================================================================================================================================================== \e[39m" +# read -p "Press enter after placing snapshot.json wallet to path ~/snapshot/" + +# move these zip file to zus blobber storage zus-snapshots/ +echo -e "\n\e[93m=============================================================================================================================================================================== + Moving snapshot files to zus storage. 
+=============================================================================================================================================================================== \e[39m" +# wget https://github.com/0chain/zcnwebappscripts/raw/as-deploy/0chain/zwallet-binary/zbox +# chmod +x zbox + +# echo "Generating config.yaml file" +# echo "block_worker: https://mainnet.zus.network/dns" > config.yaml +# echo "signature_scheme: bls0chain" >> config.yaml +# echo "min_submit: 20" >> config.yaml +# echo "min_confirmation: 20" >> config.yaml +# echo "confirmation_chain_length: 3" >> config.yaml +# echo "max_txn_query: 5" >> config.yaml +# echo "query_sleep_time: 5" >> config.yaml + +# echo "Generating snapshot.json file" +# echo '{"client_id":"e1373e3d129b8d125549ec2527d8515eff7b9b02e6094dff1fe6545b62058041","client_key":"560df578de5a224ac779a8e8e56c469243141171370c59b74a35b994df54c10c411107e97b350a6af9428d63ef42f54b2991880aa2205c7869909534efdab611","keys":[{"public_key":"560df578de5a224ac779a8e8e56c469243141171370c59b74a35b994df54c10c411107e97b350a6af9428d63ef42f54b2991880aa2205c7869909534efdab611","private_key":"94bed72f76cc48517e2f3b5386d0072d4d5c7e88e20300cc9d2385e51a235c01"}],"mnemonics":"auto icon flight enemy culture three field track album kiss accuse weather member diagram symbol where tank doll naive space injury problem blade universe","version":"1.0","date_created":"2024-03-21T12:56:02+01:00","nonce":0}' > snapshot.json + +# ./zbox upload --localpath ./sharder-sql2-${SNAP_VERSION}.tar.gz --remotepath /${SHARDER_SNAP}/sharder-sql2-${SNAP_VERSION}.tar.gz --allocation a25cde7d0b06655f4f8eb86ec99050eee0b6d929161c62551456acc276adab63 --configDir . --config ./config.yaml --wallet snapshot.json --silent +# ./zbox upload --localpath ./sharder-ssd-sql-${SNAP_VERSION}.tar.gz --remotepath /${SHARDER_SNAP}/sharder-ssd-sql-${SNAP_VERSION}.tar.gz --allocation a25cde7d0b06655f4f8eb86ec99050eee0b6d929161c62551456acc276adab63 --configDir . --config ./config.yaml --wallet snapshot.json --silent +# ./zbox upload --localpath ./sharder-mpt-${SNAP_VERSION}.tar.gz --remotepath /${SHARDER_SNAP}/sharder-mpt-${SNAP_VERSION}.tar.gz --allocation a25cde7d0b06655f4f8eb86ec99050eee0b6d929161c62551456acc276adab63 --configDir . --config ./config.yaml --wallet snapshot.json --silent + +# aws s3 cp sharder-blocks-${SNAP_VERSION}.tar.gz s3://zus-snapshots/${SHARDER_SNAP}/ +aws s3 cp sharder-sql2-${SNAP_VERSION}.tar.gz s3://zus-snapshots/${SHARDER_SNAP}/ +aws s3 cp sharder-ssd-sql-${SNAP_VERSION}.tar.gz s3://zus-snapshots/${SHARDER_SNAP}/ +aws s3 cp sharder-mpt-${SNAP_VERSION}.tar.gz s3://zus-snapshots/${SHARDER_SNAP}/ + +echo -e "\n\e[93m=============================================================================================================================================================================== + Link to docs to deploy sharder snapshot. 
+=============================================================================================================================================================================== \e[39m" +echo "Follow docs to deploy snapshot to bad sharder --> https://0chaindocs.gitbook.io/as-onboarding/recovery-from-snapshots/steps-to-apply-snapshot" + diff --git a/0chain/sharder_snapshot_build_restic.sh b/0chain/sharder_snapshot_build_restic.sh new file mode 100644 index 00000000..8355f7c5 --- /dev/null +++ b/0chain/sharder_snapshot_build_restic.sh @@ -0,0 +1,63 @@ +#!/bin/bash + +export SHARDER_SNAP=$1 +export SNAP_VERSION=$2 + +# Stop sharder and postgres container on the server +echo -e "\n\e[93m=============================================================================================================================================================================== + Stop sharder and postgres container on the server. +=============================================================================================================================================================================== \e[39m" +docker stop sharder-1 sharder-postgres-1 + +# Creating snapshot folder +echo -e "\n\e[93m=============================================================================================================================================================================== + Creating snapshot folder to store snapshot. +=============================================================================================================================================================================== \e[39m" +cd ~ +mkdir snapshots + +echo -e "\n\e[93m=============================================================================================================================================================================== + Creating snapshot files. +=============================================================================================================================================================================== \e[39m" +# Downloading snapshot +cd ~/snapshots +rm -rf sharder-sql2-${SNAP_VERSION}.tar.gz +rm -rf sharder-ssd-sql-${SNAP_VERSION}.tar.gz +rm -rf sharder-mpt-${SNAP_VERSION}.tar.gz + +echo "Creating tar file for sharder-sql2-${SNAP_VERSION}.tar.gz" +tar -cvf - /var/0chain/sharder/hdd/docker.local/sharder1/data/postgresql2 | pigz -p 10 > sharder-sql2-${SNAP_VERSION}.tar.gz + +echo "Creating tar file for sharder-ssd-sql-${SNAP_VERSION}.tar.gz" +tar -cvf - /var/0chain/sharder/ssd/docker.local/sharder1/data/postgresql | pigz -p 10 > sharder-ssd-sql-${SNAP_VERSION}.tar.gz + +echo "Creating tar file for sharder-mpt-${SNAP_VERSION}.tar.gz" +tar -cvf - /var/0chain/sharder/hdd/docker.local/sharder1/data/rocksdb | pigz -p 10 > sharder-mpt-${SNAP_VERSION}.tar.gz + +echo -e "\n\e[93m=============================================================================================================================================================================== + Start sharder and postgres container on the server. +=============================================================================================================================================================================== \e[39m" +docker start sharder-1 sharder-postgres-1 + +echo -e "\n\e[93m=============================================================================================================================================================================== + Moving snapshot files to zus storage using restic. 
+=============================================================================================================================================================================== \e[39m" +echo "Set environment variable to zs3server" +export AWS_ACCESS_KEY_ID=rootroot +export AWS_SECRET_ACCESS_KEY=rootroot +export RESTIC_REPOSITORY="s3:https://zs3server.zus.network/restic" +export RESTIC_PASSWORD="resticroot" + +restic -r s3:https://zs3server.zus.network/restic --verbose backup ./* + +if [ $? -eq 0 ]; then + echo "Snapshot stored to zus successfully." +else + echo "Snapshot upload failed." + exit +fi + +echo -e "\n\e[93m=============================================================================================================================================================================== + Link to docs to deploy sharder snapshot. +=============================================================================================================================================================================== \e[39m" +echo "Follow docs to deploy snapshot to bad sharder --> https://0chaindocs.gitbook.io/as-onboarding/recovery-from-snapshots/steps-to-apply-snapshot" diff --git a/0chain/sharder_snapshot_recovery.sh b/0chain/sharder_snapshot_recovery.sh new file mode 100644 index 00000000..038070db --- /dev/null +++ b/0chain/sharder_snapshot_recovery.sh @@ -0,0 +1,102 @@ +#!/bin/bash + +export TAG=$1 +export SHARDER_SNAP=$2 +export SNAP_VERSION=$3 + +# Stopping existing/running sharder and postgres +echo -e "\n\e[93m=============================================================================================================================================================================== + Stop sharder and postgres container on the server. +=============================================================================================================================================================================== \e[39m" +docker rm -f sharder-1 sharder-postgres-1 + +# Removing and Backup old data +echo -e "\n\e[93m=============================================================================================================================================================================== + Backing up and Removing sharder data from the server. +=============================================================================================================================================================================== \e[39m" +cd /var/0chain/sharder/hdd/docker.local/sharder1/data +if [ -d "./rocksdb_bkp" ]; then + echo "Removing older hdd /rocksdb_bkp" + rm -rf rocksdb_bkp +fi +if [ -d "./postgresql2_bkp" ]; then + echo "Removing older hdd /postgresql2_bkp" + rm -rf postgresql2_bkp +fi +echo "Backup recent hdd mpt and postgres data" +mv rocksdb rocksdb_bkp || true +mv postgresql2 postgresql2_bkp || true + +cd /var/0chain/sharder/ssd/docker.local/sharder1/data +if [ -d "./postgresql_bkp" ]; then + echo "Removing older ssd /postgresql_bkp" + rm -rf postgresql_bkp +fi +echo "Backup recent ssd postgres data" +mv postgresql postgresql_bkp || true + +echo -e "\n\e[93m=============================================================================================================================================================================== + Downloading latest snapshot from the zus storage. 
+=============================================================================================================================================================================== \e[39m" +# Downloading snapshot +cd /var/0chain/sharder/hdd +mkdir snapshot || true +cd snapshot +echo "Removing previous pulled snapshot if exists" +rm -rf ./* + +# echo "Downloading new snapshot" +# wget https://github.com/0chain/zcnwebappscripts/raw/as-deploy/0chain/zwallet-binary/zbox +# chmod +x zbox + +# echo "Generating config.yaml file" +# echo "block_worker: https://mainnet.zus.network/dns" > config.yaml +# echo "signature_scheme: bls0chain" >> config.yaml +# echo "min_submit: 20" >> config.yaml +# echo "min_confirmation: 20" >> config.yaml +# echo "confirmation_chain_length: 3" >> config.yaml +# echo "max_txn_query: 5" >> config.yaml +# echo "query_sleep_time: 5" >> config.yaml + +# echo "Generating snapshot.json file" +# echo '{"client_id":"e1373e3d129b8d125549ec2527d8515eff7b9b02e6094dff1fe6545b62058041","client_key":"560df578de5a224ac779a8e8e56c469243141171370c59b74a35b994df54c10c411107e97b350a6af9428d63ef42f54b2991880aa2205c7869909534efdab611","keys":[{"public_key":"560df578de5a224ac779a8e8e56c469243141171370c59b74a35b994df54c10c411107e97b350a6af9428d63ef42f54b2991880aa2205c7869909534efdab611","private_key":"94bed72f76cc48517e2f3b5386d0072d4d5c7e88e20300cc9d2385e51a235c01"}],"mnemonics":"auto icon flight enemy culture three field track album kiss accuse weather member diagram symbol where tank doll naive space injury problem blade universe","version":"1.0","date_created":"2024-03-21T12:56:02+01:00","nonce":0}' > snapshot.json + +# ./zbox download --remotepath /${SHARDER_SNAP}/sharder-sql2-${SNAP_VERSION}.tar.gz --localpath ./sharder-sql2-${SNAP_VERSION}.tar.gz --allocation a25cde7d0b06655f4f8eb86ec99050eee0b6d929161c62551456acc276adab63 --configDir . --config ./config.yaml --wallet snapshot.json --slient +# ./zbox download --remotepath /${SHARDER_SNAP}/sharder-ssd-sql-${SNAP_VERSION}.tar.gz --localpath ./sharder-ssd-sql-${SNAP_VERSION}.tar.gz --allocation a25cde7d0b06655f4f8eb86ec99050eee0b6d929161c62551456acc276adab63 --configDir . --config ./config.yaml --wallet snapshot.json --slient +# ./zbox download --remotepath /${SHARDER_SNAP}/sharder-mpt-${SNAP_VERSION}.tar.gz --localpath ./sharder-mpt-${SNAP_VERSION}.tar.gz --allocation a25cde7d0b06655f4f8eb86ec99050eee0b6d929161c62551456acc276adab63 --configDir . --config ./config.yaml --wallet snapshot.json --slient + +# wget https://zus-snapshots.s3.amazonaws.com/${SHARDER_SNAP}/sharder-blocks-${SNAP_VERSION}.tar.gz +wget https://zus-snapshots.s3.amazonaws.com/${SHARDER_SNAP}/sharder-mpt-${SNAP_VERSION}.tar.gz +wget https://zus-snapshots.s3.amazonaws.com/${SHARDER_SNAP}/sharder-sql2-${SNAP_VERSION}.tar.gz +wget https://zus-snapshots.s3.amazonaws.com/${SHARDER_SNAP}/sharder-ssd-sql-${SNAP_VERSION}.tar.gz + +echo -e "\n\e[93m=============================================================================================================================================================================== + Extracting snapshot files into destination folder. 
+=============================================================================================================================================================================== \e[39m" +# # extract sharder-blocks.tar.gz +# echo "Extracting sharder-blocks-${SNAP_VERSION}.tar.gz" +# tar -xzvf sharder-blocks-${SNAP_VERSION}.tar.gz + +# # Find all .tar.gz files in sharder_blocks and its subdirectories +# echo "Find all .tar.gz files in sharder_blocks and its subdirectories" +# find sharder-blocks -type f -name "*.tar.gz" -print0 | while IFS= read -r -d '' file; do +# echo "Extracting $file..." +# tar -xzvf "$file" -C / +# done + +# extract sharder-mpt.tar.gz to path /var/0chain/sharder/hdd/docker.local/sharder1/data/ +echo "extract sharder-mpt-${SNAP_VERSION}.tar.gz to path /var/0chain/sharder/hdd/docker.local/sharder1/data/" +tar -zxvf sharder-mpt-${SNAP_VERSION}.tar.gz -C / + +# extract sharder-ssd-sql.tar.gz /var/0chain/sharder/ssd/docker.local/sharder1/data/ +echo "extract sharder-ssd-sql-${SNAP_VERSION}.tar.gz /var/0chain/sharder/ssd/docker.local/sharder1/data/" +tar -zxvf sharder-ssd-sql-${SNAP_VERSION}.tar.gz -C / + +# extract sharder-sql2.tar.gz /var/0chain/sharder/hdd/docker.local/sharder1/data/ +echo "extract sharder-sql2-${SNAP_VERSION}.tar.gz /var/0chain/sharder/hdd/docker.local/sharder1/data/" +tar -zxvf sharder-sql2-${SNAP_VERSION}.tar.gz -C / + +# Starting Sharder with snapshot data +yq e -i ".services.sharder.image = \"0chaindev/sharder:${TAG}\"" /var/0chain/sharder/ssd/docker.local/build.sharder/p0docker-compose.yaml +cd /var/0chain/sharder/ssd/docker.local/sharder1/ +sudo bash ../bin/start.p0sharder.sh /var/0chain/sharder/ssd /var/0chain/sharder/hdd/ diff --git a/0chain/sharder_snapshot_recovery_restic.sh b/0chain/sharder_snapshot_recovery_restic.sh new file mode 100644 index 00000000..0ac1cb15 --- /dev/null +++ b/0chain/sharder_snapshot_recovery_restic.sh @@ -0,0 +1,97 @@ +#!/bin/bash + +export TAG=$1 +export SNAP_ID=$2 +export SNAP_VERSION=$3 + +echo -e "\n\e[93m=============================================================================================================================================================================== + Downloading latest snapshot from the zus storage. +=============================================================================================================================================================================== \e[39m" +# Downloading snapshot +cd /var/0chain/sharder/hdd +mkdir snapshot || true +cd snapshot +echo "Removing previous pulled snapshot if exists" +rm -rf ./* + +echo "Installing Restic tool on the server" +sudo apt update -y +sudo apt install restic -y + +echo "Set environment variable to zs3server" +export AWS_ACCESS_KEY_ID=rootroot +export AWS_SECRET_ACCESS_KEY=rootroot +export RESTIC_REPOSITORY="s3:http://65.109.152.43:9004/miner/" +export RESTIC_PASSWORD="resticroot" + +restic cache --cleanup +restic restore ${SNAP_ID} --target ./ --verbose + +# if [ $? -eq 0 ]; then +# echo "Snapshot downloaded from zus successfully." +# else +# echo "Snapshot download failed. Please contact zus team" +# exit +# fi + +# Stopping existing/running sharder and postgres +echo -e "\n\e[93m=============================================================================================================================================================================== + Stop sharder and postgres container on the server. 
+=============================================================================================================================================================================== \e[39m" +docker rm -f sharder-1 sharder-postgres-1 + +# Removing and Backup old data +echo -e "\n\e[93m=============================================================================================================================================================================== + Backing up and Removing sharder data from the server. +=============================================================================================================================================================================== \e[39m" +cd /var/0chain/sharder/hdd/docker.local/sharder1/data +if [ -d "./rocksdb_bkp" ]; then + echo "Removing older hdd /rocksdb_bkp" + rm -rf rocksdb_bkp +fi +if [ -d "./postgresql2_bkp" ]; then + echo "Removing older hdd /postgresql2_bkp" + rm -rf postgresql2_bkp +fi +echo "Backup recent hdd mpt and postgres data" +mv rocksdb rocksdb_bkp || true +mv postgresql2 postgresql2_bkp || true + +cd /var/0chain/sharder/ssd/docker.local/sharder1/data +if [ -d "./postgresql_bkp" ]; then + echo "Removing older ssd /postgresql_bkp" + rm -rf postgresql_bkp +fi +echo "Backup recent ssd postgres data" +mv postgresql postgresql_bkp || true + +echo -e "\n\e[93m=============================================================================================================================================================================== + Extracting snapshot files into destination folder. +=============================================================================================================================================================================== \e[39m" +# # extract sharder-blocks.tar.gz +# echo "Extracting sharder-blocks-${SNAP_VERSION}.tar.gz" +# tar -xzvf sharder-blocks-${SNAP_VERSION}.tar.gz + +# # Find all .tar.gz files in sharder_blocks and its subdirectories +# echo "Find all .tar.gz files in sharder_blocks and its subdirectories" +# find sharder-blocks -type f -name "*.tar.gz" -print0 | while IFS= read -r -d '' file; do +# echo "Extracting $file..." 
+# tar -xzvf "$file" -C / +# done +cd /var/0chain/sharder/hdd/snapshot/ +# extract sharder-mpt.tar.gz to path /var/0chain/sharder/hdd/docker.local/sharder1/data/ +echo "extract sharder-mpt-${SNAP_VERSION}.tar.gz to path /var/0chain/sharder/hdd/docker.local/sharder1/data/" +tar -zxvf sharder-mpt-${SNAP_VERSION}.tar.gz -C / + +# extract sharder-ssd-sql.tar.gz /var/0chain/sharder/ssd/docker.local/sharder1/data/ +echo "extract sharder-ssd-sql-${SNAP_VERSION}.tar.gz /var/0chain/sharder/ssd/docker.local/sharder1/data/" +tar -zxvf sharder-ssd-sql-${SNAP_VERSION}.tar.gz -C / + +# extract sharder-sql2.tar.gz /var/0chain/sharder/hdd/docker.local/sharder1/data/ +echo "extract sharder-sql2-${SNAP_VERSION}.tar.gz /var/0chain/sharder/hdd/docker.local/sharder1/data/" +tar -zxvf sharder-sql2-${SNAP_VERSION}.tar.gz -C / + +# Starting Sharder with snapshot data +yq e -i ".services.sharder.image = \"0chaindev/sharder:${TAG}\"" /var/0chain/sharder/ssd/docker.local/build.sharder/p0docker-compose.yaml +cd /var/0chain/sharder/ssd/docker.local/sharder1/ +sudo bash ../bin/start.p0sharder.sh /var/0chain/sharder/ssd /var/0chain/sharder/hdd/ diff --git a/0chain/stake_miner.sh b/0chain/stake_miner.sh new file mode 100644 index 00000000..f65c21e4 --- /dev/null +++ b/0chain/stake_miner.sh @@ -0,0 +1,77 @@ +#!/bin/bash + +set -e + +echo -e "\n\e[93m=============================================================================================================================================================================== + setup variables +=============================================================================================================================================================================== \e[39m" +export PROJECT_ROOT=/var/0chain # /var/0chain +echo -e "\e[32m Successfully Created \e[23m \e[0;37m" + +echo -e "\n\e[93m=============================================================================================================================================================================== + Downloading zwallet binary. +=============================================================================================================================================================================== \e[39m" +pushd ${PROJECT_ROOT} > /dev/null; + echo "generating config.yaml file" + echo "block_worker: https://mainnet.zus.network/dns" > config.yaml + echo "signature_scheme: bls0chain" >> config.yaml + echo "min_submit: 20" >> config.yaml + echo "min_confirmation: 20" >> config.yaml + echo "confirmation_chain_length: 3" >> config.yaml + echo "max_txn_query: 5" >> config.yaml + echo "query_sleep_time: 5" >> config.yaml + + if [[ -f bin/zwallet ]] ; then + echo "zwallet binary already present" + else + ubuntu_version=$(lsb_release -rs | cut -f1 -d'.') + if [[ ${ubuntu_version} -eq 18 ]]; then + echo "Ubuntu 18 is not supported" + exit 1 + elif [[ ${ubuntu_version} -eq 20 || ${ubuntu_version} -eq 22 ]]; then + curl -L "https://github.com/0chain/zcnwebappscripts/raw/as-deploy/0chain/artifacts/zwallet-binary.zip" -o /tmp/zwallet-binary.zip + sudo unzip -o /tmp/zwallet-binary.zip && rm -rf /tmp/zwallet-binary.zip + mkdir bin || true + sudo cp -rf zwallet-binary/* ${PROJECT_ROOT}/bin/ + sudo rm -rf zwallet-binary + else + echo "Didn't found any Ubuntu version with 20/22." 
+
+echo -e "\n\e[93m===============================================================================================================================================================================
+                                      Persisting miner wallet id.
+=============================================================================================================================================================================== \e[39m"
+pushd ${PROJECT_ROOT} > /dev/null;
+    # Delegate wallet input
+    if [[ -f delegate_wallet.json ]] ; then
+        CLIENTID=$(cat del_wal_id.txt)
+        echo "Delegate wallet id exists, i.e. ${CLIENTID}"
+        if [[ -f keys/b0mnode1_keys.json ]] ; then
+            MINER_ID=$(jq -r .client_id keys/b0mnode1_keys.json)
+        else
+            echo "##### Miner wallet not present on your server. Please stake the miner manually with delegate_wallet.json using the command below. #####"
+            echo "./bin/zwallet mn-lock --miner_id <miner_id> --tokens 50000 --configDir . --config ./config.yaml --wallet delegate_wallet.json"
+            exit 1
+        fi
+    else
+        echo "##### Delegate wallet not present on your server. Please stake the miner manually with delegate_wallet.json using the command below. #####"
+        echo "./bin/zwallet mn-lock --miner_id <miner_id> --tokens 50000 --configDir . --config ./config.yaml --wallet delegate_wallet.json"
+        exit 1
+    fi
+popd > /dev/null;
+
+echo -e "\n\e[93m===============================================================================================================================================================================
+                                      Staking miner using delegate wallet.
+=============================================================================================================================================================================== \e[39m"
+pushd ${PROJECT_ROOT} > /dev/null;
+    echo "./bin/zwallet mn-lock --miner_id ${MINER_ID} --tokens 50000 --configDir . --config ./config.yaml --wallet delegate_wallet.json"
+    ./bin/zwallet mn-lock --miner_id ${MINER_ID} --tokens 50000 --configDir . --config ./config.yaml --wallet delegate_wallet.json
+popd > /dev/null;
+
+
+############ Steps to run the script ############
+# 1. wget -N https://raw.githubusercontent.com/0chain/zcnwebappscripts/as-deploy/0chain/stake_miner.sh;
+# 2. bash stake_miner.sh &> ./miner_staking.log
+################################################
diff --git a/0chain/stake_sharder.sh b/0chain/stake_sharder.sh
new file mode 100644
index 00000000..e5b25dde
--- /dev/null
+++ b/0chain/stake_sharder.sh
@@ -0,0 +1,77 @@
+#!/bin/bash
+
+set -e
+
+echo -e "\n\e[93m===============================================================================================================================================================================
+                                      Setup variables
+=============================================================================================================================================================================== \e[39m"
+export PROJECT_ROOT=/var/0chain # /var/0chain
+echo -e "\e[32m Successfully Created \e[23m \e[0;37m"
+
+echo -e "\n\e[93m===============================================================================================================================================================================
+                                      Downloading zwallet binary.
+=============================================================================================================================================================================== \e[39m"
+pushd ${PROJECT_ROOT} > /dev/null;
+    echo "Generating config.yaml file"
+    echo "block_worker: https://mainnet.zus.network/dns" > config.yaml
+    echo "signature_scheme: bls0chain" >> config.yaml
+    echo "min_submit: 20" >> config.yaml
+    echo "min_confirmation: 20" >> config.yaml
+    echo "confirmation_chain_length: 3" >> config.yaml
+    echo "max_txn_query: 5" >> config.yaml
+    echo "query_sleep_time: 5" >> config.yaml
+
+    if [[ -f bin/zwallet ]] ; then
+        echo "zwallet binary already present"
+    else
+        ubuntu_version=$(lsb_release -rs | cut -f1 -d'.')
+        if [[ ${ubuntu_version} -eq 18 ]]; then
+            echo "Ubuntu 18 is not supported"
+            exit 1
+        elif [[ ${ubuntu_version} -eq 20 || ${ubuntu_version} -eq 22 ]]; then
+            curl -L "https://github.com/0chain/zcnwebappscripts/raw/as-deploy/0chain/artifacts/zwallet-binary.zip" -o /tmp/zwallet-binary.zip
+            sudo unzip -o /tmp/zwallet-binary.zip && rm -rf /tmp/zwallet-binary.zip
+            mkdir bin || true
+            sudo cp -rf zwallet-binary/* ${PROJECT_ROOT}/bin/
+            sudo rm -rf zwallet-binary
+        else
+            echo "Unsupported Ubuntu version: only Ubuntu 20 and 22 are supported."
+        fi
+    fi
+popd > /dev/null;
+
+echo -e "\n\e[93m===============================================================================================================================================================================
+                                      Persisting sharder wallet id.
+=============================================================================================================================================================================== \e[39m"
+pushd ${PROJECT_ROOT} > /dev/null;
+    # Delegate wallet input
+    if [[ -f delegate_wallet.json ]] ; then
+        CLIENTID=$(cat del_wal_id.txt)
+        echo "Delegate wallet id exists, i.e. ${CLIENTID}"
+        if [[ -f keys/b0snode1_keys.json ]] ; then
+            SHARDER_ID=$(jq -r .client_id keys/b0snode1_keys.json)
+        else
+            echo "##### Sharder wallet not present on your server. Please stake the sharder manually with delegate_wallet.json using the command below. #####"
+            echo "./bin/zwallet mn-lock --sharder_id <sharder_id> --tokens 50000 --configDir . --config ./config.yaml --wallet delegate_wallet.json"
+            exit 1
+        fi
+    else
+        echo "##### Delegate wallet not present on your server. Please stake the sharder manually with delegate_wallet.json using the command below. #####"
+        echo "./bin/zwallet mn-lock --sharder_id <sharder_id> --tokens 50000 --configDir . --config ./config.yaml --wallet delegate_wallet.json"
+        exit 1
+    fi
+popd > /dev/null;
+
+echo -e "\n\e[93m===============================================================================================================================================================================
+                                      Staking sharder using delegate wallet.
+=============================================================================================================================================================================== \e[39m"
+pushd ${PROJECT_ROOT} > /dev/null;
+    echo "./bin/zwallet mn-lock --sharder_id ${SHARDER_ID} --tokens 50000 --configDir . --config ./config.yaml --wallet delegate_wallet.json"
+    ./bin/zwallet mn-lock --sharder_id ${SHARDER_ID} --tokens 50000 --configDir . --config ./config.yaml --wallet delegate_wallet.json
+popd > /dev/null;
+
+
+############ Steps to run the script ############
+# 1. wget -N https://raw.githubusercontent.com/0chain/zcnwebappscripts/as-deploy/0chain/stake_sharder.sh;
+# 2. bash stake_sharder.sh &> ./sharder_staking.log
+################################################
diff --git a/0chain/transfer_tokens.sh b/0chain/transfer_tokens.sh
new file mode 100644
index 00000000..51649ac9
--- /dev/null
+++ b/0chain/transfer_tokens.sh
@@ -0,0 +1,59 @@
+#!/bin/bash
+
+set -e
+
+echo -e "\n\e[93m===============================================================================================================================================================================
+                                      Downloading zwallet binary.
+=============================================================================================================================================================================== \e[39m"
+echo "Generating config.yaml file"
+echo "block_worker: https://mainnet.zus.network/dns" > config.yaml
+echo "signature_scheme: bls0chain" >> config.yaml
+echo "min_submit: 20" >> config.yaml
+echo "min_confirmation: 5" >> config.yaml
+echo "confirmation_chain_length: 3" >> config.yaml
+echo "max_txn_query: 5" >> config.yaml
+echo "query_sleep_time: 5" >> config.yaml
+
+if [[ -f bin/zwallet ]] ; then
+    echo "zwallet binary already present"
+else
+    ubuntu_version=$(lsb_release -rs | cut -f1 -d'.')
+    if [[ ${ubuntu_version} -eq 18 ]]; then
+        echo "Ubuntu 18 is not supported"
+        exit 1
+    elif [[ ${ubuntu_version} -eq 20 || ${ubuntu_version} -eq 22 ]]; then
+        curl -L "https://github.com/0chain/zcnwebappscripts/raw/as-deploy/0chain/artifacts/zwallet-binary.zip" -o /tmp/zwallet-binary.zip
+        sudo unzip -o /tmp/zwallet-binary.zip && rm -rf /tmp/zwallet-binary.zip
+        mkdir bin || true
+        sudo cp -rf zwallet-binary/* bin/
+        sudo rm -rf zwallet-binary
+    else
+        echo "Unsupported Ubuntu version: only Ubuntu 20 and 22 are supported."
+    fi
+fi
+
+echo -e "\n\e[93m===============================================================================================================================================================================
+                                      Fetching and transferring tokens to all delegate wallets
+=============================================================================================================================================================================== \e[39m"
+i=0
+domains=$(jq -r .[].domain others/del_wallets.json)
+arr=($domains)
+for del_wal in $(jq -r .[].client_id others/del_wallets.json); do
+    echo "###################################################################################################################################################################################"
+    echo "## SNo.$i :: Transferring 51000 tokens for domain ${arr[i]} to delegate wallet $del_wal from team wallet ##"
+    echo "###################################################################################################################################################################################"
+    sleep 2s
+    ./bin/zwallet send --to_client_id ${del_wal} --tokens 51000 --desc "delegate" --wallet ./team_wallet.json --configDir . --config ./config.yaml
+    echo
+    echo
+    i=$((i+1))
+done
+
+############ Steps to run the script ############
+# 1. git clone https://github.com/0chain/zcnwebappscripts.git
+# 2. cd zcnwebappscripts
+# 3. git checkout as-deploy
+# 4. cd 0chain
+# 5. Copy team_wallet.json to the current directory.
+# 6. bash transfer_tokens.sh &> ./transfer_tokens.log
+################################################
diff --git a/0chain/zwallet-binary/zbox b/0chain/zwallet-binary/zbox
new file mode 100644
index 00000000..cb667a57
Binary files /dev/null and b/0chain/zwallet-binary/zbox differ
diff --git a/0chain/zwallet-binary/zwallet b/0chain/zwallet-binary/zwallet
index 00f91326..85d07e9b 100755
Binary files a/0chain/zwallet-binary/zwallet and b/0chain/zwallet-binary/zwallet differ
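
Reviewer note: the transfer loop in transfer_tokens.sh starts sending 51000 tokens per wallet as soon as it runs, and the recipient list comes straight from others/del_wallets.json. A small dry run that only prints the planned transfers is a useful sanity check before executing the real script. The sketch below is editorial, not part of the patch; it assumes the same del_wallets.json layout and the same working directory that transfer_tokens.sh uses.

#!/bin/bash
# dry_run_transfers.sh (hypothetical helper): list the transfers transfer_tokens.sh would make,
# without calling zwallet send.
set -e
i=0
domains=$(jq -r '.[].domain' others/del_wallets.json)
arr=($domains)
for del_wal in $(jq -r '.[].client_id' others/del_wallets.json); do
    echo "SNo.$i :: would send 51000 tokens for domain ${arr[i]} to delegate wallet ${del_wal}"
    i=$((i+1))
done
echo "Total delegate wallets: $i"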