Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Feature 20230701 multishard #176

Merged
merged 2 commits into from
Jul 2, 2023
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
7 changes: 6 additions & 1 deletion CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -168,6 +168,7 @@ list(APPEND eraftvdb_sources src/info_command_handler.cc)
list(APPEND eraftvdb_sources src/set_command_handler.cc)
list(APPEND eraftvdb_sources src/get_command_handler.cc)
list(APPEND eraftvdb_sources src/unknow_command_handler.cc)
list(APPEND eraftvdb_sources src/util.cc)

add_executable(eraft-vdb ${eraftvdb_sources})
target_link_libraries(eraft-vdb
Expand Down Expand Up @@ -237,6 +238,7 @@ add_executable(eraftkv_server_test
src/rocksdb_storage_impl.cc
src/log_entry_cache.cc
src/grpc_network_impl.cc
src/util.cc
)
target_link_libraries(eraftkv_server_test PUBLIC
${GTEST_LIBRARIES}
Expand Down Expand Up @@ -310,6 +312,7 @@ add_executable(grpc_network_impl_test
src/log_storage_impl.cc
src/rocksdb_storage_impl.cc
src/log_entry_cache.cc
src/util.cc
)
target_link_libraries(grpc_network_impl_test PUBLIC
${GTEST_LIBRARIES}
Expand All @@ -325,7 +328,9 @@ add_executable(eraftkv-ctl
src/eraftkv_ctl.cc
src/eraftkv.pb.cc
src/eraftkv.grpc.pb.cc
)
src/util.cc
)

target_link_libraries(eraftkv-ctl PUBLIC
gRPC::grpc++
${Protobuf_LIBRARY}
Expand Down
25 changes: 16 additions & 9 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -51,28 +51,35 @@ rm-net:
docker network rm mytestnetwork

run-demo:
docker run --name kvserver-node1 --network mytestnetwork --ip 172.18.0.2 -d --rm -v $(realpath .):/eraft eraft/eraftkv:v0.0.6 /eraft/build/eraftkv 0 /tmp/kv_db0 /tmp/log_db0 172.18.0.2:8088,172.18.0.3:8089,172.18.0.4:8090
sleep 5
docker run --name kvserver-node2 --network mytestnetwork --ip 172.18.0.3 -d --rm -v $(realpath .):/eraft eraft/eraftkv:v0.0.6 /eraft/build/eraftkv 1 /tmp/kv_db1 /tmp/log_db1 172.18.0.2:8088,172.18.0.3:8089,172.18.0.4:8090
docker run --name kvserver-node3 --network mytestnetwork --ip 172.18.0.4 -d --rm -v $(realpath .):/eraft eraft/eraftkv:v0.0.6 /eraft/build/eraftkv 2 /tmp/kv_db2 /tmp/log_db2 172.18.0.2:8088,172.18.0.3:8089,172.18.0.4:8090
sleep 10
docker run --name vdbserver-node --network mytestnetwork --ip 172.18.0.6 -d --rm -v $(realpath .):/eraft eraft/eraftkv:v0.0.6 /eraft/build/eraft-vdb 172.18.0.6:12306 172.18.0.2:8088,172.18.0.3:8089,172.18.0.4:8090
docker run --name kvserver-node1 --network mytestnetwork --ip 172.18.0.10 -d --rm -v $(realpath .):/eraft eraft/eraftkv:v0.0.6 /eraft/build/eraftkv 0 /tmp/kv_db0 /tmp/log_db0 172.18.0.10:8088,172.18.0.11:8089,172.18.0.12:8090
sleep 2
docker run --name kvserver-node2 --network mytestnetwork --ip 172.18.0.11 -d --rm -v $(realpath .):/eraft eraft/eraftkv:v0.0.6 /eraft/build/eraftkv 1 /tmp/kv_db1 /tmp/log_db1 172.18.0.10:8088,172.18.0.11:8089,172.18.0.12:8090
docker run --name kvserver-node3 --network mytestnetwork --ip 172.18.0.12 -d --rm -v $(realpath .):/eraft eraft/eraftkv:v0.0.6 /eraft/build/eraftkv 2 /tmp/kv_db2 /tmp/log_db2 172.18.0.10:8088,172.18.0.11:8089,172.18.0.12:8090
sleep 1
docker run --name metaserver-node1 --network mytestnetwork --ip 172.18.0.2 -d --rm -v $(realpath .):/eraft eraft/eraftkv:v0.0.6 /eraft/build/eraftmeta 0 /tmp/meta_db0 /tmp/log_db0 172.18.0.2:8088,172.18.0.3:8089,172.18.0.4:8090
sleep 3
docker run --name metaserver-node2 --network mytestnetwork --ip 172.18.0.3 -d --rm -v $(realpath .):/eraft eraft/eraftkv:v0.0.6 /eraft/build/eraftmeta 1 /tmp/meta_db1 /tmp/log_db1 172.18.0.2:8088,172.18.0.3:8089,172.18.0.4:8090
docker run --name metaserver-node3 --network mytestnetwork --ip 172.18.0.4 -d --rm -v $(realpath .):/eraft eraft/eraftkv:v0.0.6 /eraft/build/eraftmeta 2 /tmp/meta_db2 /tmp/log_db2 172.18.0.2:8088,172.18.0.3:8089,172.18.0.4:8090
sleep 16
docker run --name metaserver-tests --network mytestnetwork --ip 172.18.0.8 -it --rm -v $(realpath .):/eraft eraft/eraftkv:v0.0.6 /eraft/build/eraftmeta_server_test
sleep 2
docker run --name vdbserver-node --network mytestnetwork --ip 172.18.0.6 -it --rm -v $(realpath .):/eraft eraft/eraftkv:v0.0.6 /eraft/build/eraft-vdb 172.18.0.6:12306 172.18.0.2:8088,172.18.0.3:8089,172.18.0.4:8090

stop-demo:
docker stop kvserver-node1 kvserver-node2 kvserver-node3 vdbserver-node
docker stop kvserver-node1 kvserver-node2 kvserver-node3 vdbserver-node metaserver-node1 metaserver-node2 metaserver-node3

run-demo-bench:
docker run --name kvserver-bench --network mytestnetwork --ip 172.18.0.5 --rm -v $(realpath .):/eraft eraft/eraftkv:v0.0.6 /eraft/build/eraftkv-ctl 172.18.0.2:8088 bench 64 64 10

run-vdb-tests:
chmod +x utils/run-vdb-tests.sh
docker run --name vdbserver-node-tests --network mytestnetwork --ip 172.18.0.8 -it --rm -v $(realpath .):/eraft eraft/eraftkv:v0.0.6 /eraft/utils/run-vdb-tests.sh
docker run --name vdbserver-node-tests --network mytestnetwork --ip 172.18.0.9 -it --rm -v $(realpath .):/eraft eraft/eraftkv:v0.0.6 /eraft/utils/run-vdb-tests.sh

run-metaserver-tests:
docker run --name metaserver-node1 --network mytestnetwork --ip 172.18.0.2 -d --rm -v $(realpath .):/eraft eraft/eraftkv:v0.0.6 /eraft/build/eraftmeta 0 /tmp/meta_db0 /tmp/log_db0 172.18.0.2:8088,172.18.0.3:8089,172.18.0.4:8090
docker run --name metaserver-node2 --network mytestnetwork --ip 172.18.0.3 -d --rm -v $(realpath .):/eraft eraft/eraftkv:v0.0.6 /eraft/build/eraftmeta 1 /tmp/meta_db1 /tmp/log_db1 172.18.0.2:8088,172.18.0.3:8089,172.18.0.4:8090
docker run --name metaserver-node3 --network mytestnetwork --ip 172.18.0.4 -d --rm -v $(realpath .):/eraft eraft/eraftkv:v0.0.6 /eraft/build/eraftmeta 2 /tmp/meta_db2 /tmp/log_db2 172.18.0.2:8088,172.18.0.3:8089,172.18.0.4:8090
sleep 10
sleep 6
docker run --name metaserver-tests --network mytestnetwork --ip 172.18.0.8 -it --rm -v $(realpath .):/eraft eraft/eraftkv:v0.0.6 /eraft/build/eraftmeta_server_test
sleep 2
docker stop metaserver-node1 metaserver-node2 metaserver-node3
142 changes: 80 additions & 62 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,7 @@

ERaftKDB is a distributed database that supports the Redis RESP protocol, and uses ERaftKV as the distributed storage layer.

![eraft-kdb](doc/eraft-kdb.jpg)
![eraft-kdb](doc/eraft-kdb.png)

## ERaftKV

Expand All @@ -20,6 +20,14 @@ ERaftKV is a persistent distributed KV storage system, uses the Raft protocol to

# Getting Started

## Build

Execute the following build command on a machine with Docker installed.

```
sudo make build-dev
```

## Run demo in docker

- step 1, create docker sub net
Expand All @@ -34,23 +42,59 @@ docker network create --subnet=172.18.0.0/16 mytestnetwork
f57ad3d454f27f4b84efca3ce61bf4764bd30ce3d4971b85477daf05c6ae28a3
```

- step 2, run cluster
- step 2, run cluster in shard mode

```
sudo make run-demo
```
command output
```
docker run --name kvserver-node1 --network mytestnetwork --ip 172.18.0.2 -d --rm -v /home/colin/eraft:/eraft eraft/eraftkv:v0.0.6 /eraft/build/eraftkv 0 /tmp/kv_db0 /tmp/log_db0 172.18.0.2:8088,172.18.0.3:8089,172.18.0.4:8090
b11928b1281d6693a1fb12d12ab6fbc1f4b13c509d983fb3f04551fdcdff5d32
sleep 4s
docker run --name kvserver-node2 --network mytestnetwork --ip 172.18.0.3 -d --rm -v /home/colin/eraft:/eraft eraft/eraftkv:v0.0.6 /eraft/build/eraftkv 1 /tmp/kv_db1 /tmp/log_db1 172.18.0.2:8088,172.18.0.3:8089,172.18.0.4:8090
7588f7ab8176a518ef6100157cfa6cc966ce719401b6f1909f7944230ef4266b
docker run --name kvserver-node3 --network mytestnetwork --ip 172.18.0.4 -d --rm -v /home/colin/eraft:/eraft eraft/eraftkv:v0.0.6 /eraft/build/eraftkv 2 /tmp/kv_db2 /tmp/log_db2 172.18.0.2:8088,172.18.0.3:8089,172.18.0.4:8090
8192b50bfc5f00ab0ce43fe95013fa1c80b40a5383ed260333c3032bf7e62203
sleep 10s
docker run --name vdbserver-node --network mytestnetwork --ip 172.18.0.6 -d --rm -v /home/colin/eraft:/eraft eraft/eraftkv:v0.0.6 /eraft/build/eraft-vdb 172.18.0.6:12306 172.18.0.2:8088,172.18.0.3:8089,172.18.0.4:8090
32064172894ca5afb6bc20546121817da8c75438e36c54550b373d8236690653
docker run --name kvserver-node1 --network mytestnetwork --ip 172.18.0.10 -d --rm -v /home/colin/eraft:/eraft eraft/eraftkv:v0.0.6 /eraft/build/eraftkv 0 /tmp/kv_db0 /tmp/log_db0 172.18.0.10:8088,172.18.0.11:8089,172.18.0.12:8090
eca081a545a9eb8dbf9b05c2a307f38c74b4fea2910776e85c806c1b70cedf20
sleep 2
docker run --name kvserver-node2 --network mytestnetwork --ip 172.18.0.11 -d --rm -v /home/colin/eraft:/eraft eraft/eraftkv:v0.0.6 /eraft/build/eraftkv 1 /tmp/kv_db1 /tmp/log_db1 172.18.0.10:8088,172.18.0.11:8089,172.18.0.12:8090
74d14edf114f47889b50f0ed20ea810af7cd383de26ebdd3d1e36078290674e7
docker run --name kvserver-node3 --network mytestnetwork --ip 172.18.0.12 -d --rm -v /home/colin/eraft:/eraft eraft/eraftkv:v0.0.6 /eraft/build/eraftkv 2 /tmp/kv_db2 /tmp/log_db2 172.18.0.10:8088,172.18.0.11:8089,172.18.0.12:8090
36bd437e67d00f019732e95e31b7f7ab9c19739a0f10676f31e9c0a7fad98a6c
sleep 1
docker run --name metaserver-node1 --network mytestnetwork --ip 172.18.0.2 -d --rm -v /home/colin/eraft:/eraft eraft/eraftkv:v0.0.6 /eraft/build/eraftmeta 0 /tmp/meta_db0 /tmp/log_db0 172.18.0.2:8088,172.18.0.3:8089,172.18.0.4:8090
f8a1382542f41d14e645ddeb285e8b93afc4367b8537e5bc4030487116d8f5cd
sleep 3
docker run --name metaserver-node2 --network mytestnetwork --ip 172.18.0.3 -d --rm -v /home/colin/eraft:/eraft eraft/eraftkv:v0.0.6 /eraft/build/eraftmeta 1 /tmp/meta_db1 /tmp/log_db1 172.18.0.2:8088,172.18.0.3:8089,172.18.0.4:8090
7f5385341bc1f990f50020bd09526eaba3eeec56ab3c67fab325d313ab4ceaea
docker run --name metaserver-node3 --network mytestnetwork --ip 172.18.0.4 -d --rm -v /home/colin/eraft:/eraft eraft/eraftkv:v0.0.6 /eraft/build/eraftmeta 2 /tmp/meta_db2 /tmp/log_db2 172.18.0.2:8088,172.18.0.3:8089,172.18.0.4:8090
666732b5a9b10cd828e9c0829bd97159b3ac9a7d39d1d3f2dcbbc2e5af654373
sleep 16
docker run --name metaserver-tests --network mytestnetwork --ip 172.18.0.8 -it --rm -v /home/colin/eraft:/eraft eraft/eraftkv:v0.0.6 /eraft/build/eraftmeta_server_test
[==========] Running 1 test from 1 test suite.
[----------] Global test environment set-up.
[----------] 1 test from EraftMetaServerTests
[ RUN ] EraftMetaServerTests.TestMetaBasicOp
DEBUG: cluster config resp -> success: true
shard_group {
id: 1
servers {
address: "172.18.0.10:8088"
}
servers {
id: 1
address: "172.18.0.11:8089"
}
servers {
id: 2
address: "172.18.0.12:8090"
}
}

[ OK ] EraftMetaServerTests.TestMetaBasicOp (4028 ms)
[----------] 1 test from EraftMetaServerTests (4028 ms total)

[----------] Global test environment tear-down
[==========] 1 test from 1 test suite ran. (4028 ms total)
[ PASSED ] 1 test.
sleep 2
docker run --name vdbserver-node --network mytestnetwork --ip 172.18.0.6 -it --rm -v /home/colin/eraft:/eraft eraft/eraftkv:v0.0.6 /eraft/build/eraft-vdb 172.18.0.6:12306 172.18.0.2:8088,172.18.0.3:8089,172.18.0.4:8090
run server success!
```

- step 3, run eraft vdb tests
Expand All @@ -61,15 +105,32 @@ sudo make run-vdb-tests
command output
```
chmod +x utils/run-vdb-tests.sh
docker run --name vdbserver-node-tests --network mytestnetwork --ip 172.18.0.8 -it --rm -v /home/colin/eraft:/eraft eraft/eraftkv:v0.0.6 /eraft/utils/run-vdb-tests.sh
docker run --name vdbserver-node-tests --network mytestnetwork --ip 172.18.0.9 -it --rm -v /home/colin/eraft:/eraft eraft/eraftkv:v0.0.6 /eraft/utils/run-vdb-tests.sh
+ redis-cli -h 172.18.0.6 -p 12306 info
server_id: 0,server_address: 172.18.0.2:8088,status: Running,Role: Follower
server_id: 1,server_address: 172.18.0.3:8089,status: Running,Role: Leader
server_id: 2,server_address: 172.18.0.4:8090,status: Running,Role: Follower
+ redis-cli -h 172.18.0.6 -p 12306 set a testvalue
server_id: 0,server_address: 172.18.0.10:8088,status: Running,Role: Leader
server_id: 1,server_address: 172.18.0.11:8089,status: Running,Role: Follower
server_id: 2,server_address: 172.18.0.12:8090,status: Running,Role: Follower
+ redis-cli -h 172.18.0.6 -p 12306 set a h
OK
+ redis-cli -h 172.18.0.6 -p 12306 set b e
OK
+ redis-cli -h 172.18.0.6 -p 12306 set c l
OK
+ redis-cli -h 172.18.0.6 -p 12306 set d l
OK
+ redis-cli -h 172.18.0.6 -p 12306 set e o
OK
+ sleep 1
+ redis-cli -h 172.18.0.6 -p 12306 get a
"testvalue"
"h"
+ redis-cli -h 172.18.0.6 -p 12306 get b
"e"
+ redis-cli -h 172.18.0.6 -p 12306 get c
"l"
+ redis-cli -h 172.18.0.6 -p 12306 get d
"l"
+ redis-cli -h 172.18.0.6 -p 12306 get e
"o"
```

- step 4, clean all
Expand Down Expand Up @@ -106,50 +167,7 @@ make image
```

# Documentation

## Run ERaftKV server group demo in physical machine

- how to set up demo cluster?

```
./build/eraftkv 0 /tmp/kv_db0 /tmp/log_db0 127.0.0.1:8088,127.0.0.1:8089,127.0.0.1:8090
./build/eraftkv 1 /tmp/kv_db1 /tmp/log_db1 127.0.0.1:8088,127.0.0.1:8089,127.0.0.1:8090
./build/eraftkv 2 /tmp/kv_db2 /tmp/log_db2 127.0.0.1:8088,127.0.0.1:8089,127.0.0.1:8090

```

- how to join an new node?

```
./build/eraftkv 3 /tmp/kv_db4 /tmp/log_db4 127.0.0.1:8088,127.0.0.1:8089,127.0.0.1:8090,127.0.0.1:8091

./build/eraftkv-ctl addnode 3 127.0.0.1:8091
```

## example usage

- put kv
```
./eraftkv-ctl [leader_address] put [key] [value]
```
- get kv
```
./eraftkv-ctl [leader_address] get [key]
```

- addnode to raft group
```
./eraftkv-ctl [leader_address] addnode [node id] [node address]
```
- remove node from raft group
```
./eraftkv-ctl [leader_address] removenode [node id]
```

- run kv benchmark
```
./eraftkv-ctl [leader_address] bench [key size] [value size] [op count]
```
[ERaftKV Documentation](doc/eraft-vdb.md)

# Contributing

Expand Down
Binary file removed doc/eraft-kdb.jpg
Binary file not shown.
Binary file added doc/eraft-kdb.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
1 change: 1 addition & 0 deletions doc/eraft-kdb.svg
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
11 changes: 0 additions & 11 deletions doc/meta-server.md
Original file line number Diff line number Diff line change
Expand Up @@ -4,14 +4,3 @@ K -> V

[ ['S'] ['G'] ['M'] ['E'] ['T'] ['A'] [ID (int64)]] -> ShardGroup protobuf serialized message

- Join

example:

[ ['S'] ['G'] ['M'] ['E'] ['T'] ['A'] [1] ] ->
{ id: 1, slots: [], servers: ['0-127.0.0.1:8088-UP,1-127.0.0.1:8089-UP,2-127.0.0.1:8090-UP'], leader_id: 0}

- Leave


- Query
4 changes: 3 additions & 1 deletion protocol/eraftkv.proto
Original file line number Diff line number Diff line change
Expand Up @@ -168,7 +168,9 @@ message ClientOperationReq {
}

message ClientOperationResp {
repeated KvOpPair ops = 2;
repeated KvOpPair ops = 1;
ErrorCode error_code = 2;
int64 leader_addr = 3;
}

service ERaftKv {
Expand Down
68 changes: 52 additions & 16 deletions src/client.cc
Original file line number Diff line number Diff line change
Expand Up @@ -41,15 +41,18 @@ PacketLength Client::_HandlePacket(const char *start, std::size_t bytes) {
return static_cast<PacketLength>(bytes);
}

Client::Client(std::string kv_addrs) : leader_addr_("") {
// init stub to kv server node
auto kv_node_addrs = StringUtil::Split(kv_addrs, ',');
for (auto kv_node_addr : kv_node_addrs) {
Client::Client(std::string meta_addrs) : leader_addr_("") {
// init stub to meta server node
auto meta_node_addrs = StringUtil::Split(meta_addrs, ',');
for (auto meta_node_addr : meta_node_addrs) {
TraceLog("DEBUG: init rpc link to ", meta_node_addr);
auto chan_ =
grpc::CreateChannel(kv_node_addr, grpc::InsecureChannelCredentials());
grpc::CreateChannel(meta_node_addr, grpc::InsecureChannelCredentials());
std::unique_ptr<ERaftKv::Stub> stub_(ERaftKv::NewStub(chan_));
this->stubs_[kv_node_addr] = std::move(stub_);
this->stubs_[meta_node_addr] = std::move(stub_);
}
// sync config
SyncClusterConfig();
_Reset();
}

Expand All @@ -60,19 +63,52 @@ void Client::_Reset() {

void Client::OnConnect() {}

std::string Client::GetLeaderAddr() {
std::string Client::GetLeaderAddr(std::string partion_key) {
std::string leader_address;
int64_t key_slot = -1;
key_slot = HashUtil::CRC64(0, partion_key.c_str(), partion_key.size()) % 1024;
TraceLog("DEBUG: partion key " + partion_key + " with slot ", key_slot);
for (auto sg : cluster_conf_.shard_group()) {
for (auto sl : sg.slots()) {
if (key_slot == sl.id()) {
// find sg leader addr
for (auto server : sg.servers()) {
if (server.id() == sg.leader_id()) {
ClientContext context;
eraftkv::ClusterConfigChangeReq req;
req.set_change_type(eraftkv::ChangeType::MetaMembersQuery);
eraftkv::ClusterConfigChangeResp resp;
auto status = stubs_[server.address()]->ClusterConfigChange(
&context, req, &resp);
for (int i = 0; i < resp.shard_group(0).servers_size(); i++) {
if (resp.shard_group(0).leader_id() ==
resp.shard_group(0).servers(i).id()) {
leader_address = resp.shard_group(0).servers(i).address();
}
}
}
}
}
}
}

return leader_address;
}

EStatus Client::SyncClusterConfig() {
ClientContext context;
eraftkv::ClusterConfigChangeReq req;
req.set_change_type(eraftkv::ChangeType::ShardsQuery);
eraftkv::ClusterConfigChangeResp resp;
auto status =
this->stubs_.begin()->second->ClusterConfigChange(&context, req, &resp);
std::string leader_addr = "";
for (int i = 0; i < resp.shard_group(0).servers_size(); i++) {
if (resp.shard_group(0).leader_id() ==
resp.shard_group(0).servers(i).id()) {
leader_addr = resp.shard_group(0).servers(i).address();
auto status_ = this->stubs_.begin()->second->ClusterConfigChange(
&context, req, &cluster_conf_);
for (auto sg : cluster_conf_.shard_group()) {
for (auto server : sg.servers()) {
auto chan_ = grpc::CreateChannel(server.address(),
grpc::InsecureChannelCredentials());
std::unique_ptr<ERaftKv::Stub> stub_(ERaftKv::NewStub(chan_));
this->stubs_[server.address()] = std::move(stub_);
}
}
return leader_addr;

return status_.ok() ? EStatus::kOk : EStatus::kError;
}
Loading