diff --git a/.gitfiles b/.gitfiles index be6e94472e..33418861a4 100644 --- a/.gitfiles +++ b/.gitfiles @@ -123,6 +123,7 @@ .github/workflows/semver-major-minor.yaml .github/workflows/semver-patch.yaml .github/workflows/test-hack.yaml +.github/workflows/unit-test-rust.yaml .github/workflows/unit-test.yaml .github/workflows/update-deps.yaml .github/workflows/update-protobuf.yaml @@ -157,7 +158,23 @@ Makefile.d/test.mk Makefile.d/tools.mk README.md SECURITY.md +apis/docs/buf.gen.doc.yaml +apis/docs/buf.gen.payload.yaml +apis/docs/buf.gen.tmpl.yaml +apis/docs/v1/doc.tmpl apis/docs/v1/docs.md +apis/docs/v1/filter.md +apis/docs/v1/flush.md +apis/docs/v1/index.md +apis/docs/v1/insert.md +apis/docs/v1/mirror.md +apis/docs/v1/object.md +apis/docs/v1/payload.md.tmpl +apis/docs/v1/payload.tmpl +apis/docs/v1/remove.md +apis/docs/v1/search.md +apis/docs/v1/update.md +apis/docs/v1/upsert.md apis/grpc/v1/agent/core/agent.go apis/grpc/v1/agent/core/agent.pb.go apis/grpc/v1/agent/core/agent_vtproto.pb.go @@ -468,6 +485,7 @@ cmd/agent/core/ngt/main_test.go cmd/agent/core/ngt/sample-cow.yaml cmd/agent/core/ngt/sample-fp16.yaml cmd/agent/core/ngt/sample.yaml +cmd/agent/core/qbg/sample.yaml cmd/agent/sidecar/main.go cmd/agent/sidecar/main_test.go cmd/discoverer/k8s/main.go @@ -491,6 +509,7 @@ cmd/index/job/creation/main.go cmd/index/job/creation/main_test.go cmd/index/job/creation/sample.yaml cmd/index/job/deletion/main.go +cmd/index/job/deletion/main_test.go cmd/index/job/deletion/sample.yaml cmd/index/job/readreplica/rotate/main.go cmd/index/job/readreplica/rotate/main_test.go @@ -841,6 +860,7 @@ internal/config/index.go internal/config/index_creation.go internal/config/index_creation_test.go internal/config/index_deleter.go +internal/config/index_deleter_test.go internal/config/index_operator.go internal/config/index_operator_test.go internal/config/index_save.go @@ -1064,6 +1084,7 @@ internal/io/copy_bench_test.go internal/io/copy_test.go internal/io/io.go internal/io/io_test.go +internal/iter/iter.go internal/k8s/client/client.go internal/k8s/client/client_test.go internal/k8s/client/option.go @@ -1249,7 +1270,9 @@ internal/net/net_test.go internal/net/option.go internal/net/option_test.go internal/net/quic/conn.go +internal/net/quic/conn_test.go internal/net/quic/listener.go +internal/net/quic/listener_test.go internal/observability/attribute/attribute.go internal/observability/attribute/attribute_test.go internal/observability/exporter/exporter.go @@ -1380,6 +1403,10 @@ internal/test/data/tls/invalid.crt internal/test/data/tls/invalid.pem internal/test/data/vector/gen.go internal/test/data/vector/gen_test.go +internal/test/data/vector/noise/noise.go +internal/test/data/vector/noise/noise_test.go +internal/test/data/vector/noise/option.go +internal/test/data/vector/noise/option_test.go internal/test/doc.go internal/test/goleak/goleak.go internal/test/goleak/goleak_test.go @@ -1795,9 +1822,13 @@ pkg/index/job/creation/service/options_test.go pkg/index/job/creation/usecase/creation.go pkg/index/job/creation/usecase/creation_test.go pkg/index/job/deletion/config/config.go +pkg/index/job/deletion/config/config_test.go pkg/index/job/deletion/service/deleter.go +pkg/index/job/deletion/service/deleter_test.go pkg/index/job/deletion/service/options.go +pkg/index/job/deletion/service/options_test.go pkg/index/job/deletion/usecase/deletion.go +pkg/index/job/deletion/usecase/deletion_test.go pkg/index/job/readreplica/rotate/config/config.go pkg/index/job/readreplica/rotate/config/config_test.go 
pkg/index/job/readreplica/rotate/service/options.go @@ -1931,6 +1962,7 @@ rust/bin/agent/src/handler.rs rust/bin/agent/src/handler/common.rs rust/bin/agent/src/handler/index.rs rust/bin/agent/src/handler/insert.rs +rust/bin/agent/src/handler/object.rs rust/bin/agent/src/handler/remove.rs rust/bin/agent/src/handler/search.rs rust/bin/agent/src/handler/update.rs @@ -1940,6 +1972,7 @@ rust/bin/meta/Cargo.toml rust/bin/meta/src/handler.rs rust/bin/meta/src/handler/meta.rs rust/bin/meta/src/main.rs +rust/bin/meta/src/test_client.rs rust/libs/algorithm/Cargo.toml rust/libs/algorithm/src/lib.rs rust/libs/algorithms/faiss/Cargo.toml @@ -1949,6 +1982,11 @@ rust/libs/algorithms/ngt/build.rs rust/libs/algorithms/ngt/src/input.cpp rust/libs/algorithms/ngt/src/input.h rust/libs/algorithms/ngt/src/lib.rs +rust/libs/algorithms/qbg/Cargo.toml +rust/libs/algorithms/qbg/build.rs +rust/libs/algorithms/qbg/src/input.cpp +rust/libs/algorithms/qbg/src/input.h +rust/libs/algorithms/qbg/src/lib.rs rust/libs/observability/Cargo.toml rust/libs/observability/src/config.rs rust/libs/observability/src/lib.rs @@ -1993,6 +2031,25 @@ tests/e2e/performance/max_vector_dim_test.go tests/e2e/pkg/agent/core/ngt/service/ngt_e2s_test.go tests/e2e/sidecar/sidecar_test.go tests/performance/max_vector_dim_test.go +tests/v2/e2e/assets/rollout.yaml +tests/v2/e2e/assets/unary_crud.yaml +tests/v2/e2e/config/config.go +tests/v2/e2e/config/enums.go +tests/v2/e2e/crud/crud_test.go +tests/v2/e2e/crud/dataset_test.go +tests/v2/e2e/crud/grpc_test.go +tests/v2/e2e/crud/index_test.go +tests/v2/e2e/crud/modification_test.go +tests/v2/e2e/crud/object_test.go +tests/v2/e2e/crud/search_test.go +tests/v2/e2e/crud/strategy_test.go +tests/v2/e2e/hdf5/hdf5.go +tests/v2/e2e/kubernetes/client.go +tests/v2/e2e/kubernetes/portforward/option.go +tests/v2/e2e/kubernetes/portforward/portforward.go +tests/v2/e2e/kubernetes/resources.go +tests/v2/e2e/kubernetes/rollout.go +tests/v2/e2e/kubernetes/status.go versions/BUF_VERSION versions/CHAOS_MESH_VERSION versions/CMAKE_VERSION @@ -2004,6 +2061,7 @@ versions/HDF5_VERSION versions/HELM_DOCS_VERSION versions/HELM_VERSION versions/JAEGER_OPERATOR_VERSION +versions/K3D_VERSION versions/K3S_VERSION versions/KIND_VERSION versions/KUBECTL_VERSION diff --git a/.github/workflows/_docker-image.yaml b/.github/workflows/_docker-image.yaml index 0406f9ac08..172daf98a4 100644 --- a/.github/workflows/_docker-image.yaml +++ b/.github/workflows/_docker-image.yaml @@ -70,6 +70,17 @@ jobs: - name: Set Git config run: | git config --global --add safe.directory ${GITHUB_WORKSPACE} + - name: Login to DockerHub + uses: docker/login-action@v3 + with: + username: ${{ secrets.DOCKERHUB_USER }} + password: ${{ secrets.DOCKERHUB_PASS }} + - name: Login to GitHub Container Registry + uses: docker/login-action@v3 + with: + registry: ghcr.io + username: ${{ secrets.PACKAGE_USER }} + password: ${{ secrets.PACKAGE_TOKEN }} - name: Setup QEMU uses: docker/setup-qemu-action@v3 with: @@ -85,17 +96,6 @@ jobs: image=ghcr.io/vdaas/vald/vald-buildkit:nightly network=host buildkitd-flags: "--debug --oci-worker-gc=false --oci-worker-snapshotter=stargz" - - name: Login to DockerHub - uses: docker/login-action@v3 - with: - username: ${{ secrets.DOCKERHUB_USER }} - password: ${{ secrets.DOCKERHUB_PASS }} - - name: Login to GitHub Container Registry - uses: docker/login-action@v3 - with: - registry: ghcr.io - username: ${{ secrets.PACKAGE_USER }} - password: ${{ secrets.PACKAGE_TOKEN }} - name: Build and Publish id: build_and_publish uses: 
./.github/actions/docker-build diff --git a/.github/workflows/_release-pr.yaml b/.github/workflows/_release-pr.yaml index eddf2614ef..3c0ef4feea 100644 --- a/.github/workflows/_release-pr.yaml +++ b/.github/workflows/_release-pr.yaml @@ -41,6 +41,9 @@ jobs: runs-on: ubuntu-latest container: image: ghcr.io/vdaas/vald/vald-ci-container:${{ needs.detect-ci-container.outputs.TAG }} + credentials: + username: ${{ secrets.PACKAGE_USER }} + password: ${{ secrets.PACKAGE_TOKEN }} env: RELEASE_BRANCH_NAME: ${{ inputs.release_branch_name }} PREPARE_RELEASE_BRANCH_NAME: prepare/${{ inputs.release_branch_name }} diff --git a/.github/workflows/build-binaries.yaml b/.github/workflows/build-binaries.yaml index 59f671e269..2bb8d7d1a7 100644 --- a/.github/workflows/build-binaries.yaml +++ b/.github/workflows/build-binaries.yaml @@ -31,6 +31,9 @@ jobs: needs: [detect-ci-container] container: image: ghcr.io/vdaas/vald/vald-ci-container:${{ needs.detect-ci-container.outputs.TAG }} + credentials: + username: ${{ secrets.PACKAGE_USER }} + password: ${{ secrets.PACKAGE_TOKEN }} steps: - uses: actions/checkout@v4 with: diff --git a/.github/workflows/build-protobuf.yaml b/.github/workflows/build-protobuf.yaml index a4af0a73e1..15eaa1910c 100644 --- a/.github/workflows/build-protobuf.yaml +++ b/.github/workflows/build-protobuf.yaml @@ -40,6 +40,9 @@ jobs: needs: [detect-ci-container] container: image: ghcr.io/vdaas/vald/vald-ci-container:${{ needs.detect-ci-container.outputs.TAG }} + credentials: + username: ${{ secrets.PACKAGE_USER }} + password: ${{ secrets.PACKAGE_TOKEN }} steps: - uses: actions/checkout@v4 - name: Set Git config diff --git a/.github/workflows/chatops.yaml b/.github/workflows/chatops.yaml index 63a38d7dfc..7a820134e6 100644 --- a/.github/workflows/chatops.yaml +++ b/.github/workflows/chatops.yaml @@ -29,6 +29,9 @@ jobs: runs-on: ubuntu-latest container: image: ghcr.io/vdaas/vald/vald-ci-container:nightly + credentials: + username: ${{ secrets.PACKAGE_USER }} + password: ${{ secrets.PACKAGE_TOKEN }} steps: - name: Check PR Comments id: check_comments_label @@ -79,6 +82,9 @@ jobs: runs-on: ubuntu-latest container: image: ghcr.io/vdaas/vald/vald-ci-container:nightly + credentials: + username: ${{ secrets.PACKAGE_USER }} + password: ${{ secrets.PACKAGE_TOKEN }} steps: - name: Check PR Comments id: check_comments_rebase @@ -156,6 +162,9 @@ jobs: runs-on: ubuntu-latest container: image: ghcr.io/vdaas/vald/vald-ci-container:nightly + credentials: + username: ${{ secrets.PACKAGE_USER }} + password: ${{ secrets.PACKAGE_TOKEN }} steps: - name: check PR Comments id: check_comments_gen_test @@ -269,6 +278,9 @@ jobs: runs-on: ubuntu-latest container: image: ghcr.io/vdaas/vald/vald-ci-container:nightly + credentials: + username: ${{ secrets.PACKAGE_USER }} + password: ${{ secrets.PACKAGE_TOKEN }} steps: - name: check PR Comments id: check_comments_format @@ -401,6 +413,9 @@ jobs: runs-on: ubuntu-latest container: image: ghcr.io/vdaas/vald/vald-ci-container:nightly + credentials: + username: ${{ secrets.PACKAGE_USER }} + password: ${{ secrets.PACKAGE_TOKEN }} steps: - name: check PR Comments id: check_comments_approve diff --git a/.github/workflows/codeql-analysis.yml b/.github/workflows/codeql-analysis.yml index d0154424a1..a9e1333b23 100644 --- a/.github/workflows/codeql-analysis.yml +++ b/.github/workflows/codeql-analysis.yml @@ -49,6 +49,9 @@ jobs: needs: [detect-ci-container] container: image: ghcr.io/vdaas/vald/vald-ci-container:${{ needs.detect-ci-container.outputs.TAG }} + credentials: + username: ${{ 
secrets.PACKAGE_USER }} + password: ${{ secrets.PACKAGE_TOKEN }} steps: - name: Checkout repository uses: actions/checkout@v4 diff --git a/.github/workflows/coverage.yaml b/.github/workflows/coverage.yaml index 1c91cea5c6..1496c80ec5 100644 --- a/.github/workflows/coverage.yaml +++ b/.github/workflows/coverage.yaml @@ -42,6 +42,9 @@ jobs: needs: [detect-ci-container] container: image: ghcr.io/vdaas/vald/vald-ci-container:${{ needs.detect-ci-container.outputs.TAG }} + credentials: + username: ${{ secrets.PACKAGE_USER }} + password: ${{ secrets.PACKAGE_TOKEN }} steps: - uses: actions/checkout@v4 with: diff --git a/.github/workflows/e2e-chaos.yaml b/.github/workflows/e2e-chaos.yaml index fe91004ccd..d9c11b764b 100644 --- a/.github/workflows/e2e-chaos.yaml +++ b/.github/workflows/e2e-chaos.yaml @@ -44,6 +44,9 @@ jobs: timeout-minutes: 60 container: image: ghcr.io/vdaas/vald/vald-ci-container:${{ needs.detect-ci-container.outputs.TAG }} + credentials: + username: ${{ secrets.PACKAGE_USER }} + password: ${{ secrets.PACKAGE_TOKEN }} options: "--add-host host.docker.internal:host-gateway" steps: - uses: actions/checkout@v4 @@ -85,6 +88,9 @@ jobs: timeout-minutes: 60 container: image: ghcr.io/vdaas/vald/vald-ci-container:${{ needs.detect-ci-container.outputs.TAG }} + credentials: + username: ${{ secrets.PACKAGE_USER }} + password: ${{ secrets.PACKAGE_TOKEN }} options: "--add-host host.docker.internal:host-gateway" steps: - uses: actions/checkout@v4 @@ -126,6 +132,9 @@ jobs: timeout-minutes: 60 container: image: ghcr.io/vdaas/vald/vald-ci-container:${{ needs.detect-ci-container.outputs.TAG }} + credentials: + username: ${{ secrets.PACKAGE_USER }} + password: ${{ secrets.PACKAGE_TOKEN }} options: "--add-host host.docker.internal:host-gateway" steps: - uses: actions/checkout@v4 @@ -167,6 +176,9 @@ jobs: timeout-minutes: 60 container: image: ghcr.io/vdaas/vald/vald-ci-container:${{ needs.detect-ci-container.outputs.TAG }} + credentials: + username: ${{ secrets.PACKAGE_USER }} + password: ${{ secrets.PACKAGE_TOKEN }} options: "--add-host host.docker.internal:host-gateway" steps: - uses: actions/checkout@v4 diff --git a/.github/workflows/e2e-code-bench-agent.yaml b/.github/workflows/e2e-code-bench-agent.yaml index 9d11e71b6a..d62f9ad749 100644 --- a/.github/workflows/e2e-code-bench-agent.yaml +++ b/.github/workflows/e2e-code-bench-agent.yaml @@ -58,6 +58,9 @@ jobs: needs: [detect-ci-container] container: image: ghcr.io/vdaas/vald/vald-ci-container:${{ needs.detect-ci-container.outputs.TAG }} + credentials: + username: ${{ secrets.PACKAGE_USER }} + password: ${{ secrets.PACKAGE_TOKEN }} options: "--add-host host.docker.internal:host-gateway" steps: - uses: actions/checkout@v4 @@ -85,6 +88,9 @@ jobs: needs: [detect-ci-container] container: image: ghcr.io/vdaas/vald/vald-ci-container:${{ needs.detect-ci-container.outputs.TAG }} + credentials: + username: ${{ secrets.PACKAGE_USER }} + password: ${{ secrets.PACKAGE_TOKEN }} options: "--add-host host.docker.internal:host-gateway" steps: - uses: actions/checkout@v4 diff --git a/.github/workflows/e2e-max-dim.yaml b/.github/workflows/e2e-max-dim.yaml index 518ce59589..d158c03d45 100644 --- a/.github/workflows/e2e-max-dim.yaml +++ b/.github/workflows/e2e-max-dim.yaml @@ -40,6 +40,9 @@ jobs: timeout-minutes: 60 container: image: ghcr.io/vdaas/vald/vald-ci-container:${{ needs.detect-ci-container.outputs.TAG }} + credentials: + username: ${{ secrets.PACKAGE_USER }} + password: ${{ secrets.PACKAGE_TOKEN }} options: "--add-host host.docker.internal:host-gateway" steps: 
- uses: actions/checkout@v4 diff --git a/.github/workflows/e2e-profiling.yaml b/.github/workflows/e2e-profiling.yaml index bdd6dbd759..acfb216db7 100644 --- a/.github/workflows/e2e-profiling.yaml +++ b/.github/workflows/e2e-profiling.yaml @@ -42,6 +42,9 @@ jobs: timeout-minutes: 60 container: image: ghcr.io/vdaas/vald/vald-ci-container:${{ needs.detect-ci-container.outputs.TAG }} + credentials: + username: ${{ secrets.PACKAGE_USER }} + password: ${{ secrets.PACKAGE_TOKEN }} options: "--add-host host.docker.internal:host-gateway" steps: - uses: actions/checkout@v4 diff --git a/.github/workflows/e2e.yaml b/.github/workflows/e2e.yaml index cd0b01df30..88eac0f40b 100644 --- a/.github/workflows/e2e.yaml +++ b/.github/workflows/e2e.yaml @@ -42,6 +42,9 @@ jobs: timeout-minutes: 60 container: image: ghcr.io/vdaas/vald/vald-ci-container:${{ needs.detect-ci-container.outputs.TAG }} + credentials: + username: ${{ secrets.PACKAGE_USER }} + password: ${{ secrets.PACKAGE_TOKEN }} options: "--add-host host.docker.internal:host-gateway" steps: - uses: actions/checkout@v4 @@ -83,6 +86,9 @@ jobs: timeout-minutes: 60 container: image: ghcr.io/vdaas/vald/vald-ci-container:${{ needs.detect-ci-container.outputs.TAG }} + credentials: + username: ${{ secrets.PACKAGE_USER }} + password: ${{ secrets.PACKAGE_TOKEN }} options: "--add-host host.docker.internal:host-gateway" steps: - uses: actions/checkout@v4 @@ -147,6 +153,9 @@ jobs: timeout-minutes: 60 container: image: ghcr.io/vdaas/vald/vald-ci-container:${{ needs.detect-ci-container.outputs.TAG }} + credentials: + username: ${{ secrets.PACKAGE_USER }} + password: ${{ secrets.PACKAGE_TOKEN }} options: "--add-host host.docker.internal:host-gateway" steps: - uses: actions/checkout@v4 @@ -192,6 +201,9 @@ jobs: timeout-minutes: 60 container: image: ghcr.io/vdaas/vald/vald-ci-container:${{ needs.detect-ci-container.outputs.TAG }} + credentials: + username: ${{ secrets.PACKAGE_USER }} + password: ${{ secrets.PACKAGE_TOKEN }} options: "--add-host host.docker.internal:host-gateway" steps: - uses: actions/checkout@v4 @@ -233,6 +245,9 @@ jobs: timeout-minutes: 60 container: image: ghcr.io/vdaas/vald/vald-ci-container:${{ needs.detect-ci-container.outputs.TAG }} + credentials: + username: ${{ secrets.PACKAGE_USER }} + password: ${{ secrets.PACKAGE_TOKEN }} options: "--add-host host.docker.internal:host-gateway" steps: - uses: actions/checkout@v4 @@ -270,6 +285,9 @@ jobs: timeout-minutes: 60 container: image: ghcr.io/vdaas/vald/vald-ci-container:${{ needs.detect-ci-container.outputs.TAG }} + credentials: + username: ${{ secrets.PACKAGE_USER }} + password: ${{ secrets.PACKAGE_TOKEN }} options: "--add-host host.docker.internal:host-gateway" steps: - uses: actions/checkout@v4 @@ -304,6 +322,9 @@ jobs: timeout-minutes: 60 container: image: ghcr.io/vdaas/vald/vald-ci-container:${{ needs.detect-ci-container.outputs.TAG }} + credentials: + username: ${{ secrets.PACKAGE_USER }} + password: ${{ secrets.PACKAGE_TOKEN }} options: "--add-host host.docker.internal:host-gateway" steps: - uses: actions/checkout@v4 @@ -355,6 +376,9 @@ jobs: timeout-minutes: 60 container: image: ghcr.io/vdaas/vald/vald-ci-container:${{ needs.detect-ci-container.outputs.TAG }} + credentials: + username: ${{ secrets.PACKAGE_USER }} + password: ${{ secrets.PACKAGE_TOKEN }} options: "--add-host host.docker.internal:host-gateway" steps: - uses: actions/checkout@v4 diff --git a/.github/workflows/format.yaml b/.github/workflows/format.yaml index ca231695ee..01d8b26b62 100644 --- a/.github/workflows/format.yaml +++ 
b/.github/workflows/format.yaml @@ -33,6 +33,9 @@ jobs: needs: [detect-ci-container] container: image: ghcr.io/vdaas/vald/vald-ci-container:${{ needs.detect-ci-container.outputs.TAG }} + credentials: + username: ${{ secrets.PACKAGE_USER }} + password: ${{ secrets.PACKAGE_TOKEN }} steps: - uses: actions/checkout@v4 with: @@ -89,6 +92,9 @@ jobs: needs: [detect-ci-container] container: image: ghcr.io/vdaas/vald/vald-ci-container:${{ needs.detect-ci-container.outputs.TAG }} + credentials: + username: ${{ secrets.PACKAGE_USER }} + password: ${{ secrets.PACKAGE_TOKEN }} steps: - uses: actions/checkout@v4 with: diff --git a/.github/workflows/fossa.yaml b/.github/workflows/fossa.yaml index 591dcab2d1..6d2a23a37c 100644 --- a/.github/workflows/fossa.yaml +++ b/.github/workflows/fossa.yaml @@ -36,6 +36,9 @@ jobs: needs: [dump-contexts-to-log, detect-ci-container] container: image: ghcr.io/vdaas/vald/vald-ci-container:${{ needs.detect-ci-container.outputs.TAG }} + credentials: + username: ${{ secrets.PACKAGE_USER }} + password: ${{ secrets.PACKAGE_TOKEN }} steps: - uses: actions/checkout@v4 - name: Set Git config diff --git a/.github/workflows/helm-lint.yaml b/.github/workflows/helm-lint.yaml index 795007533d..c82580afd0 100644 --- a/.github/workflows/helm-lint.yaml +++ b/.github/workflows/helm-lint.yaml @@ -32,6 +32,9 @@ jobs: needs: [detect-ci-container] container: image: ghcr.io/vdaas/vald/vald-ci-container:${{ needs.detect-ci-container.outputs.TAG }} + credentials: + username: ${{ secrets.PACKAGE_USER }} + password: ${{ secrets.PACKAGE_TOKEN }} steps: - name: Check out code. uses: actions/checkout@v4 @@ -50,6 +53,9 @@ jobs: needs: [detect-ci-container] container: image: ghcr.io/vdaas/vald/vald-ci-container:${{ needs.detect-ci-container.outputs.TAG }} + credentials: + username: ${{ secrets.PACKAGE_USER }} + password: ${{ secrets.PACKAGE_TOKEN }} steps: - name: Check out code. uses: actions/checkout@v4 @@ -68,6 +74,9 @@ jobs: needs: [detect-ci-container] container: image: ghcr.io/vdaas/vald/vald-ci-container:${{ needs.detect-ci-container.outputs.TAG }} + credentials: + username: ${{ secrets.PACKAGE_USER }} + password: ${{ secrets.PACKAGE_TOKEN }} options: "--add-host=host.docker.internal:host-gateway" steps: - name: Check out code. 
diff --git a/.github/workflows/helm.yaml b/.github/workflows/helm.yaml index fa1b981dd1..5df2b7e145 100644 --- a/.github/workflows/helm.yaml +++ b/.github/workflows/helm.yaml @@ -35,6 +35,9 @@ jobs: needs: [detect-ci-container] container: image: ghcr.io/vdaas/vald/vald-ci-container:${{ needs.detect-ci-container.outputs.TAG }} + credentials: + username: ${{ secrets.PACKAGE_USER }} + password: ${{ secrets.PACKAGE_TOKEN }} options: "--add-host host.docker.internal:host-gateway" steps: - uses: actions/checkout@v4 @@ -77,6 +80,9 @@ jobs: needs: [detect-ci-container] container: image: ghcr.io/vdaas/vald/vald-ci-container:${{ needs.detect-ci-container.outputs.TAG }} + credentials: + username: ${{ secrets.PACKAGE_USER }} + password: ${{ secrets.PACKAGE_TOKEN }} steps: - uses: actions/checkout@v4 - name: Set Git config diff --git a/.github/workflows/reviewdog-k8s.yaml b/.github/workflows/reviewdog-k8s.yaml index b006496a36..8a1d9aa123 100644 --- a/.github/workflows/reviewdog-k8s.yaml +++ b/.github/workflows/reviewdog-k8s.yaml @@ -33,6 +33,9 @@ jobs: needs: [detect-ci-container] container: image: ghcr.io/vdaas/vald/vald-ci-container:${{ needs.detect-ci-container.outputs.TAG }} + credentials: + username: ${{ secrets.PACKAGE_USER }} + password: ${{ secrets.PACKAGE_TOKEN }} steps: - uses: actions/checkout@v4 - name: Set Git config @@ -53,6 +56,9 @@ jobs: needs: [detect-ci-container] container: image: ghcr.io/vdaas/vald/vald-ci-container:${{ needs.detect-ci-container.outputs.TAG }} + credentials: + username: ${{ secrets.PACKAGE_USER }} + password: ${{ secrets.PACKAGE_TOKEN }} steps: - uses: actions/checkout@v4 - name: Set Git config @@ -72,6 +78,9 @@ jobs: needs: [detect-ci-container] container: image: ghcr.io/vdaas/vald/vald-ci-container:${{ needs.detect-ci-container.outputs.TAG }} + credentials: + username: ${{ secrets.PACKAGE_USER }} + password: ${{ secrets.PACKAGE_TOKEN }} steps: - uses: actions/checkout@v4 - name: Set Git config diff --git a/.github/workflows/reviewdog.yaml b/.github/workflows/reviewdog.yaml index a53eac78d4..4bf2f34b56 100644 --- a/.github/workflows/reviewdog.yaml +++ b/.github/workflows/reviewdog.yaml @@ -32,6 +32,9 @@ jobs: needs: [detect-ci-container] container: image: ghcr.io/vdaas/vald/vald-ci-container:${{ needs.detect-ci-container.outputs.TAG }} + credentials: + username: ${{ secrets.PACKAGE_USER }} + password: ${{ secrets.PACKAGE_TOKEN }} steps: - uses: actions/checkout@v4 - name: Set Git config @@ -51,6 +54,9 @@ jobs: needs: [detect-ci-container] container: image: ghcr.io/vdaas/vald/vald-ci-container:${{ needs.detect-ci-container.outputs.TAG }} + credentials: + username: ${{ secrets.PACKAGE_USER }} + password: ${{ secrets.PACKAGE_TOKEN }} steps: - uses: actions/checkout@v4 - name: Set Git config diff --git a/.github/workflows/test-hack.yaml b/.github/workflows/test-hack.yaml index bba85ea494..a7a79f45c7 100644 --- a/.github/workflows/test-hack.yaml +++ b/.github/workflows/test-hack.yaml @@ -55,6 +55,9 @@ jobs: needs: [detect-ci-container] container: image: ghcr.io/vdaas/vald/vald-ci-container:${{ needs.detect-ci-container.outputs.TAG }} + credentials: + username: ${{ secrets.PACKAGE_USER }} + password: ${{ secrets.PACKAGE_TOKEN }} defaults: run: working-directory: ${{ env.GOPATH }}/${{ env.PROJECT_ROOT_DIR }} diff --git a/.github/workflows/unit-test-rust.yaml b/.github/workflows/unit-test-rust.yaml index 2af9233787..7c7b1e22df 100644 --- a/.github/workflows/unit-test-rust.yaml +++ b/.github/workflows/unit-test-rust.yaml @@ -41,6 +41,9 @@ jobs: needs: [detect-ci-container] 
     container:
       image: ghcr.io/vdaas/vald/vald-ci-container:${{ needs.detect-ci-container.outputs.TAG }}
+      credentials:
+        username: ${{ secrets.PACKAGE_USER }}
+        password: ${{ secrets.PACKAGE_TOKEN }}
     steps:
       - uses: actions/checkout@v4
       - name: Set Git config
diff --git a/.github/workflows/unit-test.yaml b/.github/workflows/unit-test.yaml
index cb08b47ad8..1be7b6c58a 100644
--- a/.github/workflows/unit-test.yaml
+++ b/.github/workflows/unit-test.yaml
@@ -51,6 +51,9 @@ jobs:
     needs: [detect-ci-container]
     container:
       image: ghcr.io/vdaas/vald/vald-ci-container:${{ needs.detect-ci-container.outputs.TAG }}
+      credentials:
+        username: ${{ secrets.PACKAGE_USER }}
+        password: ${{ secrets.PACKAGE_TOKEN }}
     steps:
       - uses: actions/checkout@v4
       - name: Set Git config
@@ -68,6 +71,9 @@ jobs:
     needs: [detect-ci-container]
     container:
       image: ghcr.io/vdaas/vald/vald-ci-container:${{ needs.detect-ci-container.outputs.TAG }}
+      credentials:
+        username: ${{ secrets.PACKAGE_USER }}
+        password: ${{ secrets.PACKAGE_TOKEN }}
     steps:
       - uses: actions/checkout@v4
       - name: Set Git config
@@ -85,6 +91,9 @@ jobs:
     needs: [detect-ci-container]
     container:
       image: ghcr.io/vdaas/vald/vald-ci-container:${{ needs.detect-ci-container.outputs.TAG }}
+      credentials:
+        username: ${{ secrets.PACKAGE_USER }}
+        password: ${{ secrets.PACKAGE_TOKEN }}
     steps:
       - uses: actions/checkout@v4
       - name: Set Git config
diff --git a/.github/workflows/update-deps.yaml b/.github/workflows/update-deps.yaml
index eebd6c4839..b09b9c7698 100644
--- a/.github/workflows/update-deps.yaml
+++ b/.github/workflows/update-deps.yaml
@@ -31,6 +31,9 @@ jobs:
     needs: [detect-ci-container]
     container:
       image: ghcr.io/vdaas/vald/vald-ci-container:${{ needs.detect-ci-container.outputs.TAG }}
+      credentials:
+        username: ${{ secrets.PACKAGE_USER }}
+        password: ${{ secrets.PACKAGE_TOKEN }}
     steps:
       - uses: actions/checkout@v4
         with:
diff --git a/Makefile b/Makefile
index a3cd1ffbc9..168182952e 100644
--- a/Makefile
+++ b/Makefile
@@ -164,9 +164,9 @@ LDFLAGS = -static -fPIC -pthread -std=gnu++23 -lstdc++ -lm -z relro -z now -flto
 NGT_LDFLAGS = -fopenmp -lopenblas -llapack
 FAISS_LDFLAGS = $(NGT_LDFLAGS) -lgfortran
-HDF5_LDFLAGS = -lhdf5 -lhdf5_hl -lsz -laec -lz -ldl
+HDF5_LDFLAGS = -lhdf5 -lhdf5_hl -lsz -laec -lz -ldl -lm
 CGO_LDFLAGS = $(FAISS_LDFLAGS) $(HDF5_LDFLAGS)
-TEST_LDFLAGS = $(LDFLAGS) $(FAISS_LDFLAGS) $(HDF5_LDFLAGS)
+TEST_LDFLAGS = $(LDFLAGS) $(CGO_LDFLAGS)
 
 ifeq ($(GOARCH),amd64)
 CFLAGS ?= -mno-avx512f -mno-avx512dq -mno-avx512cd -mno-avx512bw -mno-avx512vl
@@ -352,6 +352,8 @@ CSPELL_EXTRA_OPTIONS ?=
 COMMA := ,
 SHELL = bash
 
+E2E_CONFIG ?= $(ROOTDIR)/tests/v2/e2e/assets/unary_crud.yaml
+E2E_ADDR ?= $(E2E_BIND_HOST):$(E2E_BIND_PORT)
 E2E_BIND_HOST ?= 127.0.0.1
 E2E_BIND_PORT ?= 8082
 E2E_DATASET_NAME ?= fashion-mnist-784-euclidean.hdf5
@@ -465,7 +467,13 @@ clean-generated:
 .PHONY: files
 ## add current repository file list to .gitfiles
 files:
-	git ls-files > $(ROOTDIR)/.gitfiles
+	@if [ ! -f $(ROOTDIR)/.gitfiles ]; then \
+		printf '\n%.0s' {1..15} > $(ROOTDIR)/.gitfiles; \
+	else \
+		head -n 15 $(ROOTDIR)/.gitfiles > $(ROOTDIR)/.gitfiles.tmp; \
+		git ls-files >> $(ROOTDIR)/.gitfiles.tmp; \
+		mv $(ROOTDIR)/.gitfiles.tmp $(ROOTDIR)/.gitfiles; \
+	fi
 
 .PHONY: license
 ## add license to files
@@ -541,12 +549,17 @@ format/go: \
 	golines/install \
 	gofumpt/install \
 	strictgoimports/install \
-	goimports/install
-	find $(ROOTDIR)/ -type d -name .git -prune -o -type f -regex '.*\.go' -print | xargs -P$(CORES) $(GOBIN)/golines -w -m $(GOLINES_MAX_WIDTH)
-	find $(ROOTDIR)/ -type d -name .git -prune -o -type f -regex '.*\.go' -print | xargs -P$(CORES) $(GOBIN)/strictgoimports -w
-	find $(ROOTDIR)/ -type d -name .git -prune -o -type f -regex '.*\.go' -print | xargs -P$(CORES) $(GOBIN)/goimports -w
-	find $(ROOTDIR)/ -type d -name .git -prune -o -type f -regex '.*\.go' -print | xargs -P$(CORES) $(GOBIN)/crlfmt -w -diff=false
-	find $(ROOTDIR)/ -type d -name .git -prune -o -type f -regex '.*\.go' -print | xargs -P$(CORES) $(GOBIN)/gofumpt -w
+	goimports/install \
+	files
+	@echo "Formatting Go files..."
+	@cat $(ROOTDIR)/.gitfiles | grep -e "\.go$$" | grep -v "_test\.go$$" | xargs -I {} -P$(CORES) bash -c '\
+		echo "Formatting {}" && \
+		$(GOBIN)/golines -w -m $(GOLINES_MAX_WIDTH) {} && \
+		$(GOBIN)/strictgoimports -w {} && \
+		$(GOBIN)/goimports -w {} && \
+		$(GOBIN)/crlfmt -w -diff=false {} && \
+		$(GOBIN)/gofumpt -w {}'
+	@echo "Go formatting complete."
 
 .PHONY: format/go/test
 ## run golines, gofumpt, goimports for go test files
@@ -555,12 +568,17 @@ format/go/test: \
 	golines/install \
 	gofumpt/install \
 	strictgoimports/install \
-	goimports/install
-	find $(ROOTDIR) -name '*_test.go' | xargs -P$(CORES) $(GOBIN)/golines -w -m $(GOLINES_MAX_WIDTH)
-	find $(ROOTDIR) -name '*_test.go' | xargs -P$(CORES) $(GOBIN)/strictgoimports -w
-	find $(ROOTDIR) -name '*_test.go' | xargs -P$(CORES) $(GOBIN)/goimports -w
-	find $(ROOTDIR) -name '*_test.go' | xargs -P$(CORES) $(GOBIN)/crlfmt -w -diff=false
-	find $(ROOTDIR) -name '*_test.go' | xargs -P$(CORES) $(GOBIN)/gofumpt -w
+	goimports/install \
+	files
+	@echo "Formatting Go Test files..."
+	@cat $(ROOTDIR)/.gitfiles | grep -e "_test\.go$$" | xargs -I {} -P$(CORES) bash -c '\
+		echo "Formatting Test file {}" && \
+		$(GOBIN)/golines -w -m $(GOLINES_MAX_WIDTH) {} && \
+		$(GOBIN)/strictgoimports -w {} && \
+		$(GOBIN)/goimports -w {} && \
+		$(GOBIN)/crlfmt -w -diff=false {} && \
+		$(GOBIN)/gofumpt -w {}'
+	@echo "Go test file formatting complete."
.PHONY: format/yaml format/yaml: \ diff --git a/Makefile.d/e2e.mk b/Makefile.d/e2e.mk index 6ebdc3ad35..00c515d4e3 100644 --- a/Makefile.d/e2e.mk +++ b/Makefile.d/e2e.mk @@ -19,6 +19,11 @@ e2e: $(call run-e2e-crud-test,-run TestE2EStandardCRUD) +.PHONY: e2e/v2 +## run e2e +e2e/v2: + $(call run-v2-e2e-crud-test,-run TestE2EStrategy) + .PHONY: e2e/faiss ## run e2e/faiss e2e/faiss: diff --git a/Makefile.d/functions.mk b/Makefile.d/functions.mk index 58d8ca8f17..b2718c3eb4 100644 --- a/Makefile.d/functions.mk +++ b/Makefile.d/functions.mk @@ -131,6 +131,28 @@ define telepresence ## --deployment-type "$(SWAP_DEPLOYMENT_TYPE)" endef +define run-v2-e2e-crud-test + GOPRIVATE=$(GOPRIVATE) \ + GOARCH=$(GOARCH) \ + GOOS=$(GOOS) \ + CGO_LDFLAGS="$(CGO_LDFLAGS)" \ + E2E_ADDR="$(E2E_BIND_HOST):$(E2E_BIND_PORT)" \ + E2E_BIND_HOST="$(E2E_BIND_HOST)" \ + E2E_BIND_PORT="$(E2E_BIND_PORT)" \ + E2E_TARGET_NAMESPACE="$(E2E_TARGET_NAMESPACE)" \ + E2E_TARGET_NAME="$(E2E_TARGET_NAME)" \ + E2E_DATASET_PATH="$(ROOTDIR)/hack/benchmark/assets/dataset/$(E2E_DATASET_NAME)" \ + go test \ + -race \ + -v \ + -mod=readonly \ + $1 \ + $(ROOTDIR)/tests/v2/e2e/crud \ + -tags "e2e" \ + -timeout $(E2E_TIMEOUT) \ + -config $(E2E_CONFIG) +endef + define run-e2e-crud-test GOPRIVATE=$(GOPRIVATE) \ GOARCH=$(GOARCH) \ diff --git a/buf.gen.yaml b/buf.gen.yaml index 7f3834ed1e..6f8c4afbf1 100644 --- a/buf.gen.yaml +++ b/buf.gen.yaml @@ -33,6 +33,9 @@ plugins: - remote: buf.build/community/mfridman-go-json out: apis/grpc opt: paths=source_relative + - remote: buf.build/srikrsna/protoc-gen-gotag + out: apis/grpc + opt: paths=source_relative - remote: buf.build/community/pseudomuto-doc out: apis/docs/v1 opt: markdown,docs.md diff --git a/cmd/index/job/deletion/main_test.go b/cmd/index/job/deletion/main_test.go new file mode 100644 index 0000000000..877d9c8b80 --- /dev/null +++ b/cmd/index/job/deletion/main_test.go @@ -0,0 +1,74 @@ +package main + +// NOT IMPLEMENTED BELOW +// +// func Test_main(t *testing.T) { +// type want struct { +// } +// type test struct { +// name string +// want want +// checkFunc func(want) error +// beforeFunc func(*testing.T) +// afterFunc func(*testing.T) +// } +// defaultCheckFunc := func(w want) error { +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T,) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T,) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T,) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T,) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// main() +// if err := checkFunc(test.want); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } diff --git a/dockers/agent/core/agent/Dockerfile b/dockers/agent/core/agent/Dockerfile index c77f010b4a..caf56e6e0e 100644 --- a/dockers/agent/core/agent/Dockerfile +++ b/dockers/agent/core/agent/Dockerfile @@ -94,4 +94,4 @@ LABEL 
maintainer="vdaas.org vald team " COPY --from=builder /usr/bin/agent /usr/bin/agent # skipcq: DOK-DL3002 USER nonroot:nonroot -ENTRYPOINT ["/usr/bin/agent"] \ No newline at end of file +ENTRYPOINT ["/usr/bin/agent"] diff --git a/dockers/agent/core/faiss/Dockerfile b/dockers/agent/core/faiss/Dockerfile index 49288c2c95..12d0a9ecf8 100644 --- a/dockers/agent/core/faiss/Dockerfile +++ b/dockers/agent/core/faiss/Dockerfile @@ -96,4 +96,4 @@ COPY --from=builder /usr/bin/faiss /usr/bin/faiss COPY cmd/agent/core/faiss/sample.yaml /etc/server/config.yaml # skipcq: DOK-DL3002 USER nonroot:nonroot -ENTRYPOINT ["/usr/bin/faiss"] \ No newline at end of file +ENTRYPOINT ["/usr/bin/faiss"] diff --git a/dockers/agent/core/ngt/Dockerfile b/dockers/agent/core/ngt/Dockerfile index c806de1ea6..2acf8fc761 100644 --- a/dockers/agent/core/ngt/Dockerfile +++ b/dockers/agent/core/ngt/Dockerfile @@ -95,4 +95,4 @@ COPY --from=builder /usr/bin/ngt /usr/bin/ngt COPY cmd/agent/core/ngt/sample.yaml /etc/server/config.yaml # skipcq: DOK-DL3002 USER nonroot:nonroot -ENTRYPOINT ["/usr/bin/ngt"] \ No newline at end of file +ENTRYPOINT ["/usr/bin/ngt"] diff --git a/dockers/agent/sidecar/Dockerfile b/dockers/agent/sidecar/Dockerfile index d5cd04ea94..ede4d8c618 100644 --- a/dockers/agent/sidecar/Dockerfile +++ b/dockers/agent/sidecar/Dockerfile @@ -85,4 +85,4 @@ LABEL maintainer="vdaas.org vald team " COPY --from=builder /usr/bin/sidecar /usr/bin/sidecar # skipcq: DOK-DL3002 USER nonroot:nonroot -ENTRYPOINT ["/usr/bin/sidecar"] \ No newline at end of file +ENTRYPOINT ["/usr/bin/sidecar"] diff --git a/dockers/binfmt/Dockerfile b/dockers/binfmt/Dockerfile index f63da8d40a..0be93ba3d9 100644 --- a/dockers/binfmt/Dockerfile +++ b/dockers/binfmt/Dockerfile @@ -17,4 +17,4 @@ # # DO_NOT_EDIT this Dockerfile is generated by https://github.com/vdaas/vald/blob/main/hack/docker/gen/main.go -FROM tonistiigi/binfmt:master AS builder \ No newline at end of file +FROM tonistiigi/binfmt:master AS builder diff --git a/dockers/buildbase/Dockerfile b/dockers/buildbase/Dockerfile index 85d2385659..5dde4958c7 100644 --- a/dockers/buildbase/Dockerfile +++ b/dockers/buildbase/Dockerfile @@ -17,4 +17,4 @@ # # DO_NOT_EDIT this Dockerfile is generated by https://github.com/vdaas/vald/blob/main/hack/docker/gen/main.go -FROM ubuntu:devel AS builder \ No newline at end of file +FROM ubuntu:devel AS builder diff --git a/dockers/buildkit/Dockerfile b/dockers/buildkit/Dockerfile index e63c6f5ebc..43a5a5b0b3 100644 --- a/dockers/buildkit/Dockerfile +++ b/dockers/buildkit/Dockerfile @@ -17,4 +17,4 @@ # # DO_NOT_EDIT this Dockerfile is generated by https://github.com/vdaas/vald/blob/main/hack/docker/gen/main.go -FROM moby/buildkit:master AS builder \ No newline at end of file +FROM moby/buildkit:master AS builder diff --git a/dockers/buildkit/syft/scanner/Dockerfile b/dockers/buildkit/syft/scanner/Dockerfile index b840429bf4..87be4558e9 100644 --- a/dockers/buildkit/syft/scanner/Dockerfile +++ b/dockers/buildkit/syft/scanner/Dockerfile @@ -17,4 +17,4 @@ # # DO_NOT_EDIT this Dockerfile is generated by https://github.com/vdaas/vald/blob/main/hack/docker/gen/main.go -FROM docker/buildkit-syft-scanner:edge AS scanner \ No newline at end of file +FROM docker/buildkit-syft-scanner:edge AS scanner diff --git a/dockers/ci/base/Dockerfile b/dockers/ci/base/Dockerfile index 540c43eb2b..12572f742a 100644 --- a/dockers/ci/base/Dockerfile +++ b/dockers/ci/base/Dockerfile @@ -127,4 +127,4 @@ RUN --mount=type=bind,target=.,rw \ && rm -rf 
${GOPATH}/src/github.com/${ORG}/${REPO}/* # skipcq: DOK-DL3002 USER root:root -ENTRYPOINT ["/bin/bash"] \ No newline at end of file +ENTRYPOINT ["/bin/bash"] diff --git a/dockers/dev/Dockerfile b/dockers/dev/Dockerfile index 03c4eeaa6f..f176b94d28 100644 --- a/dockers/dev/Dockerfile +++ b/dockers/dev/Dockerfile @@ -140,4 +140,4 @@ RUN --mount=type=bind,target=.,rw \ && make faiss/install \ && rm -rf ${GOPATH}/src/github.com/${ORG}/${REPO}/* # skipcq: DOK-DL3002 -USER root:root \ No newline at end of file +USER root:root diff --git a/dockers/discoverer/k8s/Dockerfile b/dockers/discoverer/k8s/Dockerfile index b62f0b6483..91555000dd 100644 --- a/dockers/discoverer/k8s/Dockerfile +++ b/dockers/discoverer/k8s/Dockerfile @@ -86,4 +86,4 @@ COPY --from=builder /usr/bin/discoverer /usr/bin/discoverer COPY cmd/discoverer/k8s/sample.yaml /etc/server/config.yaml # skipcq: DOK-DL3002 USER nonroot:nonroot -ENTRYPOINT ["/usr/bin/discoverer"] \ No newline at end of file +ENTRYPOINT ["/usr/bin/discoverer"] diff --git a/dockers/example/client/Dockerfile b/dockers/example/client/Dockerfile index 6d4b012261..5ec5cdc9a8 100644 --- a/dockers/example/client/Dockerfile +++ b/dockers/example/client/Dockerfile @@ -92,4 +92,4 @@ LABEL maintainer="vdaas.org vald team " COPY --from=builder /usr/bin/client /usr/bin/client # skipcq: DOK-DL3002 USER nonroot:nonroot -ENTRYPOINT ["/usr/bin/client"] \ No newline at end of file +ENTRYPOINT ["/usr/bin/client"] diff --git a/dockers/gateway/filter/Dockerfile b/dockers/gateway/filter/Dockerfile index 1764f27578..81538ee43d 100644 --- a/dockers/gateway/filter/Dockerfile +++ b/dockers/gateway/filter/Dockerfile @@ -86,4 +86,4 @@ COPY --from=builder /usr/bin/filter /usr/bin/filter COPY cmd/gateway/filter/sample.yaml /etc/server/config.yaml # skipcq: DOK-DL3002 USER nonroot:nonroot -ENTRYPOINT ["/usr/bin/filter"] \ No newline at end of file +ENTRYPOINT ["/usr/bin/filter"] diff --git a/dockers/gateway/lb/Dockerfile b/dockers/gateway/lb/Dockerfile index eba440c8fc..e31433c4b6 100644 --- a/dockers/gateway/lb/Dockerfile +++ b/dockers/gateway/lb/Dockerfile @@ -86,4 +86,4 @@ COPY --from=builder /usr/bin/lb /usr/bin/lb COPY cmd/gateway/lb/sample.yaml /etc/server/config.yaml # skipcq: DOK-DL3002 USER nonroot:nonroot -ENTRYPOINT ["/usr/bin/lb"] \ No newline at end of file +ENTRYPOINT ["/usr/bin/lb"] diff --git a/dockers/gateway/mirror/Dockerfile b/dockers/gateway/mirror/Dockerfile index 3010c06b95..2ae86669a2 100644 --- a/dockers/gateway/mirror/Dockerfile +++ b/dockers/gateway/mirror/Dockerfile @@ -86,4 +86,4 @@ COPY --from=builder /usr/bin/mirror /usr/bin/mirror COPY cmd/gateway/mirror/sample.yaml /etc/server/config.yaml # skipcq: DOK-DL3002 USER nonroot:nonroot -ENTRYPOINT ["/usr/bin/mirror"] \ No newline at end of file +ENTRYPOINT ["/usr/bin/mirror"] diff --git a/dockers/index/job/correction/Dockerfile b/dockers/index/job/correction/Dockerfile index 8ae06c4216..058abbf82d 100644 --- a/dockers/index/job/correction/Dockerfile +++ b/dockers/index/job/correction/Dockerfile @@ -86,4 +86,4 @@ COPY --from=builder /usr/bin/index-correction /usr/bin/index-correction COPY cmd/index/job/correction/sample.yaml /etc/server/config.yaml # skipcq: DOK-DL3002 USER nonroot:nonroot -ENTRYPOINT ["/usr/bin/index-correction"] \ No newline at end of file +ENTRYPOINT ["/usr/bin/index-correction"] diff --git a/dockers/index/job/creation/Dockerfile b/dockers/index/job/creation/Dockerfile index f04848ba16..96262c44a0 100644 --- a/dockers/index/job/creation/Dockerfile +++ b/dockers/index/job/creation/Dockerfile @@ 
-86,4 +86,4 @@ COPY --from=builder /usr/bin/index-creation /usr/bin/index-creation COPY cmd/index/job/creation/sample.yaml /etc/server/config.yaml # skipcq: DOK-DL3002 USER nonroot:nonroot -ENTRYPOINT ["/usr/bin/index-creation"] \ No newline at end of file +ENTRYPOINT ["/usr/bin/index-creation"] diff --git a/dockers/index/job/deletion/Dockerfile b/dockers/index/job/deletion/Dockerfile index a8e30aafb3..d4226f8896 100644 --- a/dockers/index/job/deletion/Dockerfile +++ b/dockers/index/job/deletion/Dockerfile @@ -86,4 +86,4 @@ COPY --from=builder /usr/bin/index-deletion /usr/bin/index-deletion COPY cmd/index/job/deletion/sample.yaml /etc/server/config.yaml # skipcq: DOK-DL3002 USER nonroot:nonroot -ENTRYPOINT ["/usr/bin/index-deletion"] \ No newline at end of file +ENTRYPOINT ["/usr/bin/index-deletion"] diff --git a/dockers/index/job/readreplica/rotate/Dockerfile b/dockers/index/job/readreplica/rotate/Dockerfile index 757abce176..330e68b21f 100644 --- a/dockers/index/job/readreplica/rotate/Dockerfile +++ b/dockers/index/job/readreplica/rotate/Dockerfile @@ -86,4 +86,4 @@ COPY --from=builder /usr/bin/readreplica-rotate /usr/bin/readreplica-rotate COPY cmd/index/job/readreplica/rotate/sample.yaml /etc/server/config.yaml # skipcq: DOK-DL3002 USER nonroot:nonroot -ENTRYPOINT ["/usr/bin/readreplica-rotate"] \ No newline at end of file +ENTRYPOINT ["/usr/bin/readreplica-rotate"] diff --git a/dockers/index/job/save/Dockerfile b/dockers/index/job/save/Dockerfile index 88eaca0c07..903988ea48 100644 --- a/dockers/index/job/save/Dockerfile +++ b/dockers/index/job/save/Dockerfile @@ -86,4 +86,4 @@ COPY --from=builder /usr/bin/index-save /usr/bin/index-save COPY cmd/index/job/save/sample.yaml /etc/server/config.yaml # skipcq: DOK-DL3002 USER nonroot:nonroot -ENTRYPOINT ["/usr/bin/index-save"] \ No newline at end of file +ENTRYPOINT ["/usr/bin/index-save"] diff --git a/dockers/index/operator/Dockerfile b/dockers/index/operator/Dockerfile index c31cf644bc..fdfa113d5e 100644 --- a/dockers/index/operator/Dockerfile +++ b/dockers/index/operator/Dockerfile @@ -86,4 +86,4 @@ COPY --from=builder /usr/bin/index-operator /usr/bin/index-operator COPY cmd/index/operator/sample.yaml /etc/server/config.yaml # skipcq: DOK-DL3002 USER nonroot:nonroot -ENTRYPOINT ["/usr/bin/index-operator"] \ No newline at end of file +ENTRYPOINT ["/usr/bin/index-operator"] diff --git a/dockers/manager/index/Dockerfile b/dockers/manager/index/Dockerfile index c3f00056e2..80426e7c6d 100644 --- a/dockers/manager/index/Dockerfile +++ b/dockers/manager/index/Dockerfile @@ -86,4 +86,4 @@ COPY --from=builder /usr/bin/index /usr/bin/index COPY cmd/manager/index/sample.yaml /etc/server/config.yaml # skipcq: DOK-DL3002 USER nonroot:nonroot -ENTRYPOINT ["/usr/bin/index"] \ No newline at end of file +ENTRYPOINT ["/usr/bin/index"] diff --git a/dockers/operator/helm/Dockerfile b/dockers/operator/helm/Dockerfile index 0bdbd4ae13..b40530fc31 100644 --- a/dockers/operator/helm/Dockerfile +++ b/dockers/operator/helm/Dockerfile @@ -107,4 +107,4 @@ COPY --from=builder /opt/helm/charts/vald /opt/helm/charts/vald COPY --from=builder /opt/helm/charts/vald-helm-operator /opt/helm/charts/vald-helm-operator # skipcq: DOK-DL3002 USER nonroot:nonroot -ENTRYPOINT ["/usr/bin/helm-operator", "run", "--watches-file=/opt/helm/watches.yaml"] \ No newline at end of file +ENTRYPOINT ["/usr/bin/helm-operator", "run", "--watches-file=/opt/helm/watches.yaml"] diff --git a/dockers/tools/benchmark/job/Dockerfile b/dockers/tools/benchmark/job/Dockerfile index 
69fc24185b..fc778ef054 100644
--- a/dockers/tools/benchmark/job/Dockerfile
+++ b/dockers/tools/benchmark/job/Dockerfile
@@ -93,4 +93,4 @@ COPY --from=builder /usr/bin/job /usr/bin/job
 COPY cmd/tools/benchmark/job/sample.yaml /etc/server/config.yaml
 # skipcq: DOK-DL3002
 USER nonroot:nonroot
-ENTRYPOINT ["/usr/bin/job"]
\ No newline at end of file
+ENTRYPOINT ["/usr/bin/job"]
diff --git a/dockers/tools/benchmark/operator/Dockerfile b/dockers/tools/benchmark/operator/Dockerfile
index b352196472..c225fbaac6 100644
--- a/dockers/tools/benchmark/operator/Dockerfile
+++ b/dockers/tools/benchmark/operator/Dockerfile
@@ -86,4 +86,4 @@ COPY --from=builder /usr/bin/operator /usr/bin/operator
 COPY cmd/tools/benchmark/operator/sample.yaml /etc/server/config.yaml
 # skipcq: DOK-DL3002
 USER nonroot:nonroot
-ENTRYPOINT ["/usr/bin/operator"]
\ No newline at end of file
+ENTRYPOINT ["/usr/bin/operator"]
diff --git a/dockers/tools/cli/loadtest/Dockerfile b/dockers/tools/cli/loadtest/Dockerfile
index 0b80e452bf..9ac5f7dd14 100644
--- a/dockers/tools/cli/loadtest/Dockerfile
+++ b/dockers/tools/cli/loadtest/Dockerfile
@@ -93,4 +93,4 @@ COPY --from=builder /usr/bin/loadtest /usr/bin/loadtest
 COPY cmd/tools/cli/loadtest/sample.yaml /etc/server/config.yaml
 # skipcq: DOK-DL3002
 USER nonroot:nonroot
-ENTRYPOINT ["/usr/bin/loadtest"]
\ No newline at end of file
+ENTRYPOINT ["/usr/bin/loadtest"]
diff --git a/internal/backoff/option.go b/internal/backoff/option.go
index 35e651efca..6266e1ed4c 100644
--- a/internal/backoff/option.go
+++ b/internal/backoff/option.go
@@ -33,7 +33,6 @@ var defaultOptions = []Option{
 	WithJitterLimit("1m"),
 	WithBackOffFactor(1.5),
 	WithRetryCount(50),
-	WithEnableErrorLog(),
 }
 
 // WithInitialDuration returns the option to set the initial duration of backoff.
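Reviewer note: the internal/backoff/option.go hunk above removes WithEnableErrorLog() from defaultOptions, so per-retry error logging becomes opt-in rather than on by default. A minimal sketch of how a caller could opt back in, assuming the package's existing New constructor and option signatures (the construction site itself is illustrative, not taken from this diff):

```go
package main

import "github.com/vdaas/vald/internal/backoff"

func main() {
	// WithEnableErrorLog is no longer part of defaultOptions, so callers that
	// still want each retry error to be logged must enable it explicitly.
	b := backoff.New(
		backoff.WithRetryCount(50),
		backoff.WithEnableErrorLog(), // opt back in to per-retry error logging
	)
	_ = b
}
```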
diff --git a/internal/client/v1/client/discoverer/discover_test.go b/internal/client/v1/client/discoverer/discover_test.go index 7c21da5ade..f35fad520c 100644 --- a/internal/client/v1/client/discoverer/discover_test.go +++ b/internal/client/v1/client/discoverer/discover_test.go @@ -1080,7 +1080,6 @@ func Test_client_GetReadClient_concurrent(t *testing.T) { // func Test_client_dnsDiscovery(t *testing.T) { // type args struct { // ctx context.Context -// ech chan<- error // } // type fields struct { // autoconn bool @@ -1131,7 +1130,6 @@ func Test_client_GetReadClient_concurrent(t *testing.T) { // name: "test_case_1", // args: args { // ctx:nil, -// ech:nil, // }, // fields: fields { // autoconn:false, @@ -1171,7 +1169,6 @@ func Test_client_GetReadClient_concurrent(t *testing.T) { // name: "test_case_2", // args: args { // ctx:nil, -// ech:nil, // }, // fields: fields { // autoconn:false, @@ -1242,7 +1239,7 @@ func Test_client_GetReadClient_concurrent(t *testing.T) { // roundRobin: test.fields.roundRobin, // } // -// gotAddrs, err := c.dnsDiscovery(test.args.ctx, test.args.ech) +// gotAddrs, err := c.dnsDiscovery(test.args.ctx) // if err := checkFunc(test.want, gotAddrs, err); err != nil { // tt.Errorf("error = %v", err) // } @@ -1253,7 +1250,6 @@ func Test_client_GetReadClient_concurrent(t *testing.T) { // func Test_client_discover(t *testing.T) { // type args struct { // ctx context.Context -// ech chan<- error // } // type fields struct { // autoconn bool @@ -1300,7 +1296,6 @@ func Test_client_GetReadClient_concurrent(t *testing.T) { // name: "test_case_1", // args: args { // ctx:nil, -// ech:nil, // }, // fields: fields { // autoconn:false, @@ -1340,7 +1335,6 @@ func Test_client_GetReadClient_concurrent(t *testing.T) { // name: "test_case_2", // args: args { // ctx:nil, -// ech:nil, // }, // fields: fields { // autoconn:false, @@ -1411,7 +1405,7 @@ func Test_client_GetReadClient_concurrent(t *testing.T) { // roundRobin: test.fields.roundRobin, // } // -// err := c.discover(test.args.ctx, test.args.ech) +// err := c.discover(test.args.ctx) // if err := checkFunc(test.want, err); err != nil { // tt.Errorf("error = %v", err) // } @@ -1422,7 +1416,6 @@ func Test_client_GetReadClient_concurrent(t *testing.T) { // func Test_client_updateDiscoveryInfo(t *testing.T) { // type args struct { // ctx context.Context -// ech chan<- error // } // type fields struct { // autoconn bool @@ -1473,7 +1466,6 @@ func Test_client_GetReadClient_concurrent(t *testing.T) { // name: "test_case_1", // args: args { // ctx:nil, -// ech:nil, // }, // fields: fields { // autoconn:false, @@ -1513,7 +1505,6 @@ func Test_client_GetReadClient_concurrent(t *testing.T) { // name: "test_case_2", // args: args { // ctx:nil, -// ech:nil, // }, // fields: fields { // autoconn:false, @@ -1584,7 +1575,7 @@ func Test_client_GetReadClient_concurrent(t *testing.T) { // roundRobin: test.fields.roundRobin, // } // -// gotConnected, err := c.updateDiscoveryInfo(test.args.ctx, test.args.ech) +// gotConnected, err := c.updateDiscoveryInfo(test.args.ctx) // if err := checkFunc(test.want, gotConnected, err); err != nil { // tt.Errorf("error = %v", err) // } @@ -1766,7 +1757,6 @@ func Test_client_GetReadClient_concurrent(t *testing.T) { // type args struct { // ctx context.Context // nodes *payload.Info_Nodes -// ech chan<- error // } // type fields struct { // autoconn bool @@ -1818,7 +1808,6 @@ func Test_client_GetReadClient_concurrent(t *testing.T) { // args: args { // ctx:nil, // nodes:nil, -// ech:nil, // }, // fields: fields { // 
autoconn:false, @@ -1859,7 +1848,6 @@ func Test_client_GetReadClient_concurrent(t *testing.T) { // args: args { // ctx:nil, // nodes:nil, -// ech:nil, // }, // fields: fields { // autoconn:false, @@ -1930,7 +1918,7 @@ func Test_client_GetReadClient_concurrent(t *testing.T) { // roundRobin: test.fields.roundRobin, // } // -// gotAddrs, err := c.discoverAddrs(test.args.ctx, test.args.nodes, test.args.ech) +// gotAddrs, err := c.discoverAddrs(test.args.ctx, test.args.nodes) // if err := checkFunc(test.want, gotAddrs, err); err != nil { // tt.Errorf("error = %v", err) // } @@ -1943,7 +1931,6 @@ func Test_client_GetReadClient_concurrent(t *testing.T) { // ctx context.Context // oldAddrs []string // connectedAddrs []string -// ech chan<- error // } // type fields struct { // autoconn bool @@ -1992,7 +1979,6 @@ func Test_client_GetReadClient_concurrent(t *testing.T) { // ctx:nil, // oldAddrs:nil, // connectedAddrs:nil, -// ech:nil, // }, // fields: fields { // autoconn:false, @@ -2034,7 +2020,6 @@ func Test_client_GetReadClient_concurrent(t *testing.T) { // ctx:nil, // oldAddrs:nil, // connectedAddrs:nil, -// ech:nil, // }, // fields: fields { // autoconn:false, @@ -2105,7 +2090,7 @@ func Test_client_GetReadClient_concurrent(t *testing.T) { // roundRobin: test.fields.roundRobin, // } // -// err := c.disconnectOldAddrs(test.args.ctx, test.args.oldAddrs, test.args.connectedAddrs, test.args.ech) +// err := c.disconnectOldAddrs(test.args.ctx, test.args.oldAddrs, test.args.connectedAddrs) // if err := checkFunc(test.want, err); err != nil { // tt.Errorf("error = %v", err) // } diff --git a/internal/config/benchmark_test.go b/internal/config/benchmark_test.go index a23d2afe5c..95cead747a 100644 --- a/internal/config/benchmark_test.go +++ b/internal/config/benchmark_test.go @@ -19,6 +19,7 @@ package config // type fields struct { // Host string // Port int +// Meta map[string]string // } // type want struct { // want *BenchmarkTarget @@ -45,6 +46,7 @@ package config // fields: fields { // Host:"", // Port:0, +// Meta:nil, // }, // want: want{}, // checkFunc: defaultCheckFunc, @@ -65,6 +67,7 @@ package config // fields: fields { // Host:"", // Port:0, +// Meta:nil, // }, // want: want{}, // checkFunc: defaultCheckFunc, @@ -97,6 +100,7 @@ package config // tr := &BenchmarkTarget{ // Host: test.fields.Host, // Port: test.fields.Port, +// Meta: test.fields.Meta, // } // // got := tr.Bind() diff --git a/internal/config/config.go b/internal/config/config.go index ac92e2f5ca..12fe63b451 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -63,7 +63,7 @@ func (c *GlobalConfig) Bind() *GlobalConfig { } // Read returns config struct or error when decoding the configuration file to actually *Config struct. -func Read(path string, cfg any) (err error) { +func Read[T any](path string, cfg T) (err error) { f, err := file.Open(path, os.O_RDONLY, fs.ModePerm) if err != nil { return err @@ -96,28 +96,28 @@ func Read(path string, cfg any) (err error) { // GetActualValue returns the environment variable value if the val has prefix and suffix "_", // if actual value start with file://{path} the return value will read from file // otherwise the val will directly return. 
-func GetActualValue(val string) (res string) { +func GetActualValue[T ~string](val T) (res T) { if checkPrefixAndSuffix(val, envSymbol, envSymbol) { - val = strings.TrimPrefix(strings.TrimSuffix(val, envSymbol), envSymbol) - if !strings.HasPrefix(val, "$") { + val = T(strings.TrimPrefix(strings.TrimSuffix(string(val), envSymbol), envSymbol)) + if !strings.HasPrefix(string(val), "$") { val = "$" + val } } - res = os.ExpandEnv(val) - if strings.HasPrefix(res, fileValuePrefix) { - body, err := file.ReadFile(strings.TrimPrefix(res, fileValuePrefix)) + r := os.ExpandEnv(string(val)) + if strings.HasPrefix(r, fileValuePrefix) { + body, err := file.ReadFile(strings.TrimPrefix(r, fileValuePrefix)) if err != nil || body == nil { return } - res = conv.Btoa(body) + r = conv.Btoa(body) } - return + return T(r) } // GetActualValues returns the environment variable values if the vals has string slice that has prefix and suffix "_", // if actual value start with file://{path} the return value will read from file // otherwise the val will directly return. -func GetActualValues(vals []string) []string { +func GetActualValues[T ~string](vals []T) []T { for i, val := range vals { vals[i] = GetActualValue(val) } @@ -125,8 +125,8 @@ func GetActualValues(vals []string) []string { } // checkPrefixAndSuffix checks if the str has prefix and suffix. -func checkPrefixAndSuffix(str, pref, suf string) bool { - return strings.HasPrefix(str, pref) && strings.HasSuffix(str, suf) +func checkPrefixAndSuffix[T ~string](str, pref, suf T) bool { + return strings.HasPrefix(string(str), string(pref)) && strings.HasSuffix(string(str), string(suf)) } // ToRawYaml writes the YAML encoding of v to the stream and returns the string written to stream. diff --git a/internal/config/index_deleter_test.go b/internal/config/index_deleter_test.go new file mode 100644 index 0000000000..418f03599d --- /dev/null +++ b/internal/config/index_deleter_test.go @@ -0,0 +1,128 @@ +package config + +// NOT IMPLEMENTED BELOW +// +// func TestIndexDeleter_Bind(t *testing.T) { +// type fields struct { +// IndexID string +// AgentPort int +// AgentName string +// AgentNamespace string +// AgentDNS string +// NodeName string +// Concurrency int +// DeletionPoolSize uint32 +// TargetAddrs []string +// Discoverer *DiscovererClient +// } +// type want struct { +// want *IndexDeleter +// } +// type test struct { +// name string +// fields fields +// want want +// checkFunc func(want, *IndexDeleter) error +// beforeFunc func(*testing.T) +// afterFunc func(*testing.T) +// } +// defaultCheckFunc := func(w want, got *IndexDeleter) error { +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// fields: fields { +// IndexID:"", +// AgentPort:0, +// AgentName:"", +// AgentNamespace:"", +// AgentDNS:"", +// NodeName:"", +// Concurrency:0, +// DeletionPoolSize:0, +// TargetAddrs:nil, +// Discoverer:DiscovererClient{}, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T,) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T,) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// fields: fields { +// IndexID:"", +// AgentPort:0, +// AgentName:"", +// AgentNamespace:"", +// AgentDNS:"", +// NodeName:"", +// Concurrency:0, +// DeletionPoolSize:0, +// TargetAddrs:nil, +// 
Discoverer:DiscovererClient{}, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T,) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T,) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// ic := &IndexDeleter{ +// IndexID: test.fields.IndexID, +// AgentPort: test.fields.AgentPort, +// AgentName: test.fields.AgentName, +// AgentNamespace: test.fields.AgentNamespace, +// AgentDNS: test.fields.AgentDNS, +// NodeName: test.fields.NodeName, +// Concurrency: test.fields.Concurrency, +// DeletionPoolSize: test.fields.DeletionPoolSize, +// TargetAddrs: test.fields.TargetAddrs, +// Discoverer: test.fields.Discoverer, +// } +// +// got := ic.Bind() +// if err := checkFunc(test.want, got); err != nil { +// tt.Errorf("error = %v", err) +// } +// +// }) +// } +// } diff --git a/internal/core/algorithm/usearch/usearch_test.go b/internal/core/algorithm/usearch/usearch_test.go index 9081fe18ed..74e1af2ea7 100644 --- a/internal/core/algorithm/usearch/usearch_test.go +++ b/internal/core/algorithm/usearch/usearch_test.go @@ -667,7 +667,7 @@ func Test_usearch_Search(t *testing.T) { // expansionSearch:0, // multi:false, // idxPath:"", -// mu:sync.RWMutex{}, +// mu:nil, // }, // want: want{}, // checkFunc: defaultCheckFunc, @@ -695,7 +695,7 @@ func Test_usearch_Search(t *testing.T) { // expansionSearch:0, // multi:false, // idxPath:"", -// mu:sync.RWMutex{}, +// mu:nil, // }, // want: want{}, // checkFunc: defaultCheckFunc, @@ -798,7 +798,7 @@ func Test_usearch_Search(t *testing.T) { // expansionSearch:0, // multi:false, // idxPath:"", -// mu:sync.RWMutex{}, +// mu:nil, // }, // want: want{}, // checkFunc: defaultCheckFunc, @@ -829,7 +829,7 @@ func Test_usearch_Search(t *testing.T) { // expansionSearch:0, // multi:false, // idxPath:"", -// mu:sync.RWMutex{}, +// mu:nil, // }, // want: want{}, // checkFunc: defaultCheckFunc, @@ -929,7 +929,7 @@ func Test_usearch_Search(t *testing.T) { // expansionSearch:0, // multi:false, // idxPath:"", -// mu:sync.RWMutex{}, +// mu:nil, // }, // want: want{}, // checkFunc: defaultCheckFunc, @@ -957,7 +957,7 @@ func Test_usearch_Search(t *testing.T) { // expansionSearch:0, // multi:false, // idxPath:"", -// mu:sync.RWMutex{}, +// mu:nil, // }, // want: want{}, // checkFunc: defaultCheckFunc, @@ -1062,7 +1062,7 @@ func Test_usearch_Search(t *testing.T) { // expansionSearch:0, // multi:false, // idxPath:"", -// mu:sync.RWMutex{}, +// mu:nil, // }, // want: want{}, // checkFunc: defaultCheckFunc, @@ -1094,7 +1094,7 @@ func Test_usearch_Search(t *testing.T) { // expansionSearch:0, // multi:false, // idxPath:"", -// mu:sync.RWMutex{}, +// mu:nil, // }, // want: want{}, // checkFunc: defaultCheckFunc, @@ -1197,7 +1197,7 @@ func Test_usearch_Search(t *testing.T) { // expansionSearch:0, // multi:false, // idxPath:"", -// mu:sync.RWMutex{}, +// mu:nil, // }, // want: want{}, // checkFunc: defaultCheckFunc, @@ -1228,7 +1228,7 @@ func Test_usearch_Search(t *testing.T) { // expansionSearch:0, // multi:false, // idxPath:"", -// mu:sync.RWMutex{}, +// mu:nil, // }, // want: want{}, // checkFunc: defaultCheckFunc, @@ -1337,7 +1337,7 @@ func 
Test_usearch_Search(t *testing.T) { // expansionSearch:0, // multi:false, // idxPath:"", -// mu:sync.RWMutex{}, +// mu:nil, // }, // want: want{}, // checkFunc: defaultCheckFunc, @@ -1369,7 +1369,7 @@ func Test_usearch_Search(t *testing.T) { // expansionSearch:0, // multi:false, // idxPath:"", -// mu:sync.RWMutex{}, +// mu:nil, // }, // want: want{}, // checkFunc: defaultCheckFunc, @@ -1472,7 +1472,7 @@ func Test_usearch_Search(t *testing.T) { // expansionSearch:0, // multi:false, // idxPath:"", -// mu:sync.RWMutex{}, +// mu:nil, // }, // want: want{}, // checkFunc: defaultCheckFunc, @@ -1503,7 +1503,7 @@ func Test_usearch_Search(t *testing.T) { // expansionSearch:0, // multi:false, // idxPath:"", -// mu:sync.RWMutex{}, +// mu:nil, // }, // want: want{}, // checkFunc: defaultCheckFunc, @@ -1599,7 +1599,7 @@ func Test_usearch_Search(t *testing.T) { // expansionSearch:0, // multi:false, // idxPath:"", -// mu:sync.RWMutex{}, +// mu:nil, // }, // want: want{}, // checkFunc: defaultCheckFunc, @@ -1627,7 +1627,7 @@ func Test_usearch_Search(t *testing.T) { // expansionSearch:0, // multi:false, // idxPath:"", -// mu:sync.RWMutex{}, +// mu:nil, // }, // want: want{}, // checkFunc: defaultCheckFunc, diff --git a/internal/errors/errors.go b/internal/errors/errors.go index 851ca4d829..be03a58190 100644 --- a/internal/errors/errors.go +++ b/internal/errors/errors.go @@ -83,6 +83,10 @@ var ( return Wrapf(err, "failed to output %s logs", str) } + ErrUnimplemented = func(name string) error { + return Errorf("%s is unimplemented", name) + } + // New represents a function to generate the new error with a message. // When the message is nil, it will return nil instead of an error. New = func(msg string) error { diff --git a/internal/errors/errors_test.go b/internal/errors/errors_test.go index 5f7d22ed1d..b7ae1ac55e 100644 --- a/internal/errors/errors_test.go +++ b/internal/errors/errors_test.go @@ -1734,6 +1734,97 @@ func TestJoin(t *testing.T) { // NOT IMPLEMENTED BELOW // +// func Test_is(t *testing.T) { +// type args struct { +// err error +// target error +// targetComparable bool +// } +// type want struct { +// wantSame bool +// } +// type test struct { +// name string +// args args +// want want +// checkFunc func(want, bool) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, gotSame bool) error { +// if !reflect.DeepEqual(gotSame, w.wantSame) { +// return Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotSame, w.wantSame) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// err:nil, +// target:nil, +// targetComparable:false, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// err:nil, +// target:nil, +// targetComparable:false, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// 
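
A hypothetical call site for the new ErrUnimplemented constructor; the server type and the GetTimestamp method are placeholders, not part of this change.

import "github.com/vdaas/vald/internal/errors"

// GetTimestamp is a stubbed handler that reports the shared
// "<name> is unimplemented" error instead of an ad-hoc message.
func (s *server) GetTimestamp() error {
	return errors.ErrUnimplemented("GetTimestamp")
}
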
} +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// gotSame := is(test.args.err, test.args.target, test.args.targetComparable) +// if err := checkFunc(test.want, gotSame); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// // func TestUnwrap(t *testing.T) { // type args struct { // err error diff --git a/internal/errors/k8s.go b/internal/errors/k8s.go index 6a4c91f103..cfd6d78aec 100644 --- a/internal/errors/k8s.go +++ b/internal/errors/k8s.go @@ -17,4 +17,30 @@ // Package errors provides error types and function package errors -var ErrInvalidReconcilerConfig = New("invalid reconciler config") +var ( + ErrInvalidReconcilerConfig = New("invalid reconciler config") + + ErrPodIsNotRunning = func(namespace, name string) error { + return Errorf("pod %s/%s is not running", namespace, name) + } + + ErrPortForwardAddressNotFound = New("port forward address not found") + + ErrPortForwardPortPairNotFound = New("port forward port pair not found") + + ErrKubernetesClientNotFound = New("kubernetes client not found") + + ErrStatusPatternNeverMatched = New("status pattern never matched") + + ErrUnsupportedKubernetesResourceType = func(obj any) error { + return Errorf("unsupported kubernetes resource type %T", obj) + } + + ErrPodTemplateNotFound = New("pod template not found") + + ErrNoAvailablePods = New("no available pods") + + ErrUndefinedNamespace = New("Undefined namespace") + + ErrUndefinedService = New("Undefined service") +) diff --git a/internal/info/info_test.go b/internal/info/info_test.go index b1c7345aed..900ebb09f6 100644 --- a/internal/info/info_test.go +++ b/internal/info/info_test.go @@ -336,10 +336,11 @@ package info // // func Test_info_String(t *testing.T) { // type fields struct { -// baseURL string -// detail Detail -// rtCaller func(skip int) (pc uintptr, file string, line int, ok bool) -// rtFuncForPC func(pc uintptr) *runtime.Func +// baseURL string +// detail Detail +// valdReplacer *strings.Replacer +// rtCaller func(skip int) (pc uintptr, file string, line int, ok bool) +// rtFuncForPC func(pc uintptr) *runtime.Func // } // type want struct { // want string @@ -366,6 +367,7 @@ package info // fields: fields { // baseURL:"", // detail:Detail{}, +// valdReplacer:nil, // rtCaller:nil, // rtFuncForPC:nil, // }, @@ -388,6 +390,7 @@ package info // fields: fields { // baseURL:"", // detail:Detail{}, +// valdReplacer:nil, // rtCaller:nil, // rtFuncForPC:nil, // }, @@ -420,10 +423,11 @@ package info // checkFunc = defaultCheckFunc // } // i := &info{ -// baseURL: test.fields.baseURL, -// detail: test.fields.detail, -// rtCaller: test.fields.rtCaller, -// rtFuncForPC: test.fields.rtFuncForPC, +// baseURL: test.fields.baseURL, +// detail: test.fields.detail, +// valdReplacer: test.fields.valdReplacer, +// rtCaller: test.fields.rtCaller, +// rtFuncForPC: test.fields.rtFuncForPC, // } // // got := i.String() @@ -584,10 +588,11 @@ package info // // func Test_info_Get(t *testing.T) { // type fields struct { -// baseURL string -// detail Detail -// rtCaller func(skip int) (pc uintptr, file string, line int, ok bool) -// rtFuncForPC func(pc uintptr) *runtime.Func +// baseURL string +// detail Detail +// valdReplacer *strings.Replacer +// rtCaller func(skip int) (pc uintptr, file string, line int, ok bool) +// rtFuncForPC func(pc uintptr) *runtime.Func // } // type want struct { // want Detail @@ -614,6 +619,7 @@ package info // 
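
A brief sketch of the intended call pattern for these constructors; the checkPod helper and the corev1 import are assumptions for illustration.

import (
	corev1 "k8s.io/api/core/v1"

	"github.com/vdaas/vald/internal/errors"
)

// checkPod returns the shared "pod <namespace>/<name> is not running" error
// whenever the pod has not reached the Running phase.
func checkPod(pod *corev1.Pod) error {
	if pod.Status.Phase != corev1.PodRunning {
		return errors.ErrPodIsNotRunning(pod.GetNamespace(), pod.GetName())
	}
	return nil
}
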
fields: fields { // baseURL:"", // detail:Detail{}, +// valdReplacer:nil, // rtCaller:nil, // rtFuncForPC:nil, // }, @@ -636,6 +642,7 @@ package info // fields: fields { // baseURL:"", // detail:Detail{}, +// valdReplacer:nil, // rtCaller:nil, // rtFuncForPC:nil, // }, @@ -668,10 +675,11 @@ package info // checkFunc = defaultCheckFunc // } // i := &info{ -// baseURL: test.fields.baseURL, -// detail: test.fields.detail, -// rtCaller: test.fields.rtCaller, -// rtFuncForPC: test.fields.rtFuncForPC, +// baseURL: test.fields.baseURL, +// detail: test.fields.detail, +// valdReplacer: test.fields.valdReplacer, +// rtCaller: test.fields.rtCaller, +// rtFuncForPC: test.fields.rtFuncForPC, // } // // got := i.Get() @@ -684,10 +692,11 @@ package info // // func Test_info_getDetail(t *testing.T) { // type fields struct { -// baseURL string -// detail Detail -// rtCaller func(skip int) (pc uintptr, file string, line int, ok bool) -// rtFuncForPC func(pc uintptr) *runtime.Func +// baseURL string +// detail Detail +// valdReplacer *strings.Replacer +// rtCaller func(skip int) (pc uintptr, file string, line int, ok bool) +// rtFuncForPC func(pc uintptr) *runtime.Func // } // type want struct { // want Detail @@ -714,6 +723,7 @@ package info // fields: fields { // baseURL:"", // detail:Detail{}, +// valdReplacer:nil, // rtCaller:nil, // rtFuncForPC:nil, // }, @@ -736,6 +746,7 @@ package info // fields: fields { // baseURL:"", // detail:Detail{}, +// valdReplacer:nil, // rtCaller:nil, // rtFuncForPC:nil, // }, @@ -768,10 +779,11 @@ package info // checkFunc = defaultCheckFunc // } // i := info{ -// baseURL: test.fields.baseURL, -// detail: test.fields.detail, -// rtCaller: test.fields.rtCaller, -// rtFuncForPC: test.fields.rtFuncForPC, +// baseURL: test.fields.baseURL, +// detail: test.fields.detail, +// valdReplacer: test.fields.valdReplacer, +// rtCaller: test.fields.rtCaller, +// rtFuncForPC: test.fields.rtFuncForPC, // } // // got := i.getDetail() @@ -784,10 +796,11 @@ package info // // func Test_info_prepare(t *testing.T) { // type fields struct { -// baseURL string -// detail Detail -// rtCaller func(skip int) (pc uintptr, file string, line int, ok bool) -// rtFuncForPC func(pc uintptr) *runtime.Func +// baseURL string +// detail Detail +// valdReplacer *strings.Replacer +// rtCaller func(skip int) (pc uintptr, file string, line int, ok bool) +// rtFuncForPC func(pc uintptr) *runtime.Func // } // type want struct{} // type test struct { @@ -809,6 +822,7 @@ package info // fields: fields { // baseURL:"", // detail:Detail{}, +// valdReplacer:nil, // rtCaller:nil, // rtFuncForPC:nil, // }, @@ -831,6 +845,7 @@ package info // fields: fields { // baseURL:"", // detail:Detail{}, +// valdReplacer:nil, // rtCaller:nil, // rtFuncForPC:nil, // }, @@ -863,10 +878,11 @@ package info // checkFunc = defaultCheckFunc // } // i := &info{ -// baseURL: test.fields.baseURL, -// detail: test.fields.detail, -// rtCaller: test.fields.rtCaller, -// rtFuncForPC: test.fields.rtFuncForPC, +// baseURL: test.fields.baseURL, +// detail: test.fields.detail, +// valdReplacer: test.fields.valdReplacer, +// rtCaller: test.fields.rtCaller, +// rtFuncForPC: test.fields.rtFuncForPC, // } // // i.prepare() @@ -976,3 +992,103 @@ package info // }) // } // } +// +// func TestStackTrace_ShortString(t *testing.T) { +// type fields struct { +// URL string +// FuncName string +// File string +// Line int +// } +// type want struct { +// want string +// } +// type test struct { +// name string +// fields fields +// want want +// checkFunc 
func(want, string) error +// beforeFunc func(*testing.T) +// afterFunc func(*testing.T) +// } +// defaultCheckFunc := func(w want, got string) error { +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// fields: fields { +// URL:"", +// FuncName:"", +// File:"", +// Line:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T,) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T,) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// fields: fields { +// URL:"", +// FuncName:"", +// File:"", +// Line:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T,) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T,) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// s := StackTrace{ +// URL: test.fields.URL, +// FuncName: test.fields.FuncName, +// File: test.fields.File, +// Line: test.fields.Line, +// } +// +// got := s.ShortString() +// if err := checkFunc(test.want, got); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } diff --git a/internal/iter/iter.go b/internal/iter/iter.go new file mode 100644 index 0000000000..9f74258c3e --- /dev/null +++ b/internal/iter/iter.go @@ -0,0 +1,152 @@ +// +// Copyright (C) 2019-2025 vdaas.org vald team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// package iter +package iter + +import ( + "context" + "iter" +) + +// ------------------------- +// Iterator Type Definition | +// ------------------------- +// Cycle provides an iterator abstraction over a slice. +type Cycle[S ~[]E, E any] interface { + At(i uint64) E + ForEach(ctx context.Context, fn func(uint64, E) bool) + Len() uint64 + Raw() S + Seq(context.Context) iter.Seq[E] + Seq2(context.Context) iter.Seq2[uint64, E] + Indexes(context.Context) iter.Seq[uint64] + Values(context.Context) iter.Seq[E] +} + +// cycle provides an iterator abstraction over a slice. +// It applies an optional modFunc to transform each element on‑the‑fly without precomputing the entire dataset. +type cycle[S ~[]E, E any] struct { + start uint64 // Starting index after applying the offset. + num uint64 // Total number of elements to iterate over. + size uint64 // Size of the original slice. + offset uint64 // Provided offset value. + array S // The original data slice. 
+ modFunc func(uint64, E) E // Optional function to modify an element based on its overall index. +} + +// NewCycle creates a new cycle iterator instance. It validates the input array and computes the starting index (offset modulo array size). +func NewCycle[S ~[]E, E any](array S, num, offset uint64, mod func(uint64, E) E) Cycle[S, E] { + if array == nil { + return nil + } + size := uint64(len(array)) + if size == 0 { + return nil + } + return &cycle[S, E]{ + start: offset % size, + num: num, + size: size, + offset: offset, + array: array, + modFunc: mod, + } +} + +// At returns the element at logical index i. +// If modFunc is provided, it applies the function on-the-fly. +func (c *cycle[_, E]) At(i uint64) E { + idx := (c.start + i) % c.size + if c.modFunc != nil { + return c.modFunc(i, c.array[idx]) + } + return c.array[idx] +} + +// Seq2 returns an iterator sequence (iter.Seq2) that yields each element along with its index. +func (c *cycle[_, E]) Seq2(ctx context.Context) iter.Seq2[uint64, E] { + return func(yield func(uint64, E) bool) { + for i := uint64(0); i < c.num; i++ { + select { + case <-ctx.Done(): + return + default: + } + if !yield(i, c.At(i)) { + return + } + } + } +} + +func (c *cycle[_, E]) Seq(ctx context.Context) iter.Seq[E] { + return c.Values(ctx) +} + +// Values returns an iterator sequence (iter.Seq) that yields the values (without indexes). +func (c *cycle[_, E]) Values(ctx context.Context) iter.Seq[E] { + return func(yield func(E) bool) { + for i := uint64(0); i < c.num; i++ { + select { + case <-ctx.Done(): + return + default: + } + if !yield(c.At(i)) { + return + } + } + } +} + +// Indexes returns an iterator sequence (iter.Seq) that yields the indexes. +func (c cycle[_, _]) Indexes(ctx context.Context) iter.Seq[uint64] { + return func(yield func(uint64) bool) { + for i := uint64(0); i < c.num; i++ { + select { + case <-ctx.Done(): + return + default: + } + if !yield(i) { + return + } + } + } +} + +func (c cycle[_, E]) ForEach(ctx context.Context, fn func(uint64, E) bool) { + for i := uint64(0); i < c.num; i++ { + select { + case <-ctx.Done(): + return + default: + } + if !fn(i, c.At(i)) { + return + } + } +} + +func (c cycle[S, _]) Raw() S { + return c.array +} +
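
A short usage sketch for the iterator above; the slice contents, the count of seven, the offset of one, and the doubling modFunc are illustrative values only.

package main

import (
	"context"
	"fmt"

	"github.com/vdaas/vald/internal/iter"
)

func main() {
	// Wrap a 3-element slice so that it yields 7 values, starting at offset 1
	// and doubling each element on the fly via modFunc.
	c := iter.NewCycle([]float32{1, 2, 3}, 7, 1, func(_ uint64, v float32) float32 {
		return v * 2
	})
	for i, v := range c.Seq2(context.Background()) {
		fmt.Println(i, v) // indexes 0..6; values cycle 4, 6, 2, 4, 6, 2, 4
	}
}

+// Len returns the total number of elements in the iterator.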
+func (c cycle[_, _]) Len() uint64 { + return c.num +} diff --git a/internal/net/dialer_test.go b/internal/net/dialer_test.go index 2cf8ffe9ac..8a5a870c19 100644 --- a/internal/net/dialer_test.go +++ b/internal/net/dialer_test.go @@ -1989,3 +1989,91 @@ func Test_dialer_tlsHandshake(t *testing.T) { // }) // } // } +// +// func Test_isQUICDial(t *testing.T) { +// type args struct { +// network string +// addr string +// } +// type want struct { +// want bool +// } +// type test struct { +// name string +// args args +// want want +// checkFunc func(want, bool) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, got bool) error { +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// network:"", +// addr:"", +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// network:"", +// addr:"", +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// got := isQUICDial(test.args.network, test.args.addr) +// if err := checkFunc(test.want, got); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } diff --git a/internal/net/grpc/errdetails/errdetails.go b/internal/net/grpc/errdetails/errdetails.go index 1636f369b4..dbc4de9cf7 100644 --- a/internal/net/grpc/errdetails/errdetails.go +++ b/internal/net/grpc/errdetails/errdetails.go @@ -23,6 +23,7 @@ import ( "strconv" "github.com/vdaas/vald/apis/grpc/v1/rpc/errdetails" + "github.com/vdaas/vald/internal/conv" "github.com/vdaas/vald/internal/encoding/json" "github.com/vdaas/vald/internal/info" "github.com/vdaas/vald/internal/log" @@ -31,6 +32,7 @@ import ( "github.com/vdaas/vald/internal/strings" spb "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc/status" + "google.golang.org/protobuf/encoding/protojson" ) type ( @@ -75,6 +77,10 @@ var ( RetryInfoMessageName = string(new(RetryInfo).ProtoReflect().Descriptor().FullName().Name()) ) +type Details struct { + Details []Detail `json:"details,omitempty" yaml:"details"` +} + type Detail struct { TypeURL string `json:"type_url,omitempty" yaml:"type_url"` Message proto.Message `json:"message,omitempty" yaml:"message"` @@ -84,79 +90,86 @@ func (d *Detail) MarshalJSON() (body []byte, err error) { if d == nil { return nil, nil } - switch strings.TrimPrefix(strings.TrimPrefix(d.TypeURL, typePrefix), typePrefixV1) { + typeName := strings.TrimPrefix(strings.TrimPrefix(d.TypeURL, typePrefix), typePrefixV1) + switch typeName { case DebugInfoMessageName: m, ok := 
d.Message.(*DebugInfo) if ok { - return json.Marshal(m) + body, err = m.MarshalJSON() } case ErrorInfoMessageName: m, ok := d.Message.(*ErrorInfo) if ok { - return json.Marshal(m) + body, err = m.MarshalJSON() } case BadRequestFieldViolationMessageName: m, ok := d.Message.(*BadRequestFieldViolation) if ok { - return json.Marshal(m) + body, err = m.MarshalJSON() } case BadRequestMessageName: m, ok := d.Message.(*BadRequest) if ok { - return json.Marshal(m) + body, err = m.MarshalJSON() } case LocalizedMessageMessageName: m, ok := d.Message.(*LocalizedMessage) if ok { - return json.Marshal(m) + body, err = m.MarshalJSON() } case PreconditionFailureViolationMessageName: m, ok := d.Message.(*PreconditionFailureViolation) if ok { - return json.Marshal(m) + body, err = m.MarshalJSON() } case PreconditionFailureMessageName: m, ok := d.Message.(*PreconditionFailure) if ok { - return json.Marshal(m) + body, err = m.MarshalJSON() } case HelpLinkMessageName: m, ok := d.Message.(*HelpLink) if ok { - return json.Marshal(m) + body, err = m.MarshalJSON() } case HelpMessageName: m, ok := d.Message.(*Help) if ok { - return json.Marshal(m) + body, err = m.MarshalJSON() } case QuotaFailureViolationMessageName: m, ok := d.Message.(*QuotaFailureViolation) if ok { - return json.Marshal(m) + body, err = m.MarshalJSON() } case QuotaFailureMessageName: m, ok := d.Message.(*QuotaFailure) if ok { - return json.Marshal(m) + body, err = m.MarshalJSON() } case RequestInfoMessageName: m, ok := d.Message.(*RequestInfo) if ok { - return json.Marshal(m) + body, err = m.MarshalJSON() } case ResourceInfoMessageName: m, ok := d.Message.(*ResourceInfo) if ok { - return json.Marshal(m) + body, err = m.MarshalJSON() } case RetryInfoMessageName: m, ok := d.Message.(*RetryInfo) if ok { - return json.Marshal(m) + body, err = m.MarshalJSON() } + default: + body, err = protojson.Marshal(d.Message) } - return json.Marshal(d) + if err != nil || body == nil { + log.Warnf("failed to Marshal type: %s, object %#v to JSON body %v, error: %v", typeName, d, body, err) + return nil, err + } + return body, nil } func decodeDetails(objs ...any) (details []Detail) { @@ -299,14 +312,14 @@ func Serialize(objs ...any) string { case 0: return fmt.Sprint(objs...) case 1: - b, err = json.Marshal(msgs[0]) + b, err = msgs[0].MarshalJSON() default: - b, err = json.Marshal(msgs) + b, err = json.Marshal(&Details{Details: msgs}) } - if err != nil { + if err != nil || b == nil { return fmt.Sprint(objs...) 
} - return string(b) + return conv.Btoa(b) } func AnyToErrorDetail(a *types.Any) proto.Message { @@ -314,7 +327,8 @@ func AnyToErrorDetail(a *types.Any) proto.Message { return nil } var err error - switch strings.TrimPrefix(strings.TrimPrefix(a.GetTypeUrl(), typePrefix), typePrefixV1) { + typeName := strings.TrimPrefix(strings.TrimPrefix(a.GetTypeUrl(), typePrefix), typePrefixV1) + switch typeName { case DebugInfoMessageName: var m DebugInfo err = types.UnmarshalAny(a, &m) @@ -399,9 +413,15 @@ func AnyToErrorDetail(a *types.Any) proto.Message { if err == nil { return &m } + default: + m, err := a.UnmarshalNew() + if err == nil { + return m + } + } if err != nil { - log.Warn(err) + log.Warnf("failed to Unmarshal type: %s, object %#v to JSON error: %v", typeName, a, err) } return a.ProtoReflect().Interface() } diff --git a/internal/net/grpc/metadata_test.go b/internal/net/grpc/metadata_test.go index 72a7a1434a..3cddd00644 100644 --- a/internal/net/grpc/metadata_test.go +++ b/internal/net/grpc/metadata_test.go @@ -15,6 +15,91 @@ package grpc // NOT IMPLEMENTED BELOW // +// func TestNewMetadata(t *testing.T) { +// type args struct { +// m map[string]string +// } +// type want struct { +// want MD +// } +// type test struct { +// name string +// args args +// want want +// checkFunc func(want, MD) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, got MD) error { +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// m:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// m:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// got := NewMetadata(test.args.m) +// if err := checkFunc(test.want, got); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// // func TestNewOutgoingContext(t *testing.T) { // type args struct { // ctx context.Context diff --git a/internal/net/grpc/option_test.go b/internal/net/grpc/option_test.go index a0115312de..0fa02acd09 100644 --- a/internal/net/grpc/option_test.go +++ b/internal/net/grpc/option_test.go @@ -529,9 +529,9 @@ package grpc // } // } // -// func TestWithDialOptions(t *testing.T) { +// func TestWithBackoffMaxDelay(t *testing.T) { // type args struct { -// opts []grpc.DialOption +// dur string // } // type want struct { // want Option @@ -556,7 +556,7 @@ package grpc // { // name: "test_case_1", // args: args { -// opts:nil, +// dur:"", // }, // want: want{}, // checkFunc: defaultCheckFunc, @@ -575,7 +575,7 
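
A hedged sketch of calling Serialize after this change; the RequestInfo and ErrorInfo values are placeholders, and the field names assume the standard generated google.rpc messages that this package aliases.

import (
	"fmt"

	"github.com/vdaas/vald/internal/net/grpc/errdetails"
)

func printDetails() {
	// With more than one message, Serialize now wraps the output in a single
	// {"details": [...]} object instead of emitting a bare JSON array.
	body := errdetails.Serialize(
		&errdetails.RequestInfo{RequestId: "sample-request-id"},
		&errdetails.ErrorInfo{Reason: "SAMPLE_REASON"},
	)
	fmt.Println(body)
}
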
@@ package grpc // return test { // name: "test_case_2", // args: args { -// opts:nil, +// dur:"", // }, // want: want{}, // checkFunc: defaultCheckFunc, @@ -606,7 +606,7 @@ package grpc // checkFunc = defaultCheckFunc // } // -// got := WithDialOptions(test.args.opts...) +// got := WithBackoffMaxDelay(test.args.dur) // if err := checkFunc(test.want, got); err != nil { // tt.Errorf("error = %v", err) // } @@ -614,7 +614,7 @@ package grpc // } // } // -// func TestWithBackoffMaxDelay(t *testing.T) { +// func TestWithBackoffBaseDelay(t *testing.T) { // type args struct { // dur string // } @@ -691,7 +691,7 @@ package grpc // checkFunc = defaultCheckFunc // } // -// got := WithBackoffMaxDelay(test.args.dur) +// got := WithBackoffBaseDelay(test.args.dur) // if err := checkFunc(test.want, got); err != nil { // tt.Errorf("error = %v", err) // } @@ -699,9 +699,9 @@ package grpc // } // } // -// func TestWithBackoffBaseDelay(t *testing.T) { +// func TestWithBackoffMultiplier(t *testing.T) { // type args struct { -// dur string +// m float64 // } // type want struct { // want Option @@ -726,7 +726,7 @@ package grpc // { // name: "test_case_1", // args: args { -// dur:"", +// m:0, // }, // want: want{}, // checkFunc: defaultCheckFunc, @@ -745,7 +745,7 @@ package grpc // return test { // name: "test_case_2", // args: args { -// dur:"", +// m:0, // }, // want: want{}, // checkFunc: defaultCheckFunc, @@ -776,7 +776,7 @@ package grpc // checkFunc = defaultCheckFunc // } // -// got := WithBackoffBaseDelay(test.args.dur) +// got := WithBackoffMultiplier(test.args.m) // if err := checkFunc(test.want, got); err != nil { // tt.Errorf("error = %v", err) // } @@ -784,9 +784,9 @@ package grpc // } // } // -// func TestWithBackoffMultiplier(t *testing.T) { +// func TestWithBackoffJitter(t *testing.T) { // type args struct { -// m float64 +// j float64 // } // type want struct { // want Option @@ -811,7 +811,7 @@ package grpc // { // name: "test_case_1", // args: args { -// m:0, +// j:0, // }, // want: want{}, // checkFunc: defaultCheckFunc, @@ -830,7 +830,7 @@ package grpc // return test { // name: "test_case_2", // args: args { -// m:0, +// j:0, // }, // want: want{}, // checkFunc: defaultCheckFunc, @@ -861,7 +861,7 @@ package grpc // checkFunc = defaultCheckFunc // } // -// got := WithBackoffMultiplier(test.args.m) +// got := WithBackoffJitter(test.args.j) // if err := checkFunc(test.want, got); err != nil { // tt.Errorf("error = %v", err) // } @@ -869,9 +869,9 @@ package grpc // } // } // -// func TestWithBackoffJitter(t *testing.T) { +// func TestWithMinConnectTimeout(t *testing.T) { // type args struct { -// j float64 +// dur string // } // type want struct { // want Option @@ -896,7 +896,7 @@ package grpc // { // name: "test_case_1", // args: args { -// j:0, +// dur:"", // }, // want: want{}, // checkFunc: defaultCheckFunc, @@ -915,7 +915,7 @@ package grpc // return test { // name: "test_case_2", // args: args { -// j:0, +// dur:"", // }, // want: want{}, // checkFunc: defaultCheckFunc, @@ -946,7 +946,7 @@ package grpc // checkFunc = defaultCheckFunc // } // -// got := WithBackoffJitter(test.args.j) +// got := WithMinConnectTimeout(test.args.dur) // if err := checkFunc(test.want, got); err != nil { // tt.Errorf("error = %v", err) // } @@ -954,9 +954,9 @@ package grpc // } // } // -// func TestWithMinConnectTimeout(t *testing.T) { +// func TestWithErrGroup(t *testing.T) { // type args struct { -// dur string +// eg errgroup.Group // } // type want struct { // want Option @@ -981,7 +981,7 @@ package grpc // 
{ // name: "test_case_1", // args: args { -// dur:"", +// eg:nil, // }, // want: want{}, // checkFunc: defaultCheckFunc, @@ -1000,7 +1000,7 @@ package grpc // return test { // name: "test_case_2", // args: args { -// dur:"", +// eg:nil, // }, // want: want{}, // checkFunc: defaultCheckFunc, @@ -1031,7 +1031,7 @@ package grpc // checkFunc = defaultCheckFunc // } // -// got := WithMinConnectTimeout(test.args.dur) +// got := WithErrGroup(test.args.eg) // if err := checkFunc(test.want, got); err != nil { // tt.Errorf("error = %v", err) // } @@ -1039,9 +1039,9 @@ package grpc // } // } // -// func TestWithCallOptions(t *testing.T) { +// func TestWithBackoff(t *testing.T) { // type args struct { -// opts []grpc.CallOption +// bo backoff.Backoff // } // type want struct { // want Option @@ -1066,7 +1066,7 @@ package grpc // { // name: "test_case_1", // args: args { -// opts:nil, +// bo:nil, // }, // want: want{}, // checkFunc: defaultCheckFunc, @@ -1085,7 +1085,7 @@ package grpc // return test { // name: "test_case_2", // args: args { -// opts:nil, +// bo:nil, // }, // want: want{}, // checkFunc: defaultCheckFunc, @@ -1116,7 +1116,7 @@ package grpc // checkFunc = defaultCheckFunc // } // -// got := WithCallOptions(test.args.opts...) +// got := WithBackoff(test.args.bo) // if err := checkFunc(test.want, got); err != nil { // tt.Errorf("error = %v", err) // } @@ -1124,9 +1124,9 @@ package grpc // } // } // -// func TestWithErrGroup(t *testing.T) { +// func TestWithCircuitBreaker(t *testing.T) { // type args struct { -// eg errgroup.Group +// cb circuitbreaker.CircuitBreaker // } // type want struct { // want Option @@ -1151,7 +1151,7 @@ package grpc // { // name: "test_case_1", // args: args { -// eg:nil, +// cb:nil, // }, // want: want{}, // checkFunc: defaultCheckFunc, @@ -1170,7 +1170,7 @@ package grpc // return test { // name: "test_case_2", // args: args { -// eg:nil, +// cb:nil, // }, // want: want{}, // checkFunc: defaultCheckFunc, @@ -1201,7 +1201,7 @@ package grpc // checkFunc = defaultCheckFunc // } // -// got := WithErrGroup(test.args.eg) +// got := WithCircuitBreaker(test.args.cb) // if err := checkFunc(test.want, got); err != nil { // tt.Errorf("error = %v", err) // } @@ -1209,9 +1209,9 @@ package grpc // } // } // -// func TestWithBackoff(t *testing.T) { +// func TestWithCallOptions(t *testing.T) { // type args struct { -// bo backoff.Backoff +// opts []grpc.CallOption // } // type want struct { // want Option @@ -1236,7 +1236,7 @@ package grpc // { // name: "test_case_1", // args: args { -// bo:nil, +// opts:nil, // }, // want: want{}, // checkFunc: defaultCheckFunc, @@ -1255,7 +1255,7 @@ package grpc // return test { // name: "test_case_2", // args: args { -// bo:nil, +// opts:nil, // }, // want: want{}, // checkFunc: defaultCheckFunc, @@ -1286,7 +1286,7 @@ package grpc // checkFunc = defaultCheckFunc // } // -// got := WithBackoff(test.args.bo) +// got := WithCallOptions(test.args.opts...) 
// if err := checkFunc(test.want, got); err != nil { // tt.Errorf("error = %v", err) // } @@ -1294,9 +1294,9 @@ package grpc // } // } // -// func TestWithCircuitBreaker(t *testing.T) { +// func TestWithCallContentSubtype(t *testing.T) { // type args struct { -// cb circuitbreaker.CircuitBreaker +// contentSubtype string // } // type want struct { // want Option @@ -1321,7 +1321,7 @@ package grpc // { // name: "test_case_1", // args: args { -// cb:nil, +// contentSubtype:"", // }, // want: want{}, // checkFunc: defaultCheckFunc, @@ -1340,7 +1340,7 @@ package grpc // return test { // name: "test_case_2", // args: args { -// cb:nil, +// contentSubtype:"", // }, // want: want{}, // checkFunc: defaultCheckFunc, @@ -1371,7 +1371,7 @@ package grpc // checkFunc = defaultCheckFunc // } // -// got := WithCircuitBreaker(test.args.cb) +// got := WithCallContentSubtype(test.args.contentSubtype) // if err := checkFunc(test.want, got); err != nil { // tt.Errorf("error = %v", err) // } @@ -1379,9 +1379,9 @@ package grpc // } // } // -// func TestWithWaitForReady(t *testing.T) { +// func TestWithMaxRecvMsgSize(t *testing.T) { // type args struct { -// flg bool +// size int // } // type want struct { // want Option @@ -1406,7 +1406,7 @@ package grpc // { // name: "test_case_1", // args: args { -// flg:false, +// size:0, // }, // want: want{}, // checkFunc: defaultCheckFunc, @@ -1425,7 +1425,7 @@ package grpc // return test { // name: "test_case_2", // args: args { -// flg:false, +// size:0, // }, // want: want{}, // checkFunc: defaultCheckFunc, @@ -1456,7 +1456,7 @@ package grpc // checkFunc = defaultCheckFunc // } // -// got := WithWaitForReady(test.args.flg) +// got := WithMaxRecvMsgSize(test.args.size) // if err := checkFunc(test.want, got); err != nil { // tt.Errorf("error = %v", err) // } @@ -1464,7 +1464,7 @@ package grpc // } // } // -// func TestWithMaxRetryRPCBufferSize(t *testing.T) { +// func TestWithMaxSendMsgSize(t *testing.T) { // type args struct { // size int // } @@ -1541,7 +1541,7 @@ package grpc // checkFunc = defaultCheckFunc // } // -// got := WithMaxRetryRPCBufferSize(test.args.size) +// got := WithMaxSendMsgSize(test.args.size) // if err := checkFunc(test.want, got); err != nil { // tt.Errorf("error = %v", err) // } @@ -1549,7 +1549,7 @@ package grpc // } // } // -// func TestWithMaxRecvMsgSize(t *testing.T) { +// func TestWithMaxRetryRPCBufferSize(t *testing.T) { // type args struct { // size int // } @@ -1626,7 +1626,7 @@ package grpc // checkFunc = defaultCheckFunc // } // -// got := WithMaxRecvMsgSize(test.args.size) +// got := WithMaxRetryRPCBufferSize(test.args.size) // if err := checkFunc(test.want, got); err != nil { // tt.Errorf("error = %v", err) // } @@ -1634,9 +1634,9 @@ package grpc // } // } // -// func TestWithMaxSendMsgSize(t *testing.T) { +// func TestWithWaitForReady(t *testing.T) { // type args struct { -// size int +// flg bool // } // type want struct { // want Option @@ -1661,7 +1661,7 @@ package grpc // { // name: "test_case_1", // args: args { -// size:0, +// flg:false, // }, // want: want{}, // checkFunc: defaultCheckFunc, @@ -1680,7 +1680,7 @@ package grpc // return test { // name: "test_case_2", // args: args { -// size:0, +// flg:false, // }, // want: want{}, // checkFunc: defaultCheckFunc, @@ -1711,7 +1711,92 @@ package grpc // checkFunc = defaultCheckFunc // } // -// got := WithMaxSendMsgSize(test.args.size) +// got := WithWaitForReady(test.args.flg) +// if err := checkFunc(test.want, got); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } 
+// } +// +// func TestWithDialOptions(t *testing.T) { +// type args struct { +// opts []grpc.DialOption +// } +// type want struct { +// want Option +// } +// type test struct { +// name string +// args args +// want want +// checkFunc func(want, Option) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, got Option) error { +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// opts:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// opts:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// got := WithDialOptions(test.args.opts...) // if err := checkFunc(test.want, got); err != nil { // tt.Errorf("error = %v", err) // } @@ -1891,7 +1976,7 @@ package grpc // // func TestWithInitialWindowSize(t *testing.T) { // type args struct { -// size int +// size int32 // } // type want struct { // want Option @@ -1976,7 +2061,7 @@ package grpc // // func TestWithInitialConnectionWindowSize(t *testing.T) { // type args struct { -// size int +// size int32 // } // type want struct { // want Option @@ -2322,7 +2407,8 @@ package grpc // // func TestWithDialer(t *testing.T) { // type args struct { -// der net.Dialer +// network string +// der net.Dialer // } // type want struct { // want Option @@ -2347,6 +2433,7 @@ package grpc // { // name: "test_case_1", // args: args { +// network:"", // der:nil, // }, // want: want{}, @@ -2366,6 +2453,7 @@ package grpc // return test { // name: "test_case_2", // args: args { +// network:"", // der:nil, // }, // want: want{}, @@ -2397,7 +2485,7 @@ package grpc // checkFunc = defaultCheckFunc // } // -// got := WithDialer(test.args.der) +// got := WithDialer(test.args.network, test.args.der) // if err := checkFunc(test.want, got); err != nil { // tt.Errorf("error = %v", err) // } @@ -2490,6 +2578,601 @@ package grpc // } // } // +// func TestWithAuthority(t *testing.T) { +// type args struct { +// a string +// } +// type want struct { +// want Option +// } +// type test struct { +// name string +// args args +// want want +// checkFunc func(want, Option) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, got Option) error { +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// 
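
A rough sketch of composing the client options exercised by the tests above; every concrete value is a placeholder, and the dialer is assumed to be built elsewhere with internal/net.

import (
	"github.com/vdaas/vald/internal/net"
	"github.com/vdaas/vald/internal/net/grpc"
)

// clientOpts collects the newer dial options; note that WithDialer now takes
// the network name explicitly in addition to the dialer itself.
func clientOpts(der net.Dialer) []grpc.Option {
	return []grpc.Option{
		grpc.WithDialer("tcp", der),
		grpc.WithAuthority("vald-lb-gateway.default.svc.cluster.local"),
		grpc.WithIdleTimeout("30s"),
		grpc.WithMaxCallAttempts(5),
		grpc.WithMaxHeaderListSize(8192),
		grpc.WithUserAgent("vald-e2e-client"),
		grpc.WithSharedWriteBuffer(true),
		grpc.WithDisableRetry(false),
	}
}
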
a:"", +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// a:"", +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// got := WithAuthority(test.args.a) +// if err := checkFunc(test.want, got); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func TestWithDisableRetry(t *testing.T) { +// type args struct { +// disable bool +// } +// type want struct { +// want Option +// } +// type test struct { +// name string +// args args +// want want +// checkFunc func(want, Option) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, got Option) error { +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// disable:false, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// disable:false, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// got := WithDisableRetry(test.args.disable) +// if err := checkFunc(test.want, got); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func TestWithIdleTimeout(t *testing.T) { +// type args struct { +// dur string +// } +// type want struct { +// want Option +// } +// type test struct { +// name string +// args args +// want want +// checkFunc func(want, Option) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, got Option) error { +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: 
args { +// dur:"", +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// dur:"", +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// got := WithIdleTimeout(test.args.dur) +// if err := checkFunc(test.want, got); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func TestWithMaxCallAttempts(t *testing.T) { +// type args struct { +// n int +// } +// type want struct { +// want Option +// } +// type test struct { +// name string +// args args +// want want +// checkFunc func(want, Option) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, got Option) error { +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// n:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// n:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// got := WithMaxCallAttempts(test.args.n) +// if err := checkFunc(test.want, got); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func TestWithMaxHeaderListSize(t *testing.T) { +// type args struct { +// size uint32 +// } +// type want struct { +// want Option +// } +// type test struct { +// name string +// args args +// want want +// checkFunc func(want, Option) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, got Option) error { +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: 
args { +// size:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// size:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// got := WithMaxHeaderListSize(test.args.size) +// if err := checkFunc(test.want, got); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func TestWithSharedWriteBuffer(t *testing.T) { +// type args struct { +// enable bool +// } +// type want struct { +// want Option +// } +// type test struct { +// name string +// args args +// want want +// checkFunc func(want, Option) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, got Option) error { +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// enable:false, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// enable:false, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// got := WithSharedWriteBuffer(test.args.enable) +// if err := checkFunc(test.want, got); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func TestWithUserAgent(t *testing.T) { +// type args struct { +// ua string +// } +// type want struct { +// want Option +// } +// type test struct { +// name string +// args args +// want want +// checkFunc func(want, Option) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, got Option) error { +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// 
name: "test_case_1", +// args: args { +// ua:"", +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// ua:"", +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// got := WithUserAgent(test.args.ua) +// if err := checkFunc(test.want, got); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// // func TestWithClientInterceptors(t *testing.T) { // type args struct { // names []string diff --git a/internal/net/grpc/pool/pool_test.go b/internal/net/grpc/pool/pool_test.go index 1cc4cccd43..ea4e6f2a55 100644 --- a/internal/net/grpc/pool/pool_test.go +++ b/internal/net/grpc/pool/pool_test.go @@ -2872,8 +2872,8 @@ package pool // reconnectHash atomic.Pointer[string] // } // type want struct { -// want *ClientConn -// want1 bool +// wantConn *ClientConn +// wantOk bool // } // type test struct { // name string @@ -2884,12 +2884,12 @@ package pool // beforeFunc func(*testing.T, args) // afterFunc func(*testing.T, args) // } -// defaultCheckFunc := func(w want, got *ClientConn, got1 bool) error { -// if !reflect.DeepEqual(got, w.want) { -// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// defaultCheckFunc := func(w want, gotConn *ClientConn, gotOk bool) error { +// if !reflect.DeepEqual(gotConn, w.wantConn) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotConn, w.wantConn) // } -// if !reflect.DeepEqual(got1, w.want1) { -// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got1, w.want1) +// if !reflect.DeepEqual(gotOk, w.wantOk) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotOk, w.wantOk) // } // return nil // } @@ -3006,8 +3006,8 @@ package pool // reconnectHash: test.fields.reconnectHash, // } // -// got, got1 := p.Get(test.args.ctx) -// if err := checkFunc(test.want, got, got1); err != nil { +// gotConn, gotOk := p.Get(test.args.ctx) +// if err := checkFunc(test.want, gotConn, gotOk); err != nil { // tt.Errorf("error = %v", err) // } // }) @@ -3040,24 +3040,28 @@ package pool // reconnectHash atomic.Pointer[string] // } // type want struct { -// want *ClientConn -// want1 bool +// wantIdx int +// wantConn *ClientConn +// wantOk bool // } // type test struct { // name string // args args // fields fields // want want -// checkFunc func(want, *ClientConn, bool) error +// checkFunc func(want, int, *ClientConn, bool) error // beforeFunc func(*testing.T, args) // afterFunc func(*testing.T, args) // } -// defaultCheckFunc := func(w want, got *ClientConn, got1 bool) error { -// if !reflect.DeepEqual(got, w.want) { -// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// defaultCheckFunc := func(w want, gotIdx int, gotConn 
*ClientConn, gotOk bool) error { +// if !reflect.DeepEqual(gotIdx, w.wantIdx) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotIdx, w.wantIdx) +// } +// if !reflect.DeepEqual(gotConn, w.wantConn) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotConn, w.wantConn) // } -// if !reflect.DeepEqual(got1, w.want1) { -// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got1, w.want1) +// if !reflect.DeepEqual(gotOk, w.wantOk) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotOk, w.wantOk) // } // return nil // } @@ -3178,8 +3182,8 @@ package pool // reconnectHash: test.fields.reconnectHash, // } // -// got, got1 := p.getHealthyConn(test.args.ctx, test.args.cnt, test.args.retry) -// if err := checkFunc(test.want, got, got1); err != nil { +// gotIdx, gotConn, gotOk := p.getHealthyConn(test.args.ctx, test.args.cnt, test.args.retry) +// if err := checkFunc(test.want, gotIdx, gotConn, gotOk); err != nil { // tt.Errorf("error = %v", err) // } // }) diff --git a/internal/net/grpc/server_test.go b/internal/net/grpc/server_test.go index 41c077539c..5da6a282b5 100644 --- a/internal/net/grpc/server_test.go +++ b/internal/net/grpc/server_test.go @@ -26,6 +26,7 @@ import ( "github.com/vdaas/vald/internal/test/goleak" "google.golang.org/grpc" "google.golang.org/grpc/credentials" + _ "google.golang.org/grpc/encoding/proto" "google.golang.org/grpc/keepalive" ) @@ -833,3 +834,343 @@ func TestKeepaliveEnforcementPolicy(t *testing.T) { } // NOT IMPLEMENTED BELOW +// +// func TestMaxConcurrentStreams(t *testing.T) { +// type args struct { +// n uint32 +// } +// type want struct { +// want ServerOption +// } +// type test struct { +// name string +// args args +// want want +// checkFunc func(want, ServerOption) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, got ServerOption) error { +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// n:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// n:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// got := MaxConcurrentStreams(test.args.n) +// if err := checkFunc(test.want, got); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func TestNumStreamWorkers(t *testing.T) { +// type args struct { +// n uint32 +// } +// type want struct { +// want ServerOption +// } +// type test struct { +// name string +// args args +// want want +// checkFunc 
func(want, ServerOption) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, got ServerOption) error { +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// n:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// n:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// got := NumStreamWorkers(test.args.n) +// if err := checkFunc(test.want, got); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func TestSharedWriteBuffer(t *testing.T) { +// type args struct { +// val bool +// } +// type want struct { +// want ServerOption +// } +// type test struct { +// name string +// args args +// want want +// checkFunc func(want, ServerOption) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, got ServerOption) error { +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// val:false, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// val:false, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// got := SharedWriteBuffer(test.args.val) +// if err := checkFunc(test.want, got); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func TestWaitForHandlers(t *testing.T) { +// type args struct { +// val bool +// } +// type want struct { +// want ServerOption +// } +// type test struct { +// name string +// args 
args +// want want +// checkFunc func(want, ServerOption) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, got ServerOption) error { +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// val:false, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// val:false, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// got := WaitForHandlers(test.args.val) +// if err := checkFunc(test.want, got); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } diff --git a/internal/net/grpc/stream.go b/internal/net/grpc/stream.go index 0e705d6eec..bcd78734f2 100644 --- a/internal/net/grpc/stream.go +++ b/internal/net/grpc/stream.go @@ -168,7 +168,7 @@ func BidirectionalStream[Q, R any]( // BidirectionalStreamClient is gRPC client stream. 
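+// The concurrency argument introduced below bounds the number of in-flight SendMsg
+// goroutines via errgroup.SetLimit; a value of zero or less leaves the send concurrency
+// unlimited, and CloseSend is issued only after all pending sends have finished
+// (the WaitGroup added in the body guards this).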
func BidirectionalStreamClient[S, R any]( - stream ClientStream, sendDataProvider func() *S, callBack func(*R, error) bool, + stream ClientStream, concurrency int, sendDataProvider func() *S, callBack func(*R, error) bool, ) (err error) { if stream == nil { return errors.ErrGRPCClientStreamNotFound @@ -176,6 +176,9 @@ func BidirectionalStreamClient[S, R any]( ctx, cancel := context.WithCancel(stream.Context()) eg, ctx := errgroup.New(ctx) + if concurrency > 0 { + eg.SetLimit(concurrency) + } eg.Go(safety.RecoverFunc(func() (err error) { for { @@ -206,6 +209,7 @@ func BidirectionalStreamClient[S, R any]( }() return func() (err error) { + wg := sync.WaitGroup{} for { select { case <-ctx.Done(): @@ -213,6 +217,7 @@ func BidirectionalStreamClient[S, R any]( default: data := sendDataProvider() if data == nil { + wg.Wait() err = stream.CloseSend() cancel() if err != nil { @@ -220,11 +225,15 @@ func BidirectionalStreamClient[S, R any]( } return eg.Wait() } - - err = stream.SendMsg(*data) - if err != nil { - return err - } + wg.Add(1) + eg.Go(safety.RecoverFunc(func() (err error) { + defer wg.Done() + err = stream.SendMsg(*data) + if err != nil { + return err + } + return nil + })) } } }() diff --git a/internal/net/grpc/stream_test.go b/internal/net/grpc/stream_test.go index 3d64c17206..9177fb932d 100644 --- a/internal/net/grpc/stream_test.go +++ b/internal/net/grpc/stream_test.go @@ -203,10 +203,10 @@ func TestBidirectionalStream(t *testing.T) { // // func TestBidirectionalStreamClient(t *testing.T) { // type args struct { -// stream ClientStream -// dataProvider func() any -// newData func() any -// f func(any, error) +// stream ClientStream +// concurrency int +// sendDataProvider func() *S +// callBack func(*R, error) bool // } // type want struct { // err error @@ -232,9 +232,9 @@ func TestBidirectionalStream(t *testing.T) { // name: "test_case_1", // args: args { // stream:nil, -// dataProvider:nil, -// newData:nil, -// f:nil, +// concurrency:0, +// sendDataProvider:nil, +// callBack:nil, // }, // want: want{}, // checkFunc: defaultCheckFunc, @@ -254,9 +254,9 @@ func TestBidirectionalStream(t *testing.T) { // name: "test_case_2", // args: args { // stream:nil, -// dataProvider:nil, -// newData:nil, -// f:nil, +// concurrency:0, +// sendDataProvider:nil, +// callBack:nil, // }, // want: want{}, // checkFunc: defaultCheckFunc, @@ -287,7 +287,7 @@ func TestBidirectionalStream(t *testing.T) { // checkFunc = defaultCheckFunc // } // -// err := BidirectionalStreamClient(test.args.stream, test.args.dataProvider, test.args.newData, test.args.f) +// err := BidirectionalStreamClient(test.args.stream, test.args.concurrency, test.args.sendDataProvider, test.args.callBack) // if err := checkFunc(test.want, err); err != nil { // tt.Errorf("error = %v", err) // } diff --git a/internal/net/http/client/client.go b/internal/net/http/client/client.go index 40e45172b5..0fcb9953be 100644 --- a/internal/net/http/client/client.go +++ b/internal/net/http/client/client.go @@ -34,10 +34,22 @@ type transport struct { // New initializes the HTTP2 transport with exponential backoff and returns the HTTP client for it, or returns any error occurred. func New(opts ...Option) (*http.Client, error) { - tr := new(transport) - tr.Transport = new(http.Transport) + return NewWithTransport(http.DefaultTransport, opts...) +} +// NewWithTransport initializes the HTTP2 transport with the given RoundTripper and +// exponential backoff, returning the HTTP client for it, or any error that occurred. 
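+// Note that when rt is not an *http.Transport, the given RoundTripper is not wrapped as-is;
+// a clone of http.DefaultTransport is used instead (see the type assertion below).
+//
+// A minimal usage sketch, assuming the default options are sufficient:
+//
+//	base := http.DefaultTransport.(*http.Transport).Clone()
+//	base.MaxIdleConns = 128
+//	cli, err := NewWithTransport(base)
+//	if err != nil {
+//		return err
+//	}
+//	resp, err := cli.Get("https://example.com")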
+func NewWithTransport(rt http.RoundTripper, opts ...Option) (*http.Client, error) { + tr := new(transport) + t, ok := rt.(*http.Transport) + if ok { + tr.Transport = t.Clone() + } else { + // Initialize with default transport if the provided one is not *http.Transport + tr.Transport = http.DefaultTransport.(*http.Transport).Clone() + } for _, opt := range append(defaultOptions, opts...) { + // ... existing code ... if err := opt(tr); err != nil { werr := errors.ErrOptionFailed(err, reflect.ValueOf(opt)) e := new(errors.ErrCriticalOption) diff --git a/internal/net/http/client/client_test.go b/internal/net/http/client/client_test.go index b3cbd678f3..8a17bf7310 100644 --- a/internal/net/http/client/client_test.go +++ b/internal/net/http/client/client_test.go @@ -211,3 +211,95 @@ func TestNew(t *testing.T) { } // NOT IMPLEMENTED BELOW +// +// func TestNewWithTransport(t *testing.T) { +// type args struct { +// rt http.RoundTripper +// opts []Option +// } +// type want struct { +// want *http.Client +// err error +// } +// type test struct { +// name string +// args args +// want want +// checkFunc func(want, *http.Client, error) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, got *http.Client, err error) error { +// if !errors.Is(err, w.err) { +// return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) +// } +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// rt:nil, +// opts:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// rt:nil, +// opts:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// got, err := NewWithTransport(test.args.rt, test.args.opts...) 
+// if err := checkFunc(test.want, got, err); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } diff --git a/internal/net/net.go b/internal/net/net.go index 4d2a8647be..e2b5b18b2f 100644 --- a/internal/net/net.go +++ b/internal/net/net.go @@ -196,7 +196,15 @@ func Parse(addr string) (host string, port uint16, isLocal, isIPv4, isIPv6 bool, ip, nerr := netip.ParseAddr(host) if nerr != nil { - log.Debugf("host: %s,\tport: %d,\tip: %#v,\terror: %v", host, port, ip, nerr) + ips, err := DefaultResolver.LookupIPAddr(context.Background(), host) + if err != nil || ips == nil || len(ips) == 0 { + log.Debugf("host: %s,\tport: %d,\tip: %#v,\tParseAddr error: %v, LookupIPAddr error:", host, port, ip, nerr, err) + } else { + ip, nerr = netip.ParseAddr(ips[0].String()) + if nerr != nil { + log.Debugf("host: %s,\tport: %d,\tip: %#v,\tParseAddr error: %v", host, port, ip, nerr) + } + } } // return host and port and flags diff --git a/internal/net/net_test.go b/internal/net/net_test.go index 28ab0ee575..96055c505e 100644 --- a/internal/net/net_test.go +++ b/internal/net/net_test.go @@ -1197,3 +1197,173 @@ func TestJoinHostPort(t *testing.T) { } // NOT IMPLEMENTED BELOW +// +// func TestIsUDP(t *testing.T) { +// type args struct { +// network string +// } +// type want struct { +// want bool +// } +// type test struct { +// name string +// args args +// want want +// checkFunc func(want, bool) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, got bool) error { +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// network:"", +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// network:"", +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// got := IsUDP(test.args.network) +// if err := checkFunc(test.want, got); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func TestIsTCP(t *testing.T) { +// type args struct { +// network string +// } +// type want struct { +// want bool +// } +// type test struct { +// name string +// args args +// want want +// checkFunc func(want, bool) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, got bool) error { +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", 
+// args: args { +// network:"", +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// network:"", +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// got := IsTCP(test.args.network) +// if err := checkFunc(test.want, got); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } diff --git a/internal/net/quic/conn_test.go b/internal/net/quic/conn_test.go new file mode 100644 index 0000000000..0da39db2dd --- /dev/null +++ b/internal/net/quic/conn_test.go @@ -0,0 +1,483 @@ +package quic + +// NOT IMPLEMENTED BELOW +// +// func TestNewConn(t *testing.T) { +// type args struct { +// ctx context.Context +// conn quic.Connection +// } +// type want struct { +// want net.Conn +// err error +// } +// type test struct { +// name string +// args args +// want want +// checkFunc func(want, net.Conn, error) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, got net.Conn, err error) error { +// if !errors.Is(err, w.err) { +// return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) +// } +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// ctx:nil, +// conn:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// ctx:nil, +// conn:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// got, err := NewConn(test.args.ctx, test.args.conn) +// if err := checkFunc(test.want, got, err); err != nil { +// tt.Errorf("error = %v", err) +// } +// +// }) +// } +// } +// +// func TestConn_Close(t *testing.T) { +// type fields struct { +// Connection quic.Connection +// Stream quic.Stream +// } +// 
type want struct { +// err error +// } +// type test struct { +// name string +// fields fields +// want want +// checkFunc func(want, error) error +// beforeFunc func(*testing.T) +// afterFunc func(*testing.T) +// } +// defaultCheckFunc := func(w want, err error) error { +// if !errors.Is(err, w.err) { +// return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// fields: fields { +// Connection:nil, +// Stream:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T,) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T,) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// fields: fields { +// Connection:nil, +// Stream:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T,) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T,) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// c := &Conn{ +// Connection: test.fields.Connection, +// Stream: test.fields.Stream, +// } +// +// err := c.Close() +// if err := checkFunc(test.want, err); err != nil { +// tt.Errorf("error = %v", err) +// } +// +// }) +// } +// } +// +// func TestDialContext(t *testing.T) { +// type args struct { +// ctx context.Context +// addr string +// tcfg *tls.Config +// } +// type want struct { +// want net.Conn +// err error +// } +// type test struct { +// name string +// args args +// want want +// checkFunc func(want, net.Conn, error) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, got net.Conn, err error) error { +// if !errors.Is(err, w.err) { +// return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) +// } +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// ctx:nil, +// addr:"", +// tcfg:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// ctx:nil, +// addr:"", +// tcfg:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if 
test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// got, err := DialContext(test.args.ctx, test.args.addr, test.args.tcfg) +// if err := checkFunc(test.want, got, err); err != nil { +// tt.Errorf("error = %v", err) +// } +// +// }) +// } +// } +// +// func Test_qconn_dialQuicContext(t *testing.T) { +// type args struct { +// ctx context.Context +// addr string +// tcfg *tls.Config +// } +// type fields struct { +// connectionCache sync.Map[string, quic.Connection] +// } +// type want struct { +// want net.Conn +// err error +// } +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want, net.Conn, error) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, got net.Conn, err error) error { +// if !errors.Is(err, w.err) { +// return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) +// } +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// ctx:nil, +// addr:"", +// tcfg:nil, +// }, +// fields: fields { +// connectionCache:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// ctx:nil, +// addr:"", +// tcfg:nil, +// }, +// fields: fields { +// connectionCache:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// q := &qconn{ +// connectionCache: test.fields.connectionCache, +// } +// +// got, err := q.dialQuicContext(test.args.ctx, test.args.addr, test.args.tcfg) +// if err := checkFunc(test.want, got, err); err != nil { +// tt.Errorf("error = %v", err) +// } +// +// }) +// } +// } +// +// func Test_qconn_Close(t *testing.T) { +// type fields struct { +// connectionCache sync.Map[string, quic.Connection] +// } +// type want struct { +// err error +// } +// type test struct { +// name string +// fields fields +// want want +// checkFunc func(want, error) error +// beforeFunc func(*testing.T) +// afterFunc func(*testing.T) +// } +// defaultCheckFunc := func(w want, err error) error { +// if !errors.Is(err, w.err) { +// return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// fields: fields { +// connectionCache:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T,) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T,) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() 
test { +// return test { +// name: "test_case_2", +// fields: fields { +// connectionCache:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T,) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T,) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// q := &qconn{ +// connectionCache: test.fields.connectionCache, +// } +// +// err := q.Close() +// if err := checkFunc(test.want, err); err != nil { +// tt.Errorf("error = %v", err) +// } +// +// }) +// } +// } diff --git a/internal/net/quic/listener_test.go b/internal/net/quic/listener_test.go new file mode 100644 index 0000000000..e53c2c449b --- /dev/null +++ b/internal/net/quic/listener_test.go @@ -0,0 +1,196 @@ +package quic + +// NOT IMPLEMENTED BELOW +// +// func TestListen(t *testing.T) { +// type args struct { +// ctx context.Context +// addr string +// tcfg *tls.Config +// } +// type want struct { +// want net.Listener +// err error +// } +// type test struct { +// name string +// args args +// want want +// checkFunc func(want, net.Listener, error) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, got net.Listener, err error) error { +// if !errors.Is(err, w.err) { +// return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) +// } +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// ctx:nil, +// addr:"", +// tcfg:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// ctx:nil, +// addr:"", +// tcfg:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// got, err := Listen(test.args.ctx, test.args.addr, test.args.tcfg) +// if err := checkFunc(test.want, got, err); err != nil { +// tt.Errorf("error = %v", err) +// } +// +// }) +// } +// } +// +// func TestListener_Accept(t *testing.T) { +// type fields struct { +// Listener quic.Listener +// ctx context.Context +// } +// type want struct { +// want net.Conn +// err error +// } +// type test struct { +// name string +// fields fields +// want want +// checkFunc func(want, net.Conn, error) error +// 
beforeFunc func(*testing.T) +// afterFunc func(*testing.T) +// } +// defaultCheckFunc := func(w want, got net.Conn, err error) error { +// if !errors.Is(err, w.err) { +// return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) +// } +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// fields: fields { +// Listener:nil, +// ctx:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T,) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T,) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// fields: fields { +// Listener:nil, +// ctx:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T,) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T,) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// l := &Listener{ +// Listener: test.fields.Listener, +// ctx: test.fields.ctx, +// } +// +// got, err := l.Accept() +// if err := checkFunc(test.want, got, err); err != nil { +// tt.Errorf("error = %v", err) +// } +// +// }) +// } +// } diff --git a/internal/params/option.go b/internal/params/option.go index 1e125aec7a..f34e961d0f 100644 --- a/internal/params/option.go +++ b/internal/params/option.go @@ -17,15 +17,31 @@ // Package params provides implementation of Go API for argument parser package params +import ( + "os" + "path/filepath" +) + type Option func(*parser) var defaultOptions = []Option{ + WithName(filepath.Base(os.Args[0])), WithConfigFilePathKeys("f", "file", "c", "config"), WithConfigFilePathDefault("/etc/server/config.yaml"), WithConfigFileDescription("config file path"), WithVersionKeys("v", "ver", "version"), WithVersionFlagDefault(false), WithVersionDescription("show server version"), + WithOverrideDefault(false), +} + +// WithName returns Option that sets name. +func WithName(name string) Option { + return func(p *parser) { + if name != "" { + p.name = name + } + } } // WithConfigFilePathKeys returns Option that sets filePath.keys. @@ -69,3 +85,25 @@ func WithVersionDescription(desc string) Option { p.version.description = desc } } + +// WithOverrideDefault returns Option that overrides default flag.CommandLine. +func WithOverrideDefault(flag bool) Option { + return func(p *parser) { + p.overrideDefault = flag + } +} + +// WithArgumentFilters returns Option that sets filters. +// filters is a slice of functions that takes a string and returns a bool. +// If the string not matched all filters (means filter returns false), it will be added to the arguments. +func WithArgumentFilters(filters ...func(string) bool) Option { + return func(p *parser) { + if len(filters) == 0 { + return + } + if p.filters == nil { + p.filters = make([]func(string) bool, 0, len(filters)) + } + p.filters = append(p.filters, filters...) 
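+		// Arguments for which any of these filters returns true are removed in Parse
+		// before the flag set parses the remaining command-line arguments.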
+ } +} diff --git a/internal/params/option_test.go b/internal/params/option_test.go index 0c67d11d5c..e4c2581fff 100644 --- a/internal/params/option_test.go +++ b/internal/params/option_test.go @@ -448,3 +448,258 @@ func TestWithVersionDescription(t *testing.T) { } // NOT IMPLEMENTED BELOW +// +// func TestWithName(t *testing.T) { +// type args struct { +// name string +// } +// type want struct { +// want Option +// } +// type test struct { +// name string +// args args +// want want +// checkFunc func(want, Option) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, got Option) error { +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// name:"", +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// name:"", +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// got := WithName(test.args.name) +// if err := checkFunc(test.want, got); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func TestWithOverrideDefault(t *testing.T) { +// type args struct { +// flag bool +// } +// type want struct { +// want Option +// } +// type test struct { +// name string +// args args +// want want +// checkFunc func(want, Option) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, got Option) error { +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// flag:false, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// flag:false, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer 
test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// got := WithOverrideDefault(test.args.flag) +// if err := checkFunc(test.want, got); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func TestWithArgumentFilters(t *testing.T) { +// type args struct { +// filters []func(string) bool +// } +// type want struct { +// want Option +// } +// type test struct { +// name string +// args args +// want want +// checkFunc func(want, Option) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, got Option) error { +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// filters:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// filters:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// got := WithArgumentFilters(test.args.filters...) +// if err := checkFunc(test.want, got); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } diff --git a/internal/params/params.go b/internal/params/params.go index 1b5ba05bc7..4d2f0de2b1 100644 --- a/internal/params/params.go +++ b/internal/params/params.go @@ -20,12 +20,20 @@ package params import ( "flag" "os" - "path/filepath" + "slices" "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/file" ) +type ErrorHandling = flag.ErrorHandling + +const ( + ContinueOnError ErrorHandling = flag.ContinueOnError + PanicOnError ErrorHandling = flag.PanicOnError + ExitOnError ErrorHandling = flag.ExitOnError +) + // Data is an interface to get the configuration path and flag. type Data interface { ConfigFilePath() string @@ -43,7 +51,12 @@ type Parser interface { } type parser struct { - filePath struct { + overrideDefault bool + name string + filters []func(string) bool + f *flag.FlagSet + defaults *flag.FlagSet + filePath struct { keys []string defaultPath string description string @@ -53,6 +66,7 @@ type parser struct { defaultFlag bool description string } + ErrorHandler ErrorHandling } // New returns parser object. @@ -61,16 +75,19 @@ func New(opts ...Option) Parser { for _, opt := range append(defaultOptions, opts...) { opt(p) } + p.defaults = flag.CommandLine + p.f = flag.NewFlagSet(p.name, p.ErrorHandler) + if p.overrideDefault { + p.Override() + } return p } // Parse parses command-line argument and returns parsed data and whether there is a help option or not and error. 
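+// Arguments matching any filter registered via WithArgumentFilters are removed before
+// parsing, and a flag.ErrHelp result is reported as (nil, true, nil) rather than as an error.
+//
+// A minimal usage sketch (the name and the "-test." filter are illustrative assumptions):
+//
+//	p := New(
+//		WithName("vald-agent"),
+//		WithArgumentFilters(func(s string) bool {
+//			return strings.HasPrefix(s, "-test.")
+//		}),
+//	)
+//	data, help, err := p.Parse()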
func (p *parser) Parse() (Data, bool, error) { - f := flag.NewFlagSet(filepath.Base(os.Args[0]), flag.ContinueOnError) - d := new(data) for _, key := range p.filePath.keys { - f.StringVar(&d.configFilePath, + p.f.StringVar(&d.configFilePath, key, p.filePath.defaultPath, p.filePath.description, @@ -78,16 +95,28 @@ func (p *parser) Parse() (Data, bool, error) { } for _, key := range p.version.keys { - f.BoolVar(&d.showVersion, + p.f.BoolVar(&d.showVersion, key, p.version.defaultFlag, p.version.description, ) } - err := f.Parse(os.Args[1:]) + args := os.Args[1:] + if p.filters != nil { + args = slices.DeleteFunc(args, func(s string) bool { + for _, filter := range p.filters { + if filter != nil && filter(s) { + return true + } + } + return false + }) + } + + err := p.f.Parse(args) if err != nil { - if err != flag.ErrHelp { + if !errors.Is(err, flag.ErrHelp) { return nil, false, errors.ErrArgumentParseFailed(err) } return nil, true, nil @@ -96,13 +125,25 @@ func (p *parser) Parse() (Data, bool, error) { if exist, _, err := file.ExistsWithDetail(d.configFilePath); !d.showVersion && (!exist || d.configFilePath == "") { - f.Usage() + p.f.Usage() return nil, true, err } return d, false, nil } +func (p *parser) Restore() { + if p.defaults != nil { + flag.CommandLine = p.defaults + } +} + +func (p *parser) Override() { + if p.f != nil { + flag.CommandLine = p.f + } +} + // ConfigFilePath returns configFilePath. func (d *data) ConfigFilePath() string { return d.configFilePath diff --git a/internal/params/params_test.go b/internal/params/params_test.go index 0df3c71ac4..128b89eac8 100644 --- a/internal/params/params_test.go +++ b/internal/params/params_test.go @@ -449,3 +449,241 @@ func Test_data_ShowVersion(t *testing.T) { } // NOT IMPLEMENTED BELOW +// +// func Test_parser_Restore(t *testing.T) { +// type fields struct { +// overrideDefault bool +// name string +// filters []func(string) bool +// f *flag.FlagSet +// defaults *flag.FlagSet +// filePath struct { +// keys []string +// defaultPath string +// description string +// } +// version struct { +// keys []string +// defaultFlag bool +// description string +// } +// ErrorHandler ErrorHandling +// } +// type want struct{} +// type test struct { +// name string +// fields fields +// want want +// checkFunc func(want) error +// beforeFunc func(*testing.T) +// afterFunc func(*testing.T) +// } +// defaultCheckFunc := func(w want) error { +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// fields: fields { +// overrideDefault:false, +// name:"", +// filters:nil, +// f:flag.FlagSet{}, +// defaults:flag.FlagSet{}, +// filePath:struct{keys []string; defaultPath string; description string}{}, +// version:struct{keys []string; defaultFlag bool; description string}{}, +// ErrorHandler:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T,) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T,) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// fields: fields { +// overrideDefault:false, +// name:"", +// filters:nil, +// f:flag.FlagSet{}, +// defaults:flag.FlagSet{}, +// filePath:struct{keys []string; defaultPath string; description string}{}, +// version:struct{keys []string; defaultFlag bool; description string}{}, +// ErrorHandler:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T,) { +// t.Helper() +// }, +// afterFunc: 
func(t *testing.T,) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// p := &parser{ +// overrideDefault: test.fields.overrideDefault, +// name: test.fields.name, +// filters: test.fields.filters, +// f: test.fields.f, +// defaults: test.fields.defaults, +// filePath: test.fields.filePath, +// version: test.fields.version, +// ErrorHandler: test.fields.ErrorHandler, +// } +// +// p.Restore() +// if err := checkFunc(test.want); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func Test_parser_Override(t *testing.T) { +// type fields struct { +// overrideDefault bool +// name string +// filters []func(string) bool +// f *flag.FlagSet +// defaults *flag.FlagSet +// filePath struct { +// keys []string +// defaultPath string +// description string +// } +// version struct { +// keys []string +// defaultFlag bool +// description string +// } +// ErrorHandler ErrorHandling +// } +// type want struct{} +// type test struct { +// name string +// fields fields +// want want +// checkFunc func(want) error +// beforeFunc func(*testing.T) +// afterFunc func(*testing.T) +// } +// defaultCheckFunc := func(w want) error { +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// fields: fields { +// overrideDefault:false, +// name:"", +// filters:nil, +// f:flag.FlagSet{}, +// defaults:flag.FlagSet{}, +// filePath:struct{keys []string; defaultPath string; description string}{}, +// version:struct{keys []string; defaultFlag bool; description string}{}, +// ErrorHandler:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T,) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T,) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// fields: fields { +// overrideDefault:false, +// name:"", +// filters:nil, +// f:flag.FlagSet{}, +// defaults:flag.FlagSet{}, +// filePath:struct{keys []string; defaultPath string; description string}{}, +// version:struct{keys []string; defaultFlag bool; description string}{}, +// ErrorHandler:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T,) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T,) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// p := &parser{ +// overrideDefault: test.fields.overrideDefault, +// name: test.fields.name, +// filters: test.fields.filters, +// f: test.fields.f, +// defaults: test.fields.defaults, +// filePath: test.fields.filePath, +// version: test.fields.version, +// ErrorHandler: test.fields.ErrorHandler, +// } +// +// p.Override() +// if err := checkFunc(test.want); err != nil { +// tt.Errorf("error 
= %v", err) +// } +// }) +// } +// } diff --git a/internal/rand/rand.go b/internal/rand/rand.go index 204e1ec23a..a935a0b80e 100644 --- a/internal/rand/rand.go +++ b/internal/rand/rand.go @@ -67,7 +67,12 @@ func (r *rand) init() *rand { if r.x == nil { r.x = new(uint32) } - x := fastime.UnixNanoNow() - atomic.StoreUint32(r.x, uint32((x>>32)^x)) + for { + seed := uint32((fastime.UnixNanoNow() >> 32) ^ fastime.UnixNanoNow()) + if seed != 0 { + atomic.StoreUint32(r.x, seed) + break + } + } return r } diff --git a/internal/servers/server/option_test.go b/internal/servers/server/option_test.go index f6a7bfc550..a4fbbd77a6 100644 --- a/internal/servers/server/option_test.go +++ b/internal/servers/server/option_test.go @@ -3724,3 +3724,343 @@ func TestDefaultHealthServerOption(t *testing.T) { // }) // } // } +// +// func TestWithGRPCMaxConcurrentStreams(t *testing.T) { +// type args struct { +// size uint32 +// } +// type want struct { +// want Option +// } +// type test struct { +// name string +// args args +// want want +// checkFunc func(want, Option) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, got Option) error { +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// size:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// size:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// got := WithGRPCMaxConcurrentStreams(test.args.size) +// if err := checkFunc(test.want, got); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func TestWithGRPCNumStreamWorkers(t *testing.T) { +// type args struct { +// size uint32 +// } +// type want struct { +// want Option +// } +// type test struct { +// name string +// args args +// want want +// checkFunc func(want, Option) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, got Option) error { +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// size:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// 
name: "test_case_2", +// args: args { +// size:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// got := WithGRPCNumStreamWorkers(test.args.size) +// if err := checkFunc(test.want, got); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func TestWithGRPCSharedWriteBuffer(t *testing.T) { +// type args struct { +// enable bool +// } +// type want struct { +// want Option +// } +// type test struct { +// name string +// args args +// want want +// checkFunc func(want, Option) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, got Option) error { +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// enable:false, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// enable:false, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// got := WithGRPCSharedWriteBuffer(test.args.enable) +// if err := checkFunc(test.want, got); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func TestWithGRPCWaitForHandlers(t *testing.T) { +// type args struct { +// wait bool +// } +// type want struct { +// want Option +// } +// type test struct { +// name string +// args args +// want want +// checkFunc func(want, Option) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, got Option) error { +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// wait:false, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO 
test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// wait:false, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// got := WithGRPCWaitForHandlers(test.args.wait) +// if err := checkFunc(test.want, got); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } diff --git a/internal/sync/errgroup/group.go b/internal/sync/errgroup/group.go index eb252943eb..1aae147b41 100644 --- a/internal/sync/errgroup/group.go +++ b/internal/sync/errgroup/group.go @@ -14,36 +14,43 @@ // limitations under the License. // -// Package errgroup provides server global wait group for graceful kill all goroutine +// Package errgroup provides a global wait group for gracefully terminating all goroutines. +// It is a custom implementation similar to sync/errgroup. package errgroup import ( "context" "runtime" + "sync/atomic" "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/sync" "github.com/vdaas/vald/internal/sync/semaphore" ) -// A Group is a collection of goroutines working on subtasks that are part of -// the same overall task. -// -// A zero Group is valid, has no limit on the number of active goroutines, -// and does not cancel on error. +// Group is a collection of goroutines working on subtasks that are part of the same overall task. +// A zero Group is valid; it has no limit on the number of active goroutines and does not cancel on error. type Group interface { + // Go starts the given function either in a new goroutine or inline (if limit == 1). Go(func() error) + // SetLimit sets the maximum number of active goroutines. SetLimit(limit int) + // TryGo attempts to start the given function, returning true if it was started. TryGo(func() error) bool + // Wait blocks until all tasks started with Go have completed, returning the first non-nil error (if any). Wait() error } +// group is the concrete implementation of Group. type group struct { egctx context.Context cancel context.CancelCauseFunc wg sync.WaitGroup + // limit controls how many tasks can run concurrently. + limit atomic.Int64 + // sem is used to limit concurrent goroutines when limit > 1. sem *semaphore.Weighted cancelOnce sync.Once @@ -57,21 +64,24 @@ var ( once sync.Once ) +// New creates a new Group and returns it along with its derived Context. func New(ctx context.Context) (Group, context.Context) { - g := &group{emap: make(map[string]struct{})} + g := &group{ + emap: make(map[string]struct{}), + } + // Create a context that can be canceled with a cause. g.egctx, g.cancel = context.WithCancelCause(ctx) return g, g.egctx } // WithContext returns a new Group and an associated Context derived from ctx. -// -// The derived Context is canceled the first time a function passed to Go -// returns a non-nil error or the first time Wait returns, whichever occurs -// first. 
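+//
+// A minimal usage sketch (doTask is a placeholder assumption):
+//
+//	g, ctx := errgroup.WithContext(ctx)
+//	g.SetLimit(4) // a limit of 1 executes tasks inline instead of spawning goroutines
+//	for i := 0; i < 10; i++ {
+//		g.Go(func() error { return doTask(ctx, i) })
+//	}
+//	err := g.Wait()
+//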
+// The derived Context is canceled the first time a function passed to Go returns a non-nil error +// or the first time Wait returns, whichever occurs first. func WithContext(ctx context.Context) (Group, context.Context) { return New(ctx) } +// Init initializes the global errgroup instance. func Init(ctx context.Context) (egctx context.Context) { egctx = ctx once.Do(func() { @@ -80,6 +90,7 @@ func Init(ctx context.Context) (egctx context.Context) { return } +// Get returns the global errgroup instance, initializing it if necessary. func Get() Group { if instance == nil { Init(context.Background()) @@ -87,49 +98,89 @@ func Get() Group { return instance } +// Go is a package-level helper that calls the Go method on the global instance. func Go(f func() error) { + if instance == nil { + Init(context.Background()) + } instance.Go(f) } +// TryGo is a package-level helper that calls the TryGo method on the global instance. func TryGo(f func() error) bool { + if instance == nil { + Init(context.Background()) + } return instance.TryGo(f) } -// SetLimit limits the number of active goroutines in this group to at most n. +// SetLimit sets the maximum number of active goroutines in the group. // A negative value indicates no limit. -// -// Any subsequent call to the Go method will block until it can add an active -// goroutine without exceeding the configured limit. -// -// The limit must not be modified while any goroutines in the group are active. +// This must not be modified while any tasks are active. func (g *group) SetLimit(limit int) { - if limit < 0 { + if limit <= 1 { + // For limit == 1 (inline serial execution) or limit <= 0 (no limit), do not use a semaphore. g.sem = nil + g.limit.Store(int64(limit)) return } - + // For concurrent execution, initialize or resize the semaphore. if g.sem == nil { g.sem = semaphore.NewWeighted(int64(limit)) } else { g.sem.Resize(int64(limit)) } + g.limit.Store(int64(limit)) } -// Go calls the given function in a new goroutine. -// It blocks until the new goroutine can be added without the number of -// active goroutines in the group exceeding the configured limit. -// -// The first call to return a non-nil error cancels the group's context, if the -// group was created by calling WithContext. The error will be returned by Wait. +// exec executes the provided function inline (synchronously); it is used when limit == 1. +// It records any error that is not a cancellation or deadline error and cancels the group's context, mirroring the error handling of the asynchronous path. +// Performance Note: Inline execution avoids the overhead of goroutine scheduling and context switching. +func (g *group) exec(f func() error) { + // Execute the task function. + err := f() + if err != nil { + // If the error is not due to cancellation or deadline, yield and record it. + if !errors.Is(err, context.Canceled) && !errors.Is(err, context.DeadlineExceeded) { + runtime.Gosched() + g.appendErr(err) + } + // Cancel the context with the encountered error. + g.doCancel(err) + } +} + +// run schedules the provided function to run in a new goroutine (asynchronously). +// It wraps the call with wait group operations and reuses exec for error handling. +func (g *group) run(f func() error) { + g.wg.Add(1) + go func() { + // done() will call wg.Done() and release the semaphore slot. + defer g.done() + g.exec(f) + }() +} + +// Go calls the given function either in a new goroutine or inline based on the limit.
+// For limit == 1, the function is executed inline to avoid unnecessary goroutine creation. +// For limit > 1, the function is scheduled in a new goroutine after acquiring the semaphore. func (g *group) Go(f func() error) { if f == nil { return } + // Check if we should execute inline (serial execution). + if g.limit.Load() == 1 { + g.exec(f) + return + } + // In concurrent mode, acquire the semaphore before launching a new goroutine. if g.sem != nil { err := g.sem.Acquire(g.egctx, 1) if err != nil { - if !errors.Is(err, context.Canceled) && - !errors.Is(err, context.DeadlineExceeded) { + // Handle errors from semaphore acquisition if not due to cancellation or deadline. + if !errors.Is(err, context.Canceled) && !errors.Is(err, context.DeadlineExceeded) { g.appendErr(err) } return @@ -138,14 +189,19 @@ func (g *group) Go(f func() error) { g.run(f) } -// TryGo calls the given function in a new goroutine only if the number of -// active goroutines in the group is currently below the configured limit. -// -// The return value reports whether the goroutine was started. +// TryGo attempts to run the function, starting it in a new goroutine or inline if limit == 1, +// only if the number of active tasks is below the configured limit. +// Returns true if the function was executed, false otherwise. func (g *group) TryGo(f func() error) bool { if f == nil { return false } + // Execute inline if in serial mode. + if g.limit.Load() == 1 { + g.exec(f) + return true + } + // In concurrent mode, try to acquire the semaphore without blocking. if g.sem != nil && !g.sem.TryAcquire(1) { return false } @@ -153,23 +209,7 @@ func (g *group) TryGo(f func() error) bool { return true } -func (g *group) run(f func() error) { - g.wg.Add(1) - go func() { - defer g.done() - err := f() - if err != nil { - if !errors.Is(err, context.Canceled) && - !errors.Is(err, context.DeadlineExceeded) { - runtime.Gosched() - g.appendErr(err) - } - g.doCancel(err) - return - } - }() -} - +// appendErr appends the error to the group's error list if it has not been recorded before. func (g *group) appendErr(err error) { g.mu.RLock() _, ok := g.emap[err.Error()] @@ -182,6 +222,7 @@ func (g *group) appendErr(err error) { } } +// done releases the semaphore (if used) and marks the task as done in the wait group. func (g *group) done() { defer g.wg.Done() if g.sem != nil { @@ -189,6 +230,8 @@ func (g *group) done() { } } +// doCancel cancels the group's context with the provided error. +// It ensures that cancellation is performed only once. func (g *group) doCancel(err error) { g.cancelOnce.Do(func() { if g.cancel != nil { @@ -197,14 +240,11 @@ func (g *group) doCancel(err error) { }) } -func Wait() error { - return instance.Wait() -} - -// Wait blocks until all function calls from the Go method have returned, then -// returns the first non-nil error (if any) from them. +// Wait blocks until all tasks started with Go have completed. +// It returns the first non-nil error (if any) from the executed tasks. func (g *group) Wait() (err error) { g.wg.Wait() + // After all tasks complete, cancel the context to propagate cancellation if needed. g.doCancel(context.Canceled) g.mu.RLock() defer g.mu.RUnlock() @@ -217,3 +257,11 @@ func (g *group) Wait() (err error) { return errors.Join(g.errs...) } } + +// Wait is a package-level helper that calls the Wait method on the global instance. 
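The refactored Group above now dispatches tasks in two ways: with a limit of 1, Go and TryGo execute each task inline in the caller's goroutine; with a larger limit, tasks run concurrently behind the semaphore; and the package-level helpers lazily initialize the global instance. The following is a minimal usage sketch, not part of this patch, assuming only the API shown in this file and its import path within the repository:

package main

import (
	"context"
	"fmt"

	"github.com/vdaas/vald/internal/sync/errgroup"
)

func main() {
	g, ctx := errgroup.New(context.Background())
	// limit == 1: Go runs each task inline, so no goroutine is spawned.
	g.SetLimit(1)
	for i := 0; i < 3; i++ {
		i := i
		g.Go(func() error {
			select {
			case <-ctx.Done():
				return ctx.Err()
			default:
				fmt.Println("task", i)
				return nil
			}
		})
	}
	// Wait returns the recorded errors (nil here); context cancellation and
	// deadline errors returned by tasks are not recorded by the group.
	if err := g.Wait(); err != nil {
		fmt.Println("error:", err)
	}
}

Changing SetLimit(1) to any value greater than 1 switches the same code to the semaphore-backed concurrent path without further modification.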
+func Wait() error { + if instance == nil { + return nil + } + return instance.Wait() +} diff --git a/internal/sync/errgroup/group_test.go b/internal/sync/errgroup/group_test.go index 228849dc5b..6816745653 100644 --- a/internal/sync/errgroup/group_test.go +++ b/internal/sync/errgroup/group_test.go @@ -943,33 +943,29 @@ func Test_group_Wait(t *testing.T) { // } // } // -// func Test_group_TryGo(t *testing.T) { +// func Test_group_exec(t *testing.T) { // type args struct { // f func() error // } // type fields struct { // egctx context.Context // cancel context.CancelCauseFunc +// limit atomic.Int64 // sem *semaphore.Weighted // emap map[string]struct{} // errs []error // } -// type want struct { -// want bool -// } +// type want struct{} // type test struct { // name string // args args // fields fields // want want -// checkFunc func(want, bool) error +// checkFunc func(want) error // beforeFunc func(*testing.T, args) // afterFunc func(*testing.T, args) // } -// defaultCheckFunc := func(w want, got bool) error { -// if !reflect.DeepEqual(got, w.want) { -// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) -// } +// defaultCheckFunc := func(w want) error { // return nil // } // tests := []test{ @@ -983,6 +979,7 @@ func Test_group_Wait(t *testing.T) { // fields: fields { // egctx:nil, // cancel:nil, +// limit:nil, // sem:nil, // emap:nil, // errs:nil, @@ -1009,6 +1006,7 @@ func Test_group_Wait(t *testing.T) { // fields: fields { // egctx:nil, // cancel:nil, +// limit:nil, // sem:nil, // emap:nil, // errs:nil, @@ -1044,13 +1042,14 @@ func Test_group_Wait(t *testing.T) { // g := &group{ // egctx: test.fields.egctx, // cancel: test.fields.cancel, +// limit: test.fields.limit, // sem: test.fields.sem, // emap: test.fields.emap, // errs: test.fields.errs, // } // -// got := g.TryGo(test.args.f) -// if err := checkFunc(test.want, got); err != nil { +// g.exec(test.args.f) +// if err := checkFunc(test.want); err != nil { // tt.Errorf("error = %v", err) // } // }) @@ -1064,6 +1063,7 @@ func Test_group_Wait(t *testing.T) { // type fields struct { // egctx context.Context // cancel context.CancelCauseFunc +// limit atomic.Int64 // sem *semaphore.Weighted // emap map[string]struct{} // errs []error @@ -1092,6 +1092,7 @@ func Test_group_Wait(t *testing.T) { // fields: fields { // egctx:nil, // cancel:nil, +// limit:nil, // sem:nil, // emap:nil, // errs:nil, @@ -1118,6 +1119,7 @@ func Test_group_Wait(t *testing.T) { // fields: fields { // egctx:nil, // cancel:nil, +// limit:nil, // sem:nil, // emap:nil, // errs:nil, @@ -1153,6 +1155,7 @@ func Test_group_Wait(t *testing.T) { // g := &group{ // egctx: test.fields.egctx, // cancel: test.fields.cancel, +// limit: test.fields.limit, // sem: test.fields.sem, // emap: test.fields.emap, // errs: test.fields.errs, @@ -1166,6 +1169,124 @@ func Test_group_Wait(t *testing.T) { // } // } // +// func Test_group_TryGo(t *testing.T) { +// type args struct { +// f func() error +// } +// type fields struct { +// egctx context.Context +// cancel context.CancelCauseFunc +// limit atomic.Int64 +// sem *semaphore.Weighted +// emap map[string]struct{} +// errs []error +// } +// type want struct { +// want bool +// } +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want, bool) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, got bool) error { +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: 
\"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// f:nil, +// }, +// fields: fields { +// egctx:nil, +// cancel:nil, +// limit:nil, +// sem:nil, +// emap:nil, +// errs:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// f:nil, +// }, +// fields: fields { +// egctx:nil, +// cancel:nil, +// limit:nil, +// sem:nil, +// emap:nil, +// errs:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// g := &group{ +// egctx: test.fields.egctx, +// cancel: test.fields.cancel, +// limit: test.fields.limit, +// sem: test.fields.sem, +// emap: test.fields.emap, +// errs: test.fields.errs, +// } +// +// got := g.TryGo(test.args.f) +// if err := checkFunc(test.want, got); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// // func Test_group_appendErr(t *testing.T) { // type args struct { // err error @@ -1173,6 +1294,7 @@ func Test_group_Wait(t *testing.T) { // type fields struct { // egctx context.Context // cancel context.CancelCauseFunc +// limit atomic.Int64 // sem *semaphore.Weighted // emap map[string]struct{} // errs []error @@ -1201,6 +1323,7 @@ func Test_group_Wait(t *testing.T) { // fields: fields { // egctx:nil, // cancel:nil, +// limit:nil, // sem:nil, // emap:nil, // errs:nil, @@ -1227,6 +1350,7 @@ func Test_group_Wait(t *testing.T) { // fields: fields { // egctx:nil, // cancel:nil, +// limit:nil, // sem:nil, // emap:nil, // errs:nil, @@ -1262,6 +1386,7 @@ func Test_group_Wait(t *testing.T) { // g := &group{ // egctx: test.fields.egctx, // cancel: test.fields.cancel, +// limit: test.fields.limit, // sem: test.fields.sem, // emap: test.fields.emap, // errs: test.fields.errs, @@ -1279,6 +1404,7 @@ func Test_group_Wait(t *testing.T) { // type fields struct { // egctx context.Context // cancel context.CancelCauseFunc +// limit atomic.Int64 // sem *semaphore.Weighted // emap map[string]struct{} // errs []error @@ -1303,6 +1429,7 @@ func Test_group_Wait(t *testing.T) { // fields: fields { // egctx:nil, // cancel:nil, +// limit:nil, // sem:nil, // emap:nil, // errs:nil, @@ -1326,6 +1453,7 @@ func Test_group_Wait(t *testing.T) { // fields: fields { // egctx:nil, // cancel:nil, +// limit:nil, // sem:nil, // emap:nil, // errs:nil, @@ -1361,6 +1489,7 @@ func Test_group_Wait(t *testing.T) { // g := &group{ // egctx: test.fields.egctx, // cancel: test.fields.cancel, +// limit: test.fields.limit, // sem: test.fields.sem, // emap: test.fields.emap, // errs: test.fields.errs, diff --git a/internal/test/data/vector/gen.go b/internal/test/data/vector/gen.go index 1e53be3ed0..771fadb488 100644 --- 
a/internal/test/data/vector/gen.go +++ b/internal/test/data/vector/gen.go @@ -16,7 +16,6 @@ package vector import ( "math" "math/rand" - "time" "github.com/vdaas/vald/internal/errors" irand "github.com/vdaas/vald/internal/rand" @@ -95,7 +94,6 @@ func NegativeUniformDistributedFloat32VectorGenerator(n, dim int) (vecs [][]floa rvs := UniformDistributedFloat32VectorGenerator(n, right) vecs = make([][]float32, 0, n) // skipcq: GO-S1033 - rand.Seed(time.Now().UnixNano()) for i := 0; i < n; i++ { // skipcq: CRT-D0001 vs := append(lvs[i], rvs[i]...) diff --git a/internal/test/data/vector/noise/noise.go b/internal/test/data/vector/noise/noise.go new file mode 100644 index 0000000000..aa7cc018ec --- /dev/null +++ b/internal/test/data/vector/noise/noise.go @@ -0,0 +1,193 @@ +// +// Copyright (C) 2019-2025 vdaas.org vald team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// package noise provides a noise generator for adding noise to vectors. +package noise + +import ( + "math" + "math/bits" + "slices" + "time" + + "github.com/vdaas/vald/internal/log" + "github.com/vdaas/vald/internal/rand" +) + +type Func func(i uint64, vec []float32) (res []float32) + +// ------------------------------- +// Public Interface for Addition | +// ------------------------------- + +// Modifier defines the interface for adding noise on‑the‑fly. +// External packages obtain the noise‑adding function via the Mod method. +type Modifier interface { + // Mod returns a function which, given a sample index and a vector, + // produces a modified vector with noise added. + Mod() Func +} + +// ------------------------------------------- +// noiseGenerator Struct and Receiver Methods | +// ------------------------------------------- + +// noiseGenerator encapsulates the dataset and all pre‑computed noise parameters. +// During construction, it calculates the optimal noise level, estimates the required noise table size, +// and precomputes a noise table using a fast Gaussian generator. These values remain constant as long as the dataset is unchanged. +type noiseGenerator struct { + // Configuration factors. + noiseLevelFactor float32 // Fraction of the average standard deviation used as noise level. + noiseTableDivisionFactor uint64 // Division factor for computing the noise table size. + minNoiseTableSize uint64 // Minimum allowed size for the noise table. + + // Precomputed parameters. + noiseTable []float32 // Precomputed noise table (each sample is already scaled by the noise level). +} + +// New constructs a new noiseGenerator instance using the Functional Option Pattern. +// It applies the default options first, then any user‑provided options. +// It precomputes the noise level and noise table size based on the input dataset and test sample count, +// and then precomputes the noise table. +// It returns a Modifier interface. +func New(data [][]float32, num uint64, opts ...Option) Modifier { + ng := new(noiseGenerator) + // Apply default options first, then any additional options. 
+ for _, opt := range append(defaultOptions, opts...) { + opt(ng) + } + + start := time.Now() + log.Infof("started at %v to precomputes Noise Table: noiseLevelFactor: %v, noiseTableDivisionFactor: %v, minNoiseTableSize: %v", + start, ng.noiseLevelFactor, ng.noiseTableDivisionFactor, ng.minNoiseTableSize) + + // Precompute the noise level based on the dataset. + // The noise level is computed as the average standard deviation of all vectors multiplied by noiseLevelFactor. + noiseLevel := func() float32 { + if len(data) == 0 { + return 0.01 // Default if dataset is empty. + } + var totalStd float64 + var count int + for _, vec := range data { + lv := float32(len(vec)) + if lv == 0 { + continue + } + // Compute the mean of the vector. + var sum float32 + for _, v := range vec { + sum += v + } + mean := sum / lv + // Compute variance and standard deviation. + var varSum float32 + for _, v := range vec { + diff := v - mean + varSum += diff * diff + } + totalStd += math.Sqrt(float64(varSum / lv)) + count++ + } + return float32(totalStd/float64(count)) * ng.noiseLevelFactor + }() + + // Estimate the optimal noise table size. + // Heuristic: required unique noise samples = (num * vectorDim) / noiseTableDivisionFactor. + // The size is rounded up to the next power of two, ensuring it is at least minNoiseTableSize. + noiseTableSize := func() int { + if len(data) == 0 || len(data[0]) == 0 { + return 1 << 20 // Fallback default. + } + required := num * uint64(len(data[0])) // Total required unique noise samples. + // Reduce the required noise samples by the division factor. + required /= ng.noiseTableDivisionFactor + // Ensure the noise table size is at least minNoiseTableSize. + if required < ng.minNoiseTableSize { + required = ng.minNoiseTableSize + } + return 1 << bits.Len64(required-1) + }() + + // Precompute the noise table using fastGaussian32. + // The noise table is an array of noise samples (each already scaled by the computed noise level), + // and a larger table reduces periodic artifacts when the same values are reused. + var ( + haveSpare32 bool + spare32 float32 + ) + // Preallocate the noise table. + // The noise table is precomputed to avoid generating noise on‑the‑fly during the test. + // This is faster and ensures that the same noise values are used for the same sample index. + // The noise table is a power of two in size to allow for fast modulo indexing. + ng.noiseTable = make([]float32, noiseTableSize) + for i := 0; i < noiseTableSize; i++ { + ng.noiseTable[i] = func() float32 { + if haveSpare32 { + haveSpare32 = false + return spare32 + } + var u, v, s float32 + // Generate two random numbers in the range [-1, 1] until s = u*u + v*v is in (0,1). + for { + // Use Box-Muller transform to generate two independent standard normal variables. + // This is faster than using the standard library's Gaussian generator. + // rand.Float32() returns a random number in the range [0, 1). + u = rand.Float32()*2 - 1 + v = rand.Float32()*2 - 1 + s = u*u + v*v + if s > 0 && s < 1 { + break + } + } + fs := float64(s) + // Compute multiplier = sqrt(-2 * ln(s) / s) and scale it by the computed noise level. + multiplier := float32(math.Sqrt(-2*math.Log(fs)/fs)) * noiseLevel + // Cache a spare sample. + spare32 = v * multiplier + // Indicate that a spare sample is available. + haveSpare32 = true + // Return the first sample. 
+ return u * multiplier + }() + } + log.Infof("finished at %v to precomputes Noise Table: noiseTableSize: %d, noiseLevel: %f, noiseTable: %d", + func() time.Duration { + return time.Since(start) + }(), noiseTableSize, noiseLevel, len(ng.noiseTable)) + + return ng +} + +// Mod implements the Modifier interface. +// It returns a function that, when called with a sample index and a vector, +// produces a modified vector by adding noise values from the precomputed noise table. +// The noise is selected deterministically based on the sample index. +func (ng *noiseGenerator) Mod() Func { + // Clone the noise table so that the mod function uses a copy (if needed). + noiseTable := slices.Clone(ng.noiseTable) + tableSize := uint64(len(noiseTable)) + return func(i uint64, vec []float32) (res []float32) { + // Clone the input vector to avoid mutating the original. + res = slices.Clone(vec) + n := uint64(len(res)) + baseIdx := i * n // Precompute the base index. + for j := uint64(0); j < n; j++ { + res[j] += noiseTable[(baseIdx+j)%tableSize] + } + return res + } +} diff --git a/internal/test/data/vector/noise/noise_test.go b/internal/test/data/vector/noise/noise_test.go new file mode 100644 index 0000000000..34befedbf7 --- /dev/null +++ b/internal/test/data/vector/noise/noise_test.go @@ -0,0 +1,196 @@ +package noise + +// NOT IMPLEMENTED BELOW +// +// func TestNew(t *testing.T) { +// type args struct { +// data [][]float32 +// num uint64 +// opts []Option +// } +// type want struct { +// want Modifier +// } +// type test struct { +// name string +// args args +// want want +// checkFunc func(want, Modifier) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, got Modifier) error { +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// data:nil, +// num:0, +// opts:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// data:nil, +// num:0, +// opts:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// got := New(test.args.data, test.args.num, test.args.opts...) 
+// if err := checkFunc(test.want, got); err != nil { +// tt.Errorf("error = %v", err) +// } +// +// }) +// } +// } +// +// func Test_noiseGenerator_Mod(t *testing.T) { +// type fields struct { +// noiseLevelFactor float32 +// noiseTableDivisionFactor uint64 +// minNoiseTableSize uint64 +// noiseTable []float32 +// } +// type want struct { +// want Func +// } +// type test struct { +// name string +// fields fields +// want want +// checkFunc func(want, Func) error +// beforeFunc func(*testing.T) +// afterFunc func(*testing.T) +// } +// defaultCheckFunc := func(w want, got Func) error { +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// fields: fields { +// noiseLevelFactor:0, +// noiseTableDivisionFactor:0, +// minNoiseTableSize:0, +// noiseTable:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T,) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T,) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// fields: fields { +// noiseLevelFactor:0, +// noiseTableDivisionFactor:0, +// minNoiseTableSize:0, +// noiseTable:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T,) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T,) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// ng := &noiseGenerator{ +// noiseLevelFactor: test.fields.noiseLevelFactor, +// noiseTableDivisionFactor: test.fields.noiseTableDivisionFactor, +// minNoiseTableSize: test.fields.minNoiseTableSize, +// noiseTable: test.fields.noiseTable, +// } +// +// got := ng.Mod() +// if err := checkFunc(test.want, got); err != nil { +// tt.Errorf("error = %v", err) +// } +// +// }) +// } +// } diff --git a/internal/test/data/vector/noise/option.go b/internal/test/data/vector/noise/option.go new file mode 100644 index 0000000000..1f6e14865f --- /dev/null +++ b/internal/test/data/vector/noise/option.go @@ -0,0 +1,56 @@ +// +// Copyright (C) 2019-2025 vdaas.org vald team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// package noise provides a noise generator for adding noise to vectors. +package noise + +// --------------------------------------- +// Functional Options and Default Options | +// --------------------------------------- + +// Option is a functional option for configuring a noiseGenerator. 
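The generator above is configured through the functional options defined in this file; the defaults are a level factor of 0.1, a table division factor of 10, and a minimum table size of 1024. A small usage sketch follows (not part of this patch; the dataset and option values are illustrative only):

package main

import (
	"fmt"

	"github.com/vdaas/vald/internal/test/data/vector/noise"
)

func main() {
	// A tiny dataset of 2-dimensional vectors, used only for illustration.
	data := [][]float32{
		{0.1, 0.2},
		{0.3, 0.4},
		{0.5, 0.6},
	}
	// Build a Modifier for 10 planned test vectors, overriding two defaults.
	mod := noise.New(data, 10,
		noise.WithLevelFactor(0.2),
		noise.WithMinTableSize(2048),
	).Mod()

	// The returned function clones the input and adds precomputed noise,
	// so the original vectors are never mutated.
	fmt.Println(mod(0, data[0]))
	fmt.Println(mod(1, data[1]))
}

Within a single run the same sample index always selects the same slice of the precomputed table, so a generated query can be reproduced; across runs the table itself is re-randomized.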
+type Option func(*noiseGenerator) + +// defaultOptions holds the default configuration for noiseGenerator. +var defaultOptions = []Option{ + // Set default noise level factor to 10% (i.e., 0.1) + WithLevelFactor(0.1), + // Set default noise table division factor to 10. + WithTableDivisionFactor(10), + // Set default minimum noise table size to 1024. + WithMinTableSize(1024), +} + +// WithLevelFactor sets the fraction of the average standard deviation used as the noise level. +func WithLevelFactor(f float32) Option { + return func(ng *noiseGenerator) { + ng.noiseLevelFactor = f + } +} + +// WithTableDivisionFactor sets the division factor used when sizing the noise table. +func WithTableDivisionFactor(f uint64) Option { + return func(ng *noiseGenerator) { + ng.noiseTableDivisionFactor = f + } +} + +// WithMinTableSize sets the minimum allowed size for the noise table. +func WithMinTableSize(s uint64) Option { + return func(ng *noiseGenerator) { + ng.minNoiseTableSize = s + } +} diff --git a/internal/test/data/vector/noise/option_test.go b/internal/test/data/vector/noise/option_test.go new file mode 100644 index 0000000000..ddb39f7022 --- /dev/null +++ b/internal/test/data/vector/noise/option_test.go @@ -0,0 +1,261 @@ +package noise + +// NOT IMPLEMENTED BELOW +// +// func TestWithLevelFactor(t *testing.T) { +// type args struct { +// f float32 +// } +// type want struct { +// want Option +// } +// type test struct { +// name string +// args args +// want want +// checkFunc func(want, Option) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, got Option) error { +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// f:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// f:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// got := WithLevelFactor(test.args.f) +// if err := checkFunc(test.want, got); err != nil { +// tt.Errorf("error = %v", err) +// } +// +// }) +// } +// } +// +// func TestWithTableDivisionFactor(t *testing.T) { +// type args struct { +// f uint64 +// } +// type want struct { +// want Option +// } +// type test struct { +// name string +// args args +// want want +// checkFunc func(want, Option) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, got Option) error { +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, 
w.want) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// f:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// f:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// got := WithTableDivisionFactor(test.args.f) +// if err := checkFunc(test.want, got); err != nil { +// tt.Errorf("error = %v", err) +// } +// +// }) +// } +// } +// +// func TestWithMinTableSize(t *testing.T) { +// type args struct { +// s uint64 +// } +// type want struct { +// want Option +// } +// type test struct { +// name string +// args args +// want want +// checkFunc func(want, Option) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, got Option) error { +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// s:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// s:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// got := WithMinTableSize(test.args.s) +// if err := checkFunc(test.want, got); err != nil { +// tt.Errorf("error = %v", err) +// } +// +// }) +// } +// } diff --git a/internal/timeutil/time.go b/internal/timeutil/time.go index 30e164fef3..ce55c2b90c 100644 --- a/internal/timeutil/time.go +++ b/internal/timeutil/time.go @@ -47,3 +47,13 @@ func ParseWithDefault(t string, d time.Duration) time.Duration { return parsed } + +type DurationString string + +func (d DurationString) Duration() (time.Duration, error) { + return Parse(string(d)) +} + +func (d DurationString) DurationWithDefault(def time.Duration) time.Duration { + return 
ParseWithDefault(string(d), def) +} diff --git a/internal/timeutil/time_test.go b/internal/timeutil/time_test.go index 8098bf9893..197210e105 100644 --- a/internal/timeutil/time_test.go +++ b/internal/timeutil/time_test.go @@ -181,3 +181,169 @@ func TestParseWithDefault(t *testing.T) { } // NOT IMPLEMENTED BELOW +// +// func TestDurationString_Duration(t *testing.T) { +// type want struct { +// want time.Duration +// err error +// } +// type test struct { +// name string +// d DurationString +// want want +// checkFunc func(want, time.Duration, error) error +// beforeFunc func(*testing.T) +// afterFunc func(*testing.T) +// } +// defaultCheckFunc := func(w want, got time.Duration, err error) error { +// if !errors.Is(err, w.err) { +// return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) +// } +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T,) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T,) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T,) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T,) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// got, err := test.d.Duration() +// if err := checkFunc(test.want, got, err); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } +// +// func TestDurationString_DurationWithDefault(t *testing.T) { +// type args struct { +// def time.Duration +// } +// type want struct { +// want time.Duration +// } +// type test struct { +// name string +// args args +// d DurationString +// want want +// checkFunc func(want, time.Duration) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, got time.Duration) error { +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// def:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// def:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { 
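For reference, a brief sketch of how the new DurationString type might be used, assuming Parse accepts the same syntax as time.ParseDuration (the sketch is illustrative and not part of this patch):

package main

import (
	"fmt"
	"time"

	"github.com/vdaas/vald/internal/timeutil"
)

func main() {
	d := timeutil.DurationString("1.5s")
	// Duration parses the string and returns an error for malformed input.
	if parsed, err := d.Duration(); err == nil {
		fmt.Println(parsed) // 1.5s
	}

	// DurationWithDefault falls back to the given default when parsing fails.
	bad := timeutil.DurationString("not-a-duration")
	fmt.Println(bad.DurationWithDefault(30 * time.Second)) // 30s
}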
+// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// got := test.d.DurationWithDefault(test.args.def) +// if err := checkFunc(test.want, got); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } diff --git a/pkg/agent/core/ngt/service/ngt.go b/pkg/agent/core/ngt/service/ngt.go index 66fc3fbf2f..a4fba8cd47 100644 --- a/pkg/agent/core/ngt/service/ngt.go +++ b/pkg/agent/core/ngt/service/ngt.go @@ -1472,6 +1472,10 @@ func (n *ngt) loadStatistics(ctx context.Context) (err error) { log.Errorf("failed to load index statistics to cache: %v", err) return err } + if stats == nil { + log.Warn("failed to load index statistics to cache: stats is nil") + return nil + } n.statisticsCache.Store(&payload.Info_Index_Statistics{ Valid: stats.Valid, MedianIndegree: stats.MedianIndegree, diff --git a/pkg/agent/core/ngt/service/ngt_test.go b/pkg/agent/core/ngt/service/ngt_test.go index 2c3591184d..96325386d1 100644 --- a/pkg/agent/core/ngt/service/ngt_test.go +++ b/pkg/agent/core/ngt/service/ngt_test.go @@ -36,6 +36,7 @@ import ( core "github.com/vdaas/vald/internal/core/algorithm/ngt" "github.com/vdaas/vald/internal/errors" "github.com/vdaas/vald/internal/file" + "github.com/vdaas/vald/internal/k8s/vald" kvald "github.com/vdaas/vald/internal/k8s/vald" "github.com/vdaas/vald/internal/log" "github.com/vdaas/vald/internal/net/grpc" @@ -11042,6 +11043,9 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // } // // func Test_ngt_loadStatistics(t *testing.T) { +// type args struct { +// ctx context.Context +// } // type fields struct { // core core.NGT // eg errgroup.Group @@ -11096,11 +11100,12 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // } // type test struct { // name string +// args args // fields fields // want want // checkFunc func(want, error) error -// beforeFunc func(*testing.T) -// afterFunc func(*testing.T) +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) // } // defaultCheckFunc := func(w want, err error) error { // if !errors.Is(err, w.err) { @@ -11113,6 +11118,9 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // /* // { // name: "test_case_1", +// args: args { +// ctx:nil, +// }, // fields: fields { // core:nil, // eg:nil, @@ -11164,10 +11172,10 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // }, // want: want{}, // checkFunc: defaultCheckFunc, -// beforeFunc: func(t *testing.T,) { +// beforeFunc: func(t *testing.T, args args) { // t.Helper() // }, -// afterFunc: func(t *testing.T,) { +// afterFunc: func(t *testing.T, args args) { // t.Helper() // }, // }, @@ -11178,6 +11186,9 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // func() test { // return test { // name: "test_case_2", +// args: args { +// ctx:nil, +// }, // fields: fields { // core:nil, // eg:nil, @@ -11229,10 +11240,10 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // }, // want: want{}, // checkFunc: defaultCheckFunc, -// beforeFunc: func(t *testing.T,) { +// beforeFunc: func(t *testing.T, args args) { // t.Helper() // }, -// afterFunc: func(t *testing.T,) { +// afterFunc: func(t *testing.T, args args) { // t.Helper() // }, // } @@ -11246,10 +11257,10 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // tt.Parallel() // defer 
goleak.VerifyNone(tt, goleak.IgnoreCurrent()) // if test.beforeFunc != nil { -// test.beforeFunc(tt) +// test.beforeFunc(tt, test.args) // } // if test.afterFunc != nil { -// defer test.afterFunc(tt) +// defer test.afterFunc(tt, test.args) // } // checkFunc := test.checkFunc // if test.checkFunc == nil { @@ -11305,7 +11316,7 @@ func createRandomData(num int, cfg *createRandomDataConfig) []index { // statisticsCache: test.fields.statisticsCache, // } // -// err := n.loadStatistics() +// err := n.loadStatistics(test.args.ctx) // if err := checkFunc(test.want, err); err != nil { // tt.Errorf("error = %v", err) // } diff --git a/pkg/discoverer/k8s/service/discover_test.go b/pkg/discoverer/k8s/service/discover_test.go index c0211bbb22..7f346f6723 100644 --- a/pkg/discoverer/k8s/service/discover_test.go +++ b/pkg/discoverer/k8s/service/discover_test.go @@ -122,11 +122,11 @@ package service // pods sync.Map[string, *[]pod.Pod] // podMetrics sync.Map[string, mpod.Pod] // services sync.Map[string, *service.Service] -// podsByNode atomic.Value -// podsByNamespace atomic.Value -// podsByName atomic.Value -// nodeByName atomic.Value -// svcsByName atomic.Value +// podsByNode atomic.Pointer[map[string]map[string]map[string][]*payload.Info_Pod] +// podsByNamespace atomic.Pointer[map[string]map[string][]*payload.Info_Pod] +// podsByName atomic.Pointer[map[string][]*payload.Info_Pod] +// nodeByName atomic.Pointer[map[string]*payload.Info_Node] +// svcsByName atomic.Pointer[map[string]*payload.Info_Service] // ctrl k8s.Controller // namespace string // name string @@ -288,11 +288,11 @@ package service // pods sync.Map[string, *[]pod.Pod] // podMetrics sync.Map[string, mpod.Pod] // services sync.Map[string, *service.Service] -// podsByNode atomic.Value -// podsByNamespace atomic.Value -// podsByName atomic.Value -// nodeByName atomic.Value -// svcsByName atomic.Value +// podsByNode atomic.Pointer[map[string]map[string]map[string][]*payload.Info_Pod] +// podsByNamespace atomic.Pointer[map[string]map[string][]*payload.Info_Pod] +// podsByName atomic.Pointer[map[string][]*payload.Info_Pod] +// nodeByName atomic.Pointer[map[string]*payload.Info_Node] +// svcsByName atomic.Pointer[map[string]*payload.Info_Service] // ctrl k8s.Controller // namespace string // name string @@ -454,11 +454,11 @@ package service // pods sync.Map[string, *[]pod.Pod] // podMetrics sync.Map[string, mpod.Pod] // services sync.Map[string, *service.Service] -// podsByNode atomic.Value -// podsByNamespace atomic.Value -// podsByName atomic.Value -// nodeByName atomic.Value -// svcsByName atomic.Value +// podsByNode atomic.Pointer[map[string]map[string]map[string][]*payload.Info_Pod] +// podsByNamespace atomic.Pointer[map[string]map[string][]*payload.Info_Pod] +// podsByName atomic.Pointer[map[string][]*payload.Info_Pod] +// nodeByName atomic.Pointer[map[string]*payload.Info_Node] +// svcsByName atomic.Pointer[map[string]*payload.Info_Service] // ctrl k8s.Controller // namespace string // name string @@ -620,11 +620,11 @@ package service // pods sync.Map[string, *[]pod.Pod] // podMetrics sync.Map[string, mpod.Pod] // services sync.Map[string, *service.Service] -// podsByNode atomic.Value -// podsByNamespace atomic.Value -// podsByName atomic.Value -// nodeByName atomic.Value -// svcsByName atomic.Value +// podsByNode atomic.Pointer[map[string]map[string]map[string][]*payload.Info_Pod] +// podsByNamespace atomic.Pointer[map[string]map[string][]*payload.Info_Pod] +// podsByName atomic.Pointer[map[string][]*payload.Info_Pod] +// nodeByName 
atomic.Pointer[map[string]*payload.Info_Node] +// svcsByName atomic.Pointer[map[string]*payload.Info_Service] // ctrl k8s.Controller // namespace string // name string diff --git a/pkg/gateway/lb/handler/grpc/aggregation_test.go b/pkg/gateway/lb/handler/grpc/aggregation_test.go index de65871892..9f6a6f2445 100644 --- a/pkg/gateway/lb/handler/grpc/aggregation_test.go +++ b/pkg/gateway/lb/handler/grpc/aggregation_test.go @@ -34,25 +34,29 @@ package grpc // UnimplementedValdServer vald.UnimplementedValdServer // } // type want struct { -// wantRes *payload.Search_Response -// err error +// wantRes *payload.Search_Response +// wantAttrs []attribute.KeyValue +// err error // } // type test struct { // name string // args args // fields fields // want want -// checkFunc func(want, *payload.Search_Response, error) error +// checkFunc func(want, *payload.Search_Response, []attribute.KeyValue, error) error // beforeFunc func(*testing.T, args) // afterFunc func(*testing.T, args) // } -// defaultCheckFunc := func(w want, gotRes *payload.Search_Response, err error) error { +// defaultCheckFunc := func(w want, gotRes *payload.Search_Response, gotAttrs []attribute.KeyValue, err error) error { // if !errors.Is(err, w.err) { // return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) // } // if !reflect.DeepEqual(gotRes, w.wantRes) { // return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotRes, w.wantRes) // } +// if !reflect.DeepEqual(gotAttrs, w.wantAttrs) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotAttrs, w.wantAttrs) +// } // return nil // } // tests := []test{ @@ -150,8 +154,8 @@ package grpc // UnimplementedValdServer: test.fields.UnimplementedValdServer, // } // -// gotRes, err := s.aggregationSearch(test.args.ctx, test.args.aggr, test.args.bcfg, test.args.f) -// if err := checkFunc(test.want, gotRes, err); err != nil { +// gotRes, gotAttrs, err := s.aggregationSearch(test.args.ctx, test.args.aggr, test.args.bcfg, test.args.f) +// if err := checkFunc(test.want, gotRes, gotAttrs, err); err != nil { // tt.Errorf("error = %v", err) // } // }) diff --git a/pkg/gateway/lb/handler/grpc/handler_test.go b/pkg/gateway/lb/handler/grpc/handler_test.go index ca0ee8add4..0ddee37c4e 100644 --- a/pkg/gateway/lb/handler/grpc/handler_test.go +++ b/pkg/gateway/lb/handler/grpc/handler_test.go @@ -802,25 +802,29 @@ package grpc // UnimplementedValdServer vald.UnimplementedValdServer // } // type want struct { -// wantRes *payload.Search_Response -// err error +// wantRes *payload.Search_Response +// wantAttrs []attribute.KeyValue +// err error // } // type test struct { // name string // args args // fields fields // want want -// checkFunc func(want, *payload.Search_Response, error) error +// checkFunc func(want, *payload.Search_Response, []attribute.KeyValue, error) error // beforeFunc func(*testing.T, args) // afterFunc func(*testing.T, args) // } -// defaultCheckFunc := func(w want, gotRes *payload.Search_Response, err error) error { +// defaultCheckFunc := func(w want, gotRes *payload.Search_Response, gotAttrs []attribute.KeyValue, err error) error { // if !errors.Is(err, w.err) { // return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) // } // if !reflect.DeepEqual(gotRes, w.wantRes) { // return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotRes, w.wantRes) // } +// if !reflect.DeepEqual(gotAttrs, w.wantAttrs) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotAttrs, w.wantAttrs) +// } // return nil // 
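The discoverer test fixtures above replace untyped atomic.Value fields with generic atomic.Pointer fields, so loads and stores no longer need type assertions. A standalone sketch of the difference, using a plain map[string]string in place of the payload types for brevity:

package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	// atomic.Pointer[T] (standard library, Go 1.19+) stores a *T and returns a *T,
	// so there is no interface boxing and no runtime type assertion on Load.
	var nodes atomic.Pointer[map[string]string]

	m := map[string]string{"vald-agent-0": "Ready"}
	nodes.Store(&m)

	if p := nodes.Load(); p != nil {
		fmt.Println((*p)["vald-agent-0"]) // Ready
	}
}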
} // tests := []test{ @@ -916,8 +920,8 @@ package grpc // UnimplementedValdServer: test.fields.UnimplementedValdServer, // } // -// gotRes, err := s.doSearch(test.args.ctx, test.args.cfg, test.args.f) -// if err := checkFunc(test.want, gotRes, err); err != nil { +// gotRes, gotAttrs, err := s.doSearch(test.args.ctx, test.args.cfg, test.args.f) +// if err := checkFunc(test.want, gotRes, gotAttrs, err); err != nil { // tt.Errorf("error = %v", err) // } // }) diff --git a/pkg/index/job/deletion/config/config_test.go b/pkg/index/job/deletion/config/config_test.go new file mode 100644 index 0000000000..ac346d089c --- /dev/null +++ b/pkg/index/job/deletion/config/config_test.go @@ -0,0 +1,93 @@ +package config + +// NOT IMPLEMENTED BELOW +// +// func TestNewConfig(t *testing.T) { +// type args struct { +// path string +// } +// type want struct { +// wantCfg *Data +// err error +// } +// type test struct { +// name string +// args args +// want want +// checkFunc func(want, *Data, error) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, gotCfg *Data, err error) error { +// if !errors.Is(err, w.err) { +// return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) +// } +// if !reflect.DeepEqual(gotCfg, w.wantCfg) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", gotCfg, w.wantCfg) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// path:"", +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// path:"", +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// gotCfg, err := NewConfig(test.args.path) +// if err := checkFunc(test.want, gotCfg, err); err != nil { +// tt.Errorf("error = %v", err) +// } +// +// }) +// } +// } diff --git a/pkg/index/job/deletion/service/deleter_test.go b/pkg/index/job/deletion/service/deleter_test.go new file mode 100644 index 0000000000..ac2afb2d2c --- /dev/null +++ b/pkg/index/job/deletion/service/deleter_test.go @@ -0,0 +1,519 @@ +package service + +// NOT IMPLEMENTED BELOW +// +// func TestNew(t *testing.T) { +// type args struct { +// opts []Option +// } +// type want struct { +// want Deleter +// err error +// } +// type test struct { +// name string +// args args +// want want +// checkFunc func(want, Deleter, error) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, got Deleter, err error) error { +// if !errors.Is(err, w.err) { +// return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) +// } +// if !reflect.DeepEqual(got, w.want) 
{ +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// opts:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// opts:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// got, err := New(test.args.opts...) +// if err := checkFunc(test.want, got, err); err != nil { +// tt.Errorf("error = %v", err) +// } +// +// }) +// } +// } +// +// func Test_delDuplicateAddrs(t *testing.T) { +// type args struct { +// targetAddrs []string +// } +// type want struct { +// want []string +// } +// type test struct { +// name string +// args args +// want want +// checkFunc func(want, []string) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, got []string) error { +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// targetAddrs:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// targetAddrs:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// got := delDuplicateAddrs(test.args.targetAddrs) +// if err := checkFunc(test.want, got); err != nil { +// tt.Errorf("error = %v", err) +// } +// +// }) +// } +// } +// +// func Test_index_StartClient(t *testing.T) { +// type args struct { +// ctx context.Context +// } +// type fields struct { +// client discoverer.Client +// targetAddrs []string +// targetIndexID string +// concurrency int +// } +// type want struct { +// want <-chan error +// err error +// } +// type test struct { +// name string +// args args +// 
fields fields +// want want +// checkFunc func(want, <-chan error, error) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, got <-chan error, err error) error { +// if !errors.Is(err, w.err) { +// return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) +// } +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// ctx:nil, +// }, +// fields: fields { +// client:nil, +// targetAddrs:nil, +// targetIndexID:"", +// concurrency:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// ctx:nil, +// }, +// fields: fields { +// client:nil, +// targetAddrs:nil, +// targetIndexID:"", +// concurrency:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// idx := &index{ +// client: test.fields.client, +// targetAddrs: test.fields.targetAddrs, +// targetIndexID: test.fields.targetIndexID, +// concurrency: test.fields.concurrency, +// } +// +// got, err := idx.StartClient(test.args.ctx) +// if err := checkFunc(test.want, got, err); err != nil { +// tt.Errorf("error = %v", err) +// } +// +// }) +// } +// } +// +// func Test_index_Start(t *testing.T) { +// type args struct { +// ctx context.Context +// } +// type fields struct { +// client discoverer.Client +// targetAddrs []string +// targetIndexID string +// concurrency int +// } +// type want struct { +// err error +// } +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want, error) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, err error) error { +// if !errors.Is(err, w.err) { +// return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// ctx:nil, +// }, +// fields: fields { +// client:nil, +// targetAddrs:nil, +// targetIndexID:"", +// concurrency:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// ctx:nil, +// }, +// fields: fields { +// client:nil, +// targetAddrs:nil, +// targetIndexID:"", +// concurrency:0, +// }, +// want: want{}, +// checkFunc: 
defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// idx := &index{ +// client: test.fields.client, +// targetAddrs: test.fields.targetAddrs, +// targetIndexID: test.fields.targetIndexID, +// concurrency: test.fields.concurrency, +// } +// +// err := idx.Start(test.args.ctx) +// if err := checkFunc(test.want, err); err != nil { +// tt.Errorf("error = %v", err) +// } +// +// }) +// } +// } +// +// func Test_index_doDeleteIndex(t *testing.T) { +// type args struct { +// ctx context.Context +// fn func(_ context.Context, _ vald.RemoveClient, _ ...grpc.CallOption) (*payload.Object_Location, error) +// } +// type fields struct { +// client discoverer.Client +// targetAddrs []string +// targetIndexID string +// concurrency int +// } +// type want struct { +// err error +// } +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want, error) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, err error) error { +// if !errors.Is(err, w.err) { +// return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// ctx:nil, +// fn:nil, +// }, +// fields: fields { +// client:nil, +// targetAddrs:nil, +// targetIndexID:"", +// concurrency:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// ctx:nil, +// fn:nil, +// }, +// fields: fields { +// client:nil, +// targetAddrs:nil, +// targetIndexID:"", +// concurrency:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// idx := &index{ +// client: test.fields.client, +// targetAddrs: test.fields.targetAddrs, +// targetIndexID: test.fields.targetIndexID, +// concurrency: test.fields.concurrency, +// } +// +// err := idx.doDeleteIndex(test.args.ctx, test.args.fn) +// if err := checkFunc(test.want, err); err != nil { +// tt.Errorf("error = %v", err) +// } +// +// }) +// } +// } diff --git a/pkg/index/job/deletion/service/options_test.go b/pkg/index/job/deletion/service/options_test.go new file mode 
100644 index 0000000000..96e1c5dfd3 --- /dev/null +++ b/pkg/index/job/deletion/service/options_test.go @@ -0,0 +1,347 @@ +package service + +// NOT IMPLEMENTED BELOW +// +// func TestWithDiscoverer(t *testing.T) { +// type args struct { +// client discoverer.Client +// } +// type want struct { +// want Option +// } +// type test struct { +// name string +// args args +// want want +// checkFunc func(want, Option) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, got Option) error { +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// client:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// client:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// got := WithDiscoverer(test.args.client) +// if err := checkFunc(test.want, got); err != nil { +// tt.Errorf("error = %v", err) +// } +// +// }) +// } +// } +// +// func TestWithIndexingConcurrency(t *testing.T) { +// type args struct { +// num int +// } +// type want struct { +// want Option +// } +// type test struct { +// name string +// args args +// want want +// checkFunc func(want, Option) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, got Option) error { +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// num:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// num:0, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// 
checkFunc = defaultCheckFunc +// } +// +// got := WithIndexingConcurrency(test.args.num) +// if err := checkFunc(test.want, got); err != nil { +// tt.Errorf("error = %v", err) +// } +// +// }) +// } +// } +// +// func TestWithTargetAddrs(t *testing.T) { +// type args struct { +// addrs []string +// } +// type want struct { +// want Option +// } +// type test struct { +// name string +// args args +// want want +// checkFunc func(want, Option) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, got Option) error { +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// addrs:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// addrs:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// got := WithTargetAddrs(test.args.addrs...) 
+// if err := checkFunc(test.want, got); err != nil { +// tt.Errorf("error = %v", err) +// } +// +// }) +// } +// } +// +// func TestWithTargetIndexID(t *testing.T) { +// type args struct { +// indexID string +// } +// type want struct { +// want Option +// } +// type test struct { +// name string +// args args +// want want +// checkFunc func(want, Option) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, got Option) error { +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// indexID:"", +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// indexID:"", +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// got := WithTargetIndexID(test.args.indexID) +// if err := checkFunc(test.want, got); err != nil { +// tt.Errorf("error = %v", err) +// } +// +// }) +// } +// } diff --git a/pkg/index/job/deletion/usecase/deletion_test.go b/pkg/index/job/deletion/usecase/deletion_test.go new file mode 100644 index 0000000000..a9e53a139a --- /dev/null +++ b/pkg/index/job/deletion/usecase/deletion_test.go @@ -0,0 +1,672 @@ +package usecase + +// NOT IMPLEMENTED BELOW +// +// func TestNew(t *testing.T) { +// type args struct { +// cfg *config.Data +// } +// type want struct { +// want runner.Runner +// err error +// } +// type test struct { +// name string +// args args +// want want +// checkFunc func(want, runner.Runner, error) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, got runner.Runner, err error) error { +// if !errors.Is(err, w.err) { +// return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) +// } +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// cfg:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// cfg:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } 
+// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// got, err := New(test.args.cfg) +// if err := checkFunc(test.want, got, err); err != nil { +// tt.Errorf("error = %v", err) +// } +// +// }) +// } +// } +// +// func Test_run_PreStart(t *testing.T) { +// type args struct { +// ctx context.Context +// } +// type fields struct { +// eg errgroup.Group +// cfg *config.Data +// observability observability.Observability +// server starter.Server +// indexer service.Deleter +// } +// type want struct { +// err error +// } +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want, error) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, err error) error { +// if !errors.Is(err, w.err) { +// return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// ctx:nil, +// }, +// fields: fields { +// eg:nil, +// cfg:nil, +// observability:nil, +// server:nil, +// indexer:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// ctx:nil, +// }, +// fields: fields { +// eg:nil, +// cfg:nil, +// observability:nil, +// server:nil, +// indexer:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// r := &run{ +// eg: test.fields.eg, +// cfg: test.fields.cfg, +// observability: test.fields.observability, +// server: test.fields.server, +// indexer: test.fields.indexer, +// } +// +// err := r.PreStart(test.args.ctx) +// if err := checkFunc(test.want, err); err != nil { +// tt.Errorf("error = %v", err) +// } +// +// }) +// } +// } +// +// func Test_run_Start(t *testing.T) { +// type args struct { +// ctx context.Context +// } +// type fields struct { +// eg errgroup.Group +// cfg *config.Data +// observability observability.Observability +// server starter.Server +// indexer service.Deleter +// } +// type want struct { +// want <-chan error +// err error +// } +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want, <-chan error, error) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w 
want, got <-chan error, err error) error { +// if !errors.Is(err, w.err) { +// return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) +// } +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// ctx:nil, +// }, +// fields: fields { +// eg:nil, +// cfg:nil, +// observability:nil, +// server:nil, +// indexer:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// ctx:nil, +// }, +// fields: fields { +// eg:nil, +// cfg:nil, +// observability:nil, +// server:nil, +// indexer:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// r := &run{ +// eg: test.fields.eg, +// cfg: test.fields.cfg, +// observability: test.fields.observability, +// server: test.fields.server, +// indexer: test.fields.indexer, +// } +// +// got, err := r.Start(test.args.ctx) +// if err := checkFunc(test.want, got, err); err != nil { +// tt.Errorf("error = %v", err) +// } +// +// }) +// } +// } +// +// func Test_run_PreStop(t *testing.T) { +// type args struct { +// in0 context.Context +// } +// type fields struct { +// eg errgroup.Group +// cfg *config.Data +// observability observability.Observability +// server starter.Server +// indexer service.Deleter +// } +// type want struct { +// err error +// } +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want, error) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, err error) error { +// if !errors.Is(err, w.err) { +// return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// in0:nil, +// }, +// fields: fields { +// eg:nil, +// cfg:nil, +// observability:nil, +// server:nil, +// indexer:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// in0:nil, +// }, +// fields: fields { +// eg:nil, +// cfg:nil, +// observability:nil, +// server:nil, +// indexer:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } 
+// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// r := &run{ +// eg: test.fields.eg, +// cfg: test.fields.cfg, +// observability: test.fields.observability, +// server: test.fields.server, +// indexer: test.fields.indexer, +// } +// +// err := r.PreStop(test.args.in0) +// if err := checkFunc(test.want, err); err != nil { +// tt.Errorf("error = %v", err) +// } +// +// }) +// } +// } +// +// func Test_run_Stop(t *testing.T) { +// type args struct { +// ctx context.Context +// } +// type fields struct { +// eg errgroup.Group +// cfg *config.Data +// observability observability.Observability +// server starter.Server +// indexer service.Deleter +// } +// type want struct { +// err error +// } +// type test struct { +// name string +// args args +// fields fields +// want want +// checkFunc func(want, error) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, err error) error { +// if !errors.Is(err, w.err) { +// return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// ctx:nil, +// }, +// fields: fields { +// eg:nil, +// cfg:nil, +// observability:nil, +// server:nil, +// indexer:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// ctx:nil, +// }, +// fields: fields { +// eg:nil, +// cfg:nil, +// observability:nil, +// server:nil, +// indexer:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// r := &run{ +// eg: test.fields.eg, +// cfg: test.fields.cfg, +// observability: test.fields.observability, +// server: test.fields.server, +// indexer: test.fields.indexer, +// } +// +// err := r.Stop(test.args.ctx) +// if err := checkFunc(test.want, err); err != nil { +// tt.Errorf("error = %v", err) +// } +// +// }) +// } +// } +// +// func Test_run_PostStop(t *testing.T) { +// type args struct { +// in0 context.Context +// } +// type fields struct { +// eg errgroup.Group +// cfg *config.Data +// observability observability.Observability +// server starter.Server +// indexer service.Deleter +// } +// type want struct { +// err error +// } +// type test struct { +// name string +// args args +// fields fields +// want want +// 
checkFunc func(want, error) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, err error) error { +// if !errors.Is(err, w.err) { +// return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// in0:nil, +// }, +// fields: fields { +// eg:nil, +// cfg:nil, +// observability:nil, +// server:nil, +// indexer:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// in0:nil, +// }, +// fields: fields { +// eg:nil, +// cfg:nil, +// observability:nil, +// server:nil, +// indexer:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// r := &run{ +// eg: test.fields.eg, +// cfg: test.fields.cfg, +// observability: test.fields.observability, +// server: test.fields.server, +// indexer: test.fields.indexer, +// } +// +// err := r.PostStop(test.args.in0) +// if err := checkFunc(test.want, err); err != nil { +// tt.Errorf("error = %v", err) +// } +// +// }) +// } +// } diff --git a/pkg/tools/benchmark/job/service/insert_test.go b/pkg/tools/benchmark/job/service/insert_test.go index 2fff1a067a..9b0af7b583 100644 --- a/pkg/tools/benchmark/job/service/insert_test.go +++ b/pkg/tools/benchmark/job/service/insert_test.go @@ -23,6 +23,7 @@ package service // type fields struct { // eg errgroup.Group // dataset *config.BenchmarkDataset +// meta grpc.MD // jobType jobType // jobFunc func(context.Context, chan error) error // insertConfig *config.InsertConfig @@ -73,6 +74,7 @@ package service // fields: fields { // eg:nil, // dataset:nil, +// meta:nil, // jobType:nil, // jobFunc:nil, // insertConfig:nil, @@ -116,6 +118,7 @@ package service // fields: fields { // eg:nil, // dataset:nil, +// meta:nil, // jobType:nil, // jobFunc:nil, // insertConfig:nil, @@ -167,6 +170,7 @@ package service // j := &job{ // eg: test.fields.eg, // dataset: test.fields.dataset, +// meta: test.fields.meta, // jobType: test.fields.jobType, // jobFunc: test.fields.jobFunc, // insertConfig: test.fields.insertConfig, diff --git a/pkg/tools/benchmark/job/service/job_test.go b/pkg/tools/benchmark/job/service/job_test.go index 354db96498..3dad8eaa07 100644 --- a/pkg/tools/benchmark/job/service/job_test.go +++ b/pkg/tools/benchmark/job/service/job_test.go @@ -187,6 +187,7 @@ package service // type fields struct { // eg errgroup.Group // dataset *config.BenchmarkDataset +// meta grpc.MD // jobType jobType // jobFunc func(context.Context, chan error) error // insertConfig *config.InsertConfig @@ -236,6 +237,7 @@ package service // fields: fields { // eg:nil, // 
dataset:nil, +// meta:nil, // jobType:nil, // jobFunc:nil, // insertConfig:nil, @@ -278,6 +280,7 @@ package service // fields: fields { // eg:nil, // dataset:nil, +// meta:nil, // jobType:nil, // jobFunc:nil, // insertConfig:nil, @@ -329,6 +332,7 @@ package service // j := &job{ // eg: test.fields.eg, // dataset: test.fields.dataset, +// meta: test.fields.meta, // jobType: test.fields.jobType, // jobFunc: test.fields.jobFunc, // insertConfig: test.fields.insertConfig, @@ -365,6 +369,7 @@ package service // type fields struct { // eg errgroup.Group // dataset *config.BenchmarkDataset +// meta grpc.MD // jobType jobType // jobFunc func(context.Context, chan error) error // insertConfig *config.InsertConfig @@ -418,6 +423,7 @@ package service // fields: fields { // eg:nil, // dataset:nil, +// meta:nil, // jobType:nil, // jobFunc:nil, // insertConfig:nil, @@ -460,6 +466,7 @@ package service // fields: fields { // eg:nil, // dataset:nil, +// meta:nil, // jobType:nil, // jobFunc:nil, // insertConfig:nil, @@ -511,6 +518,7 @@ package service // j := &job{ // eg: test.fields.eg, // dataset: test.fields.dataset, +// meta: test.fields.meta, // jobType: test.fields.jobType, // jobFunc: test.fields.jobFunc, // insertConfig: test.fields.insertConfig, @@ -547,6 +555,7 @@ package service // type fields struct { // eg errgroup.Group // dataset *config.BenchmarkDataset +// meta grpc.MD // jobType jobType // jobFunc func(context.Context, chan error) error // insertConfig *config.InsertConfig @@ -596,6 +605,7 @@ package service // fields: fields { // eg:nil, // dataset:nil, +// meta:nil, // jobType:nil, // jobFunc:nil, // insertConfig:nil, @@ -638,6 +648,7 @@ package service // fields: fields { // eg:nil, // dataset:nil, +// meta:nil, // jobType:nil, // jobFunc:nil, // insertConfig:nil, @@ -689,6 +700,7 @@ package service // j := &job{ // eg: test.fields.eg, // dataset: test.fields.dataset, +// meta: test.fields.meta, // jobType: test.fields.jobType, // jobFunc: test.fields.jobFunc, // insertConfig: test.fields.insertConfig, diff --git a/pkg/tools/benchmark/job/service/object_test.go b/pkg/tools/benchmark/job/service/object_test.go index ca4bca8a8c..30cd0a48fa 100644 --- a/pkg/tools/benchmark/job/service/object_test.go +++ b/pkg/tools/benchmark/job/service/object_test.go @@ -23,6 +23,7 @@ package service // type fields struct { // eg errgroup.Group // dataset *config.BenchmarkDataset +// meta grpc.MD // jobType jobType // jobFunc func(context.Context, chan error) error // insertConfig *config.InsertConfig @@ -73,6 +74,7 @@ package service // fields: fields { // eg:nil, // dataset:nil, +// meta:nil, // jobType:nil, // jobFunc:nil, // insertConfig:nil, @@ -116,6 +118,7 @@ package service // fields: fields { // eg:nil, // dataset:nil, +// meta:nil, // jobType:nil, // jobFunc:nil, // insertConfig:nil, @@ -167,6 +170,7 @@ package service // j := &job{ // eg: test.fields.eg, // dataset: test.fields.dataset, +// meta: test.fields.meta, // jobType: test.fields.jobType, // jobFunc: test.fields.jobFunc, // insertConfig: test.fields.insertConfig, @@ -204,6 +208,7 @@ package service // type fields struct { // eg errgroup.Group // dataset *config.BenchmarkDataset +// meta grpc.MD // jobType jobType // jobFunc func(context.Context, chan error) error // insertConfig *config.InsertConfig @@ -254,6 +259,7 @@ package service // fields: fields { // eg:nil, // dataset:nil, +// meta:nil, // jobType:nil, // jobFunc:nil, // insertConfig:nil, @@ -297,6 +303,7 @@ package service // fields: fields { // eg:nil, // dataset:nil, +// 
meta:nil, // jobType:nil, // jobFunc:nil, // insertConfig:nil, @@ -348,6 +355,7 @@ package service // j := &job{ // eg: test.fields.eg, // dataset: test.fields.dataset, +// meta: test.fields.meta, // jobType: test.fields.jobType, // jobFunc: test.fields.jobFunc, // insertConfig: test.fields.insertConfig, diff --git a/pkg/tools/benchmark/job/service/option_test.go b/pkg/tools/benchmark/job/service/option_test.go index a4784b8cc5..b76e882f91 100644 --- a/pkg/tools/benchmark/job/service/option_test.go +++ b/pkg/tools/benchmark/job/service/option_test.go @@ -1629,3 +1629,88 @@ package service // }) // } // } +// +// func TestWithMetadata(t *testing.T) { +// type args struct { +// m map[string]string +// } +// type want struct { +// want Option +// } +// type test struct { +// name string +// args args +// want want +// checkFunc func(want, Option) error +// beforeFunc func(*testing.T, args) +// afterFunc func(*testing.T, args) +// } +// defaultCheckFunc := func(w want, got Option) error { +// if !reflect.DeepEqual(got, w.want) { +// return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) +// } +// return nil +// } +// tests := []test{ +// // TODO test cases +// /* +// { +// name: "test_case_1", +// args: args { +// m:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// }, +// */ +// +// // TODO test cases +// /* +// func() test { +// return test { +// name: "test_case_2", +// args: args { +// m:nil, +// }, +// want: want{}, +// checkFunc: defaultCheckFunc, +// beforeFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// afterFunc: func(t *testing.T, args args) { +// t.Helper() +// }, +// } +// }(), +// */ +// } +// +// for _, tc := range tests { +// test := tc +// t.Run(test.name, func(tt *testing.T) { +// tt.Parallel() +// defer goleak.VerifyNone(tt, goleak.IgnoreCurrent()) +// if test.beforeFunc != nil { +// test.beforeFunc(tt, test.args) +// } +// if test.afterFunc != nil { +// defer test.afterFunc(tt, test.args) +// } +// checkFunc := test.checkFunc +// if test.checkFunc == nil { +// checkFunc = defaultCheckFunc +// } +// +// got := WithMetadata(test.args.m) +// if err := checkFunc(test.want, got); err != nil { +// tt.Errorf("error = %v", err) +// } +// }) +// } +// } diff --git a/pkg/tools/benchmark/job/service/remove_test.go b/pkg/tools/benchmark/job/service/remove_test.go index 081a749179..f5f1e4ef11 100644 --- a/pkg/tools/benchmark/job/service/remove_test.go +++ b/pkg/tools/benchmark/job/service/remove_test.go @@ -23,6 +23,7 @@ package service // type fields struct { // eg errgroup.Group // dataset *config.BenchmarkDataset +// meta grpc.MD // jobType jobType // jobFunc func(context.Context, chan error) error // insertConfig *config.InsertConfig @@ -73,6 +74,7 @@ package service // fields: fields { // eg:nil, // dataset:nil, +// meta:nil, // jobType:nil, // jobFunc:nil, // insertConfig:nil, @@ -116,6 +118,7 @@ package service // fields: fields { // eg:nil, // dataset:nil, +// meta:nil, // jobType:nil, // jobFunc:nil, // insertConfig:nil, @@ -167,6 +170,7 @@ package service // j := &job{ // eg: test.fields.eg, // dataset: test.fields.dataset, +// meta: test.fields.meta, // jobType: test.fields.jobType, // jobFunc: test.fields.jobFunc, // insertConfig: test.fields.insertConfig, diff --git a/pkg/tools/benchmark/job/service/search_test.go b/pkg/tools/benchmark/job/service/search_test.go index 
0096d31a56..83bf82ed52 100644 --- a/pkg/tools/benchmark/job/service/search_test.go +++ b/pkg/tools/benchmark/job/service/search_test.go @@ -23,6 +23,7 @@ package service // type fields struct { // eg errgroup.Group // dataset *config.BenchmarkDataset +// meta grpc.MD // jobType jobType // jobFunc func(context.Context, chan error) error // insertConfig *config.InsertConfig @@ -73,6 +74,7 @@ package service // fields: fields { // eg:nil, // dataset:nil, +// meta:nil, // jobType:nil, // jobFunc:nil, // insertConfig:nil, @@ -116,6 +118,7 @@ package service // fields: fields { // eg:nil, // dataset:nil, +// meta:nil, // jobType:nil, // jobFunc:nil, // insertConfig:nil, @@ -167,6 +170,7 @@ package service // j := &job{ // eg: test.fields.eg, // dataset: test.fields.dataset, +// meta: test.fields.meta, // jobType: test.fields.jobType, // jobFunc: test.fields.jobFunc, // insertConfig: test.fields.insertConfig, diff --git a/pkg/tools/benchmark/job/service/update_test.go b/pkg/tools/benchmark/job/service/update_test.go index 50e202f719..90b4f0ca0e 100644 --- a/pkg/tools/benchmark/job/service/update_test.go +++ b/pkg/tools/benchmark/job/service/update_test.go @@ -23,6 +23,7 @@ package service // type fields struct { // eg errgroup.Group // dataset *config.BenchmarkDataset +// meta grpc.MD // jobType jobType // jobFunc func(context.Context, chan error) error // insertConfig *config.InsertConfig @@ -73,6 +74,7 @@ package service // fields: fields { // eg:nil, // dataset:nil, +// meta:nil, // jobType:nil, // jobFunc:nil, // insertConfig:nil, @@ -116,6 +118,7 @@ package service // fields: fields { // eg:nil, // dataset:nil, +// meta:nil, // jobType:nil, // jobFunc:nil, // insertConfig:nil, @@ -167,6 +170,7 @@ package service // j := &job{ // eg: test.fields.eg, // dataset: test.fields.dataset, +// meta: test.fields.meta, // jobType: test.fields.jobType, // jobFunc: test.fields.jobFunc, // insertConfig: test.fields.insertConfig, diff --git a/pkg/tools/benchmark/job/service/upsert_test.go b/pkg/tools/benchmark/job/service/upsert_test.go index 3aa71f4c30..538502e0eb 100644 --- a/pkg/tools/benchmark/job/service/upsert_test.go +++ b/pkg/tools/benchmark/job/service/upsert_test.go @@ -23,6 +23,7 @@ package service // type fields struct { // eg errgroup.Group // dataset *config.BenchmarkDataset +// meta grpc.MD // jobType jobType // jobFunc func(context.Context, chan error) error // insertConfig *config.InsertConfig @@ -73,6 +74,7 @@ package service // fields: fields { // eg:nil, // dataset:nil, +// meta:nil, // jobType:nil, // jobFunc:nil, // insertConfig:nil, @@ -116,6 +118,7 @@ package service // fields: fields { // eg:nil, // dataset:nil, +// meta:nil, // jobType:nil, // jobFunc:nil, // insertConfig:nil, @@ -167,6 +170,7 @@ package service // j := &job{ // eg: test.fields.eg, // dataset: test.fields.dataset, +// meta: test.fields.meta, // jobType: test.fields.jobType, // jobFunc: test.fields.jobFunc, // insertConfig: test.fields.insertConfig, diff --git a/pkg/tools/cli/loadtest/service/insert_test.go b/pkg/tools/cli/loadtest/service/insert_test.go index 71f69e8653..031ade7993 100644 --- a/pkg/tools/cli/loadtest/service/insert_test.go +++ b/pkg/tools/cli/loadtest/service/insert_test.go @@ -21,7 +21,7 @@ package service // batchSize int // } // type want struct { -// wantF func() any +// wantF func() *any // wantSize int // err error // } @@ -29,11 +29,11 @@ package service // name string // args args // want want -// checkFunc func(want, func() any, int, error) error +// checkFunc func(want, func() *any, int, 
error) error // beforeFunc func(*testing.T, args) // afterFunc func(*testing.T, args) // } -// defaultCheckFunc := func(w want, gotF func() any, gotSize int, err error) error { +// defaultCheckFunc := func(w want, gotF func() *any, gotSize int, err error) error { // if !errors.Is(err, w.err) { // return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) // } @@ -116,18 +116,18 @@ package service // dataset assets.Dataset // } // type want struct { -// want func() any +// want func() *any // want1 int // } // type test struct { // name string // args args // want want -// checkFunc func(want, func() any, int) error +// checkFunc func(want, func() *any, int) error // beforeFunc func(*testing.T, args) // afterFunc func(*testing.T, args) // } -// defaultCheckFunc := func(w want, got func() any, got1 int) error { +// defaultCheckFunc := func(w want, got func() *any, got1 int) error { // if !reflect.DeepEqual(got, w.want) { // return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) // } @@ -206,18 +206,18 @@ package service // n int // } // type want struct { -// want func() any +// want func() *any // want1 int // } // type test struct { // name string // args args // want want -// checkFunc func(want, func() any, int) error +// checkFunc func(want, func() *any, int) error // beforeFunc func(*testing.T, args) // afterFunc func(*testing.T, args) // } -// defaultCheckFunc := func(w want, got func() any, got1 int) error { +// defaultCheckFunc := func(w want, got func() *any, got1 int) error { // if !reflect.DeepEqual(got, w.want) { // return errors.Errorf("got: \"%#v\",\n\t\t\t\twant: \"%#v\"", got, w.want) // } @@ -302,7 +302,7 @@ package service // dataset string // progressDuration time.Duration // loaderFunc loadFunc -// dataProvider func() any +// sendDataProvider func() *any // dataSize int // operation config.Operation // } @@ -341,7 +341,7 @@ package service // dataset:"", // progressDuration:nil, // loaderFunc:nil, -// dataProvider:nil, +// sendDataProvider:nil, // dataSize:0, // operation:nil, // }, @@ -370,7 +370,7 @@ package service // dataset:"", // progressDuration:nil, // loaderFunc:nil, -// dataProvider:nil, +// sendDataProvider:nil, // dataSize:0, // operation:nil, // }, @@ -411,7 +411,7 @@ package service // dataset: test.fields.dataset, // progressDuration: test.fields.progressDuration, // loaderFunc: test.fields.loaderFunc, -// dataProvider: test.fields.dataProvider, +// sendDataProvider: test.fields.sendDataProvider, // dataSize: test.fields.dataSize, // operation: test.fields.operation, // } @@ -434,7 +434,7 @@ package service // dataset string // progressDuration time.Duration // loaderFunc loadFunc -// dataProvider func() any +// sendDataProvider func() *any // dataSize int // operation config.Operation // } @@ -473,7 +473,7 @@ package service // dataset:"", // progressDuration:nil, // loaderFunc:nil, -// dataProvider:nil, +// sendDataProvider:nil, // dataSize:0, // operation:nil, // }, @@ -502,7 +502,7 @@ package service // dataset:"", // progressDuration:nil, // loaderFunc:nil, -// dataProvider:nil, +// sendDataProvider:nil, // dataSize:0, // operation:nil, // }, @@ -543,7 +543,7 @@ package service // dataset: test.fields.dataset, // progressDuration: test.fields.progressDuration, // loaderFunc: test.fields.loaderFunc, -// dataProvider: test.fields.dataProvider, +// sendDataProvider: test.fields.sendDataProvider, // dataSize: test.fields.dataSize, // operation: test.fields.operation, // } diff --git 
a/pkg/tools/cli/loadtest/service/loader.go b/pkg/tools/cli/loadtest/service/loader.go index 2c896b1825..98b7ec7e9d 100644 --- a/pkg/tools/cli/loadtest/service/loader.go +++ b/pkg/tools/cli/loadtest/service/loader.go @@ -204,12 +204,12 @@ func (l *loader) do( } if l.operation == config.StreamInsert { - return nil, grpc.BidirectionalStreamClient(st.(grpc.ClientStream), l.sendDataProvider, func(i *payload.Empty, err error) bool { + return nil, grpc.BidirectionalStreamClient(st.(grpc.ClientStream), l.concurrency, l.sendDataProvider, func(i *payload.Empty, err error) bool { f(nil, err) return true }) } else { - return nil, grpc.BidirectionalStreamClient(st.(grpc.ClientStream), l.sendDataProvider, func(i *payload.Search_Response, err error) bool { + return nil, grpc.BidirectionalStreamClient(st.(grpc.ClientStream), l.concurrency, l.sendDataProvider, func(i *payload.Search_Response, err error) bool { f(nil, err) return true }) diff --git a/pkg/tools/cli/loadtest/service/loader_test.go b/pkg/tools/cli/loadtest/service/loader_test.go index 913bd348f3..f67b37f6bc 100644 --- a/pkg/tools/cli/loadtest/service/loader_test.go +++ b/pkg/tools/cli/loadtest/service/loader_test.go @@ -117,7 +117,7 @@ package service // dataset string // progressDuration time.Duration // loaderFunc loadFunc -// dataProvider func() any +// sendDataProvider func() *any // dataSize int // operation config.Operation // } @@ -156,7 +156,7 @@ package service // dataset:"", // progressDuration:nil, // loaderFunc:nil, -// dataProvider:nil, +// sendDataProvider:nil, // dataSize:0, // operation:nil, // }, @@ -188,7 +188,7 @@ package service // dataset:"", // progressDuration:nil, // loaderFunc:nil, -// dataProvider:nil, +// sendDataProvider:nil, // dataSize:0, // operation:nil, // }, @@ -229,7 +229,7 @@ package service // dataset: test.fields.dataset, // progressDuration: test.fields.progressDuration, // loaderFunc: test.fields.loaderFunc, -// dataProvider: test.fields.dataProvider, +// sendDataProvider: test.fields.sendDataProvider, // dataSize: test.fields.dataSize, // operation: test.fields.operation, // } @@ -255,7 +255,7 @@ package service // dataset string // progressDuration time.Duration // loaderFunc loadFunc -// dataProvider func() any +// sendDataProvider func() *any // dataSize int // operation config.Operation // } @@ -294,7 +294,7 @@ package service // dataset:"", // progressDuration:nil, // loaderFunc:nil, -// dataProvider:nil, +// sendDataProvider:nil, // dataSize:0, // operation:nil, // }, @@ -326,7 +326,7 @@ package service // dataset:"", // progressDuration:nil, // loaderFunc:nil, -// dataProvider:nil, +// sendDataProvider:nil, // dataSize:0, // operation:nil, // }, @@ -367,7 +367,7 @@ package service // dataset: test.fields.dataset, // progressDuration: test.fields.progressDuration, // loaderFunc: test.fields.loaderFunc, -// dataProvider: test.fields.dataProvider, +// sendDataProvider: test.fields.sendDataProvider, // dataSize: test.fields.dataSize, // operation: test.fields.operation, // } @@ -383,7 +383,7 @@ package service // func Test_loader_do(t *testing.T) { // type args struct { // ctx context.Context -// f func(any, error) +// f func(*any, error) // notify func(context.Context, error) // } // type fields struct { @@ -395,7 +395,7 @@ package service // dataset string // progressDuration time.Duration // loaderFunc loadFunc -// dataProvider func() any +// sendDataProvider func() *any // dataSize int // operation config.Operation // } @@ -436,7 +436,7 @@ package service // dataset:"", // progressDuration:nil, 
// loaderFunc:nil, -// dataProvider:nil, +// sendDataProvider:nil, // dataSize:0, // operation:nil, // }, @@ -470,7 +470,7 @@ package service // dataset:"", // progressDuration:nil, // loaderFunc:nil, -// dataProvider:nil, +// sendDataProvider:nil, // dataSize:0, // operation:nil, // }, @@ -511,7 +511,7 @@ package service // dataset: test.fields.dataset, // progressDuration: test.fields.progressDuration, // loaderFunc: test.fields.loaderFunc, -// dataProvider: test.fields.dataProvider, +// sendDataProvider: test.fields.sendDataProvider, // dataSize: test.fields.dataSize, // operation: test.fields.operation, // } diff --git a/pkg/tools/cli/loadtest/service/search_test.go b/pkg/tools/cli/loadtest/service/search_test.go index 16149d31c2..48bef4d635 100644 --- a/pkg/tools/cli/loadtest/service/search_test.go +++ b/pkg/tools/cli/loadtest/service/search_test.go @@ -20,7 +20,7 @@ package service // dataset assets.Dataset // } // type want struct { -// want func() any +// want func() *any // want1 int // err error // } @@ -28,11 +28,11 @@ package service // name string // args args // want want -// checkFunc func(want, func() any, int, error) error +// checkFunc func(want, func() *any, int, error) error // beforeFunc func(*testing.T, args) // afterFunc func(*testing.T, args) // } -// defaultCheckFunc := func(w want, got func() any, got1 int, err error) error { +// defaultCheckFunc := func(w want, got func() *any, got1 int, err error) error { // if !errors.Is(err, w.err) { // return errors.Errorf("got_error: \"%#v\",\n\t\t\t\twant: \"%#v\"", err, w.err) // } @@ -118,7 +118,7 @@ package service // dataset string // progressDuration time.Duration // loaderFunc loadFunc -// dataProvider func() any +// sendDataProvider func() *any // dataSize int // operation config.Operation // } @@ -157,7 +157,7 @@ package service // dataset:"", // progressDuration:nil, // loaderFunc:nil, -// dataProvider:nil, +// sendDataProvider:nil, // dataSize:0, // operation:nil, // }, @@ -186,7 +186,7 @@ package service // dataset:"", // progressDuration:nil, // loaderFunc:nil, -// dataProvider:nil, +// sendDataProvider:nil, // dataSize:0, // operation:nil, // }, @@ -227,7 +227,7 @@ package service // dataset: test.fields.dataset, // progressDuration: test.fields.progressDuration, // loaderFunc: test.fields.loaderFunc, -// dataProvider: test.fields.dataProvider, +// sendDataProvider: test.fields.sendDataProvider, // dataSize: test.fields.dataSize, // operation: test.fields.operation, // } @@ -250,7 +250,7 @@ package service // dataset string // progressDuration time.Duration // loaderFunc loadFunc -// dataProvider func() any +// sendDataProvider func() *any // dataSize int // operation config.Operation // } @@ -289,7 +289,7 @@ package service // dataset:"", // progressDuration:nil, // loaderFunc:nil, -// dataProvider:nil, +// sendDataProvider:nil, // dataSize:0, // operation:nil, // }, @@ -318,7 +318,7 @@ package service // dataset:"", // progressDuration:nil, // loaderFunc:nil, -// dataProvider:nil, +// sendDataProvider:nil, // dataSize:0, // operation:nil, // }, @@ -359,7 +359,7 @@ package service // dataset: test.fields.dataset, // progressDuration: test.fields.progressDuration, // loaderFunc: test.fields.loaderFunc, -// dataProvider: test.fields.dataProvider, +// sendDataProvider: test.fields.sendDataProvider, // dataSize: test.fields.dataSize, // operation: test.fields.operation, // } diff --git a/tests/v2/e2e/assets/rollout.yaml b/tests/v2/e2e/assets/rollout.yaml new file mode 100644 index 0000000000..1b62c66000 --- 
/dev/null +++ b/tests/v2/e2e/assets/rollout.yaml @@ -0,0 +1,26 @@ +# +# Copyright (C) 2019-2025 vdaas.org vald team +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# You may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +target: + addrs: + - localhost +dataset: + name: "fashion-mnist-784-euclidean.hdf5" +index: + wait_after_insert: "2m" +kubernetes: + kubeconfig: ${HOME}/.kube/config + portforward: + enabled: false diff --git a/tests/v2/e2e/assets/unary_crud.yaml b/tests/v2/e2e/assets/unary_crud.yaml new file mode 100644 index 0000000000..30bf46ee72 --- /dev/null +++ b/tests/v2/e2e/assets/unary_crud.yaml @@ -0,0 +1,443 @@ +# +# Copyright (C) 2019-2025 vdaas.org vald team +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# You may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# +time_zone: UTC +logging: + format: raw + level: info + logger: glg +dataset: + name: _E2E_DATASET_PATH_ +kubernetes: + kube_config: $HOME/.kube/config + port_forward: + enabled: true + local_port: 8082 + namespace: _E2E_TARGET_NAMESPACE_ + service_name: _E2E_TARGET_NAME_ + target_port: 8081 +target: + addrs: + - 127.0.0.1:8082 + health_check_duration: "1s" + connection_pool: + enable_dns_resolver: true + enable_rebalance: true + old_conn_close_duration: 2m + rebalance_duration: 30m + size: 3 + backoff: + backoff_factor: 1.1 + backoff_time_limit: 5s + enable_error_log: false + initial_duration: 5ms + jitter_limit: 100ms + maximum_duration: 5s + retry_count: 100 + # circuit_breaker: + # closed_error_rate: 0.7 + # closed_refresh_timeout: 10s + # half_open_error_rate: 0.5 + # min_samples: 1000 + # open_timeout: 1s + call_option: + content_subtype: "" + max_recv_msg_size: 0 + max_retry_rpc_buffer_size: 0 + max_send_msg_size: 0 + wait_for_ready: true + dial_option: + authority: "" + backoff_base_delay: 1s + backoff_jitter: 0.2 + backoff_max_delay: 120s + backoff_multiplier: 1.6 + disable_retry: false + enable_backoff: true + idle_timeout: 1h + initial_connection_window_size: 2097152 + initial_window_size: 1048576 + insecure: true + interceptors: [] + keepalive: + permit_without_stream: false + time: "" + timeout: 30s + max_call_attempts: 0 + max_header_list_size: 0 + max_msg_size: 0 + min_connection_timeout: 20s + net: + dialer: + dual_stack_enabled: true + keepalive: "" + timeout: "" + dns: + cache_enabled: true + cache_expiration: 1h + refresh_duration: 30m + network: tcp + socket_option: + ip_recover_destination_addr: false + ip_transparent: false + reuse_addr: true + reuse_port: true + tcp_cork: false + tcp_defer_accept: false + tcp_fast_open: false + tcp_no_delay: false + tcp_quick_ack: false + tls: + ca: /path/to/ca + cert:
/path/to/cert + enabled: false + insecure_skip_verify: false + key: /path/to/key + read_buffer_size: 0 + shared_write_buffer: false + timeout: "" + user_agent: Vald-gRPC + write_buffer_size: 0 + tls: + ca: /path/to/ca + cert: /path/to/cert + enabled: false + insecure_skip_verify: false + key: /path/to/key +metadata: + key1: sample metadata value1 + key2: sample metadata value2 + key3: sample metadata value3 +metadata_string: key4=value4,key5=value5 +strategies: + # - name: Schema + # concurrency: 1 + # delay: "" + # wait: "" + # timeout: "" + # operations: + # - name: Schema + # delay: "" + # wait: "" + # timeout: "" + # executions: + # - name: Schema + # type: index_property + # mode: unary + # parallelism: 0 + # num: 0 + # offset: 0 + # bulk_size: 0 + # # for timing setting + # delay: "" + # wait: "" + # timeout: "" + # # for search configurations + # search: + # - k: 10 + # radius: -1 + # epsilon: 0.05 + # algorithm: cq + # min_num: 2 + # ratio: 0 + # nprobe: 0 + # timeout: 3s + # - k: 20 + # radius: -1 + # epsilon: 0.05 + # algorithm: ph + # min_num: 2 + # ratio: 0 + # nprobe: 0 + # timeout: 6s + # # for modification like (Insert, Update, Upsert, Remove, RemoveByTimestamp) + # modification: + # skip_strict_exist_check: false + # timestamp: 0 + # # expected patterns of test status codes + # expected_status_codes: + # - ok + # - already_exists + # - not_found + # # for kubernetes configurations + # kubernetes: + # kind: "statefulset" + # namespace: "default" + # name: "vald-agent" + # action: rollout + - concurrency: 1 + name: check Index Property + operations: + - name: IndexProperty + executions: + - mode: unary + name: IndexProperty + type: index_property + wait: 3s + - concurrency: 1 + name: Initial Insert and Wait + operations: + - name: Insert Operation + executions: + - name: Insert + type: insert + mode: unary + parallelism: 10 + num: 30000 + qps: 1000 + wait: 2m + - mode: unary + name: IndexInfo + type: index_info + name: Insert -> IndexInfo + - concurrency: 4 + name: Parallel Search Operation (Search, SearchByID, LinearSearch, LinearSearchByID) x (ConcurrentQueue, SortSlice, SortPoolSlice, PairingHeap) = 16 + operations: + - name: Search Operation + executions: + - name: Search with ConcurrentQueue + type: search + mode: unary + parallelism: 10 + num: 1000 + search: + timeout: 3s + algorithm: cq + - name: Search with SortSlice + type: search + mode: unary + parallelism: 10 + num: 1000 + search: + timeout: 3s + algorithm: ss + - name: Search with SortPoolSlice + type: search + mode: unary + parallelism: 10 + num: 1000 + search: + timeout: 3s + algorithm: ps + - name: Search with PairingHeap + type: search + mode: unary + parallelism: 10 + num: 1000 + search: + timeout: 3s + algorithm: ph + - name: SearchByID Operation + executions: + - name: SearchByID with ConcurrentQueue + type: search_by_id + mode: unary + parallelism: 10 + num: 1000 + search: + timeout: 3s + algorithm: cq + - name: SearchByID with SortSlice + type: search_by_id + mode: unary + parallelism: 10 + num: 1000 + search: + timeout: 3s + algorithm: ss + - name: SearchByID with SortPoolSlice + type: search_by_id + mode: unary + parallelism: 10 + num: 1000 + search: + timeout: 3s + algorithm: ps + - name: SearchByID with PairingHeap + type: search_by_id + mode: unary + parallelism: 10 + num: 1000 + search: + timeout: 3s + algorithm: ph + - name: LinearSearch Operation + executions: + - name: LinearSearch with ConcurrentQueue + type: linear_search + mode: unary + parallelism: 10 + num: 1000 + search: + timeout: 3s +
algorithm: cq + - name: LinearSearch with SortSlice + type: linear_search + mode: unary + parallelism: 10 + num: 1000 + search: + timeout: 3s + algorithm: ss + - name: LinearSearch with SortPoolSlice + type: linear_search + mode: unary + parallelism: 10 + num: 1000 + search: + timeout: 3s + algorithm: ps + - name: LinearSearch with PairingHeap + type: linear_search + mode: unary + parallelism: 10 + num: 1000 + search: + timeout: 3s + algorithm: ph + - name: LinearSearchByID Operation + executions: + - name: LinearSearchByID with ConcurrentQueue + type: linear_search_by_id + mode: unary + parallelism: 10 + num: 1000 + search: + timeout: 3s + algorithm: cq + - name: LinearSearchByID with SortSlice + type: linear_search_by_id + mode: unary + parallelism: 10 + num: 1000 + search: + timeout: 3s + algorithm: ss + - name: LinearSearchByID with SortPoolSlice + type: linear_search_by_id + mode: unary + parallelism: 10 + num: 1000 + search: + timeout: 3s + algorithm: ps + - name: LinearSearchByID with PairingHeap + type: linear_search_by_id + mode: unary + parallelism: 10 + num: 1000 + search: + timeout: 3s + algorithm: ph + - concurrency: 3 + name: GetObject/Exists/GetTimestamp Operation + operations: + - name: GetObject Operation + executions: + - name: GetObject + type: object + mode: unary + parallelism: 10 + num: 1000 + - name: Exists Operation + executions: + - name: Exists + type: exists + mode: unary + parallelism: 10 + num: 1000 + - name: GetTimestamp Operation + executions: + - name: GetTimestamp + type: timestamp + mode: unary + parallelism: 10 + num: 1000 + - concurrency: 1 + name: Update -> Index Detail + operations: + - name: Update Index Detail Operation + executions: + - name: Update + type: update + mode: unary + parallelism: 10 + num: 10000 + offset: 10000 + wait: 2m + - name: IndexDetail + type: index_detail + mode: unary + - concurrency: 2 + name: Remove with Upsert -> Index stats and detail + operations: + - name: Remove IndexStatistics Operation + executions: + - name: Remove + type: remove + mode: unary + parallelism: 10 + num: 10000 + - name: IndexStatistics + type: index_statistics + mode: unary + - name: Upsert IndexDetail Operation + executions: + - name: Upsert + type: upsert + mode: unary + parallelism: 10 + num: 10000 + - name: IndexDetail + type: index_detail + mode: unary + wait: 2m + - concurrency: 1 + name: RemoveByTimestamp -> IndexDetail -> Upsert -> IndexDetail + operations: + - name: RemoveByTimestamp IndexDetail Upsert Operation + executions: + - name: RemoveByTimestamp + mode: unary + type: remove_by_timestamp + wait: 2m + num: 1 + - name: IndexDetail + mode: unary + type: index_detail + - name: Upsert + parallelism: 10 + mode: unary + num: 10000 + offset: 20000 + type: upsert + wait: 2m + - name: IndexDetail + mode: unary + type: index_detail + - concurrency: 1 + name: IndexStatistics -> Flush -> IndexInfo + operations: + - executions: + - name: IndexStatistics + mode: unary + type: index_statistics_detail + - name: Flush + mode: unary + type: flush + wait: 2m + - name: IndexInfo + mode: unary + type: index_info diff --git a/tests/v2/e2e/config/config.go b/tests/v2/e2e/config/config.go new file mode 100644 index 0000000000..2c83f4e5b9 --- /dev/null +++ b/tests/v2/e2e/config/config.go @@ -0,0 +1,775 @@ +//go:build e2e + +// +// Copyright (C) 2019-2025 vdaas.org vald team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Package config provides configuration types and logic for loading and binding configuration values. +// This file includes refactored Bind methods (always returning error) and non-Bind functions, +// with named return values and proper ordering of sections. +package config + +import ( + "strconv" + + "github.com/vdaas/vald/apis/grpc/v1/payload" + "github.com/vdaas/vald/internal/config" + "github.com/vdaas/vald/internal/errors" + "github.com/vdaas/vald/internal/file" + "github.com/vdaas/vald/internal/log" + "github.com/vdaas/vald/internal/net/grpc/codes" + "github.com/vdaas/vald/internal/strings" + "github.com/vdaas/vald/internal/timeutil" + "github.com/vdaas/vald/internal/timeutil/rate" +) + +//////////////////////////////////////////////////////////////////////////////// +// Struct Section +//////////////////////////////////////////////////////////////////////////////// + +// Data represents the complete configuration for the application. +type Data struct { + config.GlobalConfig `json:",inline,omitempty" yaml:",inline,omitempty"` + Target *config.GRPCClient `json:"target,omitempty" yaml:"target,omitempty"` + Strategies []*Strategy `json:"strategies,omitempty" yaml:"strategies,omitempty"` + Dataset *Dataset `json:"dataset,omitempty" yaml:"dataset,omitempty"` + Kubernetes *Kubernetes `json:"kubernetes,omitempty" yaml:"kubernetes,omitempty"` + Metadata map[string]string `json:"metadata,omitempty" yaml:"metadata,omitempty"` + MetaString string `json:"metadata_string,omitempty" yaml:"metadata_string,omitempty"` +} + +// Strategy represents a test strategy. +type Strategy struct { + TimeConfig ` yaml:",inline,omitempty" json:",inline,omitempty"` + Name string `yaml:"name" json:"name,omitempty"` + Concurrency uint64 `yaml:"concurrency" json:"concurrency,omitempty"` + Operations []*Operation `yaml:"operations,omitempty" json:"operations,omitempty"` +} + +// Operation represents an individual operation configuration. +type Operation struct { + TimeConfig ` yaml:",inline,omitempty" json:",inline,omitempty"` + Name string `yaml:"name,omitempty" json:"name,omitempty"` + Executions []*Execution `yaml:"executions,omitempty" json:"executions,omitempty"` +} + +// Execution represents the execution details for a given operation. +type Execution struct { + *BaseConfig ` yaml:",inline,omitempty" json:",inline,omitempty"` + TimeConfig ` yaml:",inline,omitempty" json:",inline,omitempty"` + Name string `yaml:"name" json:"name,omitempty"` + Type OperationType `yaml:"type" json:"type,omitempty"` + Mode OperationMode `yaml:"mode" json:"mode,omitempty"` + Search *SearchQuery `yaml:"search,omitempty" json:"search,omitempty"` + Kubernetes *KubernetesConfig `yaml:"kubernetes,omitempty" json:"kubernetes,omitempty"` + Modification *ModificationConfig `yaml:"modification,omitempty" json:"modification,omitempty"` + ExpectedStatusCodes StatusCodes `yaml:"expected_status_codes,omitempty" json:"expected_status_codes,omitempty"` +} + +// TimeConfig holds time-related configuration values. 
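The Data/Strategy/Operation/Execution structs above are the in-memory form of the strategies section of the e2e YAML shown earlier in this diff. The following is a minimal sketch (not part of the diff) of one such value, assuming the package is built with the e2e tag and imported under its path from this change; all field values are illustrative only.

package main

import (
	"fmt"

	"github.com/vdaas/vald/tests/v2/e2e/config"
)

func main() {
	// One strategy with a single unary Insert execution, roughly matching
	// the "Initial Insert and Wait" strategy in the YAML asset.
	s := &config.Strategy{
		Name:        "Initial Insert and Wait",
		Concurrency: 1,
		Operations: []*config.Operation{{
			Name: "Insert Operation",
			Executions: []*config.Execution{{
				Name: "Insert",
				Type: config.OpInsert,
				Mode: config.OperationUnary,
				BaseConfig: &config.BaseConfig{
					Num:         30000,
					Parallelism: 10,
					QPS:         1000,
				},
			}},
		}},
	}
	fmt.Printf("strategy %q: %d operation(s)\n", s.Name, len(s.Operations))
}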
+type TimeConfig struct { + Delay timeutil.DurationString `yaml:"delay" json:"delay,omitempty"` + Wait timeutil.DurationString `yaml:"wait" json:"wait,omitempty"` + Timeout timeutil.DurationString `yaml:"timeout" json:"timeout,omitempty"` +} + +// BaseConfig represents basic operational configuration parameters. +type BaseConfig struct { + Num uint64 `yaml:"num,omitempty" json:"num,omitempty"` + Offset uint64 `yaml:"offset,omitempty" json:"offset,omitempty"` + BulkSize uint64 `yaml:"bulk_size,omitempty" json:"bulk_size,omitempty"` + Parallelism uint64 `yaml:"parallelism,omitempty" json:"parallelism,omitempty"` + QPS uint64 `yaml:"qps,omitempty" json:"qps,omitempty"` + Limiter rate.Limiter `yaml:"-" json:"-"` +} + +// SearchQuery represents the parameters for a search query. +type SearchQuery struct { + K uint32 `yaml:"k,omitempty" json:"k,omitempty"` + Radius float32 `yaml:"radius,omitempty" json:"radius,omitempty"` + Epsilon float32 `yaml:"epsilon,omitempty" json:"epsilon,omitempty"` + AlgorithmString string `yaml:"algorithm,omitempty" json:"algorithm_string,omitempty"` + MinNum uint32 `yaml:"min_num,omitempty" json:"min_num,omitempty"` + Ratio float32 `yaml:"ratio,omitempty" json:"ratio,omitempty"` + Nprobe uint32 `yaml:"nprobe,omitempty" json:"nprobe,omitempty"` + Timeout timeutil.DurationString `yaml:"timeout,omitempty" json:"timeout,omitempty"` + Algorithm payload.Search_AggregationAlgorithm `yaml:"-" json:"-"` +} + +// ModificationConfig represents settings for modifications like insert or update. +type ModificationConfig struct { + SkipStrictExistCheck bool `yaml:"skip_strict_exist_check,omitempty" json:"skip_strict_exist_check,omitempty"` + Timestamp int64 `yaml:"timestamp,omitempty" json:"timestamp,omitempty"` +} + +// KubernetesConfig holds Kubernetes-specific settings. +type KubernetesConfig struct { + Kind KubernetesKind `yaml:"kind" json:"kind,omitempty"` + Namespace string `yaml:"namespace" json:"namespace,omitempty"` + Name string `yaml:"name" json:"name,omitempty"` + Action KubernetesAction `yaml:"action" json:"action,omitempty"` +} + +// Kubernetes holds configuration for Kubernetes environments. +type Kubernetes struct { + KubeConfig string `yaml:"kubeconfig" json:"kube_config,omitempty"` + PortForward *PortForward `yaml:"portforward,omitempty" json:"port_forward,omitempty"` +} + +// PortForward holds configuration for port forwarding. +type PortForward struct { + Enabled bool `yaml:"enabled" json:"enabled,omitempty"` + TargetPort Port `yaml:"target_port" json:"target_port,omitempty"` + LocalPort Port `yaml:"local_port" json:"local_port,omitempty"` + Namespace string `yaml:"namespace" json:"namespace,omitempty"` + ServiceName string `yaml:"service_name" json:"service_name,omitempty"` +} + +// Port represents a port as a string. +type Port string + +// Dataset holds dataset-related configuration. +type Dataset struct { + Name string `yaml:"name" json:"name,omitempty"` +} + +//////////////////////////////////////////////////////////////////////////////// +// Bind Section +//////////////////////////////////////////////////////////////////////////////// + +// Bind binds and validates the Data configuration. +// It processes nested configurations and metadata. 
+func (d *Data) Bind() (bound *Data, err error) { + if d == nil || + d.Strategies == nil || len(d.Strategies) == 0 || + d.Dataset == nil || + d.Target == nil { + return nil, errors.Wrap(errors.ErrInvalidConfig, "missing required fields on Data") + } + d.GlobalConfig.Bind() + + // Bind gRPC Target configuration if provided. + if d.Target != nil { + d.Target.Bind() + } + // Bind each Strategy. + for i, strategy := range d.Strategies { + var bs *Strategy + if bs, err = strategy.Bind(); err != nil { + return nil, err + } + d.Strategies[i] = bs + } + // Bind Dataset. + if d.Dataset != nil { + if d.Dataset, err = d.Dataset.Bind(); err != nil { + return nil, err + } + } + // Bind Kubernetes. + if d.Kubernetes != nil { + if d.Kubernetes, err = d.Kubernetes.Bind(); err != nil { + return nil, err + } + } + // Process metadata. + if d.Metadata == nil { + d.Metadata = make(map[string]string) + } + for _, meta := range strings.Split(config.GetActualValue(d.MetaString), ",") { + key, val, ok := strings.Cut(meta, "=") + if ok && key != "" && val != "" { + d.Metadata[config.GetActualValue(key)] = config.GetActualValue(val) + } + } + return d, nil +} + +// Bind binds and validates the Strategy configuration. +func (s *Strategy) Bind() (bound *Strategy, err error) { + if s == nil || s.Operations == nil || len(s.Operations) == 0 { + return nil, errors.Wrap(errors.ErrInvalidConfig, "missing required fields on Strategy") + } + s.Name = config.GetActualValue(s.Name) + s.TimeConfig.Bind() + for i, op := range s.Operations { + var bo *Operation + if bo, err = op.Bind(); err != nil { + return nil, err + } + s.Operations[i] = bo + } + return s, nil +} + +// Bind binds and validates the Operation configuration. +func (o *Operation) Bind() (bound *Operation, err error) { + if o == nil || o.Executions == nil || len(o.Executions) == 0 { + return nil, errors.Wrap(errors.ErrInvalidConfig, "missing required fields on Operation") + } + o.Name = config.GetActualValue(o.Name) + o.TimeConfig.Bind() + for i, exec := range o.Executions { + var be *Execution + if be, err = exec.Bind(); err != nil { + return nil, err + } + o.Executions[i] = be + } + return o, nil +} + +// Bind binds and validates the Execution configuration. +func (e *Execution) Bind() (bound *Execution, err error) { + if e == nil { + return nil, errors.Wrap(errors.ErrInvalidConfig, "missing required fields on Execution") + } + // Bind OperationType and OperationMode. 
+ if e.Type, err = e.Type.Bind(); err != nil { + return + } + if e.Mode, err = e.Mode.Bind(); err != nil { + return + } + e.Name = config.GetActualValue(e.Name) + e.TimeConfig.Bind() + switch e.Type { + case OpSearch, + OpSearchByID, + OpLinearSearch, + OpLinearSearchByID, + OpInsert, + OpUpdate, + OpUpsert, + OpRemove, + OpRemoveByTimestamp, + OpObject, + OpListObject, + OpTimestamp, + OpExists: + if err != nil { + return nil, err + } + if e.BaseConfig == nil || e.BaseConfig.Num == 0 { + return nil, errors.Wrap(errors.ErrInvalidConfig, "BaseConfig and its Num are required for execute "+string(e.Type)) + } + if e.BaseConfig.QPS > 0 { + e.Limiter = rate.NewLimiter(int(e.BaseConfig.QPS)) + } + if e.Mode == OperationMultiple && e.BaseConfig.BulkSize == 0 { + return nil, errors.New("bulk_size must be greater than 0 for multiple operations") + } + switch e.Type { + case OpSearch, + OpSearchByID, + OpLinearSearch, + OpLinearSearchByID: + if e.Search == nil { + return nil, errors.Wrap(errors.ErrInvalidConfig, "SearchConfig is required for execute") + } + if e.Search != nil { + if e.Search, err = e.Search.Bind(); err != nil { + return nil, err + } + } + case OpInsert, + OpUpdate, + OpUpsert, + OpRemove, + OpRemoveByTimestamp: + if e.Modification != nil { + if e.Modification, err = e.Modification.Bind(); err != nil { + return nil, err + } + } + } + if e.ExpectedStatusCodes, err = e.ExpectedStatusCodes.Bind(); err != nil { + return + } + case OpIndexInfo, + OpIndexDetail, + OpIndexStatistics, + OpIndexStatisticsDetail, + OpIndexProperty, + OpFlush: + case OpKubernetes: + if e.Kubernetes != nil { + if e.Kubernetes, err = e.Kubernetes.Bind(); err != nil { + return + } + } + case OpClient: + // do nothing + case OpWait: + // do nothing + default: + return nil, errors.Wrap(errors.ErrInvalidConfig, "unsupported operation type"+string(e.Type)) + } + bound = e + return +} + +// Bind binds the TimeConfig by expanding environment variables. +func (t *TimeConfig) Bind() (bound *TimeConfig) { + if t == nil { + return nil + } + t.Delay = config.GetActualValue(t.Delay) + t.Wait = config.GetActualValue(t.Wait) + t.Timeout = config.GetActualValue(t.Timeout) + return t +} + +// Bind binds and validates the SearchQuery configuration. +func (sq *SearchQuery) Bind() (bound *SearchQuery, err error) { + if sq == nil { + return nil, errors.Wrap(errors.ErrInvalidConfig, "missing required fields on SearchQuery") + } + sq.Timeout = config.GetActualValue(sq.Timeout) + dur, err := sq.Timeout.Duration() + if err != nil || dur < 0 { + sq.Timeout = defaultTimeout + } + sq.AlgorithmString = config.GetActualValue(sq.AlgorithmString) + if sq.K == 0 { + sq.K = defaultTopK + } + if sq.Radius == 0 { + sq.Radius = -1 + } + switch trimStringForCompare(sq.AlgorithmString) { + case "concurrentqueue", "queue", "cqueue", "cq": + sq.Algorithm = payload.Search_ConcurrentQueue + case "sortslice", "slice", "sslice", "ss": + sq.Algorithm = payload.Search_SortSlice + case "sortpoolslice", "poolslice", "spslice", "pslice", "sps", "ps": + sq.Algorithm = payload.Search_SortPoolSlice + case "pairingheap", "pairheap", "pheap", "heap", "ph": + sq.Algorithm = payload.Search_PairingHeap + default: + sq.Algorithm = payload.Search_ConcurrentQueue + } + return sq, nil +} + +// Bind binds and validates the ModificationConfig. +func (m *ModificationConfig) Bind() (bound *ModificationConfig, err error) { + if m.Timestamp < 0 { + m.Timestamp = 0 + } + return m, nil +} + +// Bind expands environment variables for OperationType. 
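When an execution sets qps, Bind wraps it in a limiter (internal/timeutil/rate) that the request loop waits on before every call. Below is a standalone sketch of the same throttling idea using the upstream golang.org/x/time/rate package, which the internal limiter resembles; the burst value of 1 is an assumption made only for this sketch.

package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	ctx := context.Background()
	qps := 5
	lim := rate.NewLimiter(rate.Limit(qps), 1) // at most qps requests per second

	start := time.Now()
	for i := 0; i < 10; i++ {
		// Block until the limiter grants a slot, mirroring plan.Limiter.Wait(ctx)
		// before each gRPC call in the request helpers.
		if err := lim.Wait(ctx); err != nil {
			fmt.Println("limiter:", err)
			return
		}
	}
	fmt.Printf("10 throttled iterations took %v\n", time.Since(start).Round(100*time.Millisecond))
}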
+func (ot OperationType) Bind() (bound OperationType, err error) { + if ot == "" { + return "", errors.Wrap(errors.ErrInvalidConfig, "missing required fields on OperationType") + } + switch trimStringForCompare(config.GetActualValue(ot)) { + case "search", "ser", "s": + return OpSearch, nil + case "searchbyid", "serid", "sid", "sbyid": + return OpSearchByID, nil + case "linearsearch", "lsearch", "lser", "ls": + return OpLinearSearch, nil + case "linearsearchbyid", "lsearchbyid", "lserid", "lsbyid": + return OpLinearSearchByID, nil + case "insert", "ins", "i": + return OpInsert, nil + case "update", "upd", "u": + return OpUpdate, nil + case "upsert", "usert", "upst", "us": + return OpUpsert, nil + case "remove", "rem", "r", "delete", "del", "d": + return OpRemove, nil + case "removebytimestamp", "removets", "remts", "rmts", "dts": + return OpRemoveByTimestamp, nil + case "object", "obj", "o": + return OpObject, nil + case "listobject", "listobj", "lobj", "lo": + return OpListObject, nil + case "timestamp", "ts", "t": + return OpTimestamp, nil + case "exists", "exist", "ex", "e": + return OpExists, nil + case "indexinfo", "index", "info", "ii": + return OpIndexInfo, nil + case "indexdetail", "detail", "id": + return OpIndexDetail, nil + case "indexstatistics", "statistics", "stat", "is": + return OpIndexStatistics, nil + case "indexstatisticsdetail", "statisticsdetail", "statdetail", "isd": + return OpIndexStatisticsDetail, nil + case "indexproperty", "property", "prop", "ip": + return OpIndexProperty, nil + case "flush", "fl", "f": + return OpFlush, nil + case "kubernetes", "kube", "k8s": + return OpKubernetes, nil + case "client", "cli", "c", "grpc": + return OpClient, nil + case "wait": + return OpWait, nil + } + return bound, nil +} + +// Bind expands environment variables for OperationMode. +func (om OperationMode) Bind() (bound OperationMode, err error) { + if om == "" { + return "", errors.Wrap(errors.ErrInvalidConfig, "missing required fields on OperationMode") + } + switch trimStringForCompare(config.GetActualValue(om)) { + case "unary", "un", "u": + return OperationUnary, nil + case "stream", "str", "s": + return OperationStream, nil + case "multiple", "multi", "m": + return OperationMultiple, nil + case "other", "oth", "o": + return OperationOther, nil + } + return bound, nil +} + +// Bind expands environment variables for StatusCode. 
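The Bind methods above accept several spellings of each enum value by stripping separators before the switch, via the trimStringForCompare helper defined at the bottom of this file. A stdlib-only sketch of that normalization (the real code additionally expands environment variable references first, and the matching is case-sensitive as written):

package main

import (
	"fmt"
	"strings"
)

// reps removes the separators a user may type in the YAML, so that
// "linear_search_by_id", "linear-search-by-id" and "linear.search.by.id"
// all normalize to the same key before the alias switch.
var reps = strings.NewReplacer(" ", "", "-", "", "_", "", ":", "", ";", "", ",", "", ".", "")

func normalize(s string) string { return reps.Replace(s) }

func operationType(s string) string {
	switch normalize(s) {
	case "search", "ser", "s":
		return "search"
	case "linearsearchbyid", "lsearchbyid", "lserid", "lsbyid":
		return "linear_search_by_id"
	default:
		return "unknown"
	}
}

func main() {
	fmt.Println(operationType("linear-search_by.id")) // linear_search_by_id
	fmt.Println(operationType("ser"))                 // search
}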
+func (sc StatusCode) Bind() (bound StatusCode, err error) { + switch trimStringForCompare(config.GetActualValue(sc)) { + case StatusCodeOK: + return StatusCodeOK, nil + case StatusCodeCanceled: + return StatusCodeCanceled, nil + case StatusCodeUnknown: + return StatusCodeUnknown, nil + case StatusCodeInvalidArgument: + return StatusCodeInvalidArgument, nil + case StatusCodeDeadlineExceeded: + return StatusCodeDeadlineExceeded, nil + case StatusCodeNotFound: + return StatusCodeNotFound, nil + case StatusCodeAlreadyExists: + return StatusCodeAlreadyExists, nil + case StatusCodePermissionDenied: + return StatusCodePermissionDenied, nil + case StatusCodeResourceExhausted: + return StatusCodeResourceExhausted, nil + case StatusCodeFailedPrecondition: + return StatusCodeFailedPrecondition, nil + case StatusCodeAborted: + return StatusCodeAborted, nil + case StatusCodeOutOfRange: + return StatusCodeOutOfRange, nil + case StatusCodeUnimplemented: + return StatusCodeUnimplemented, nil + case StatusCodeInternal: + return StatusCodeInternal, nil + case StatusCodeUnavailable: + return StatusCodeUnavailable, nil + case StatusCodeDataLoss: + return StatusCodeDataLoss, nil + case StatusCodeUnauthenticated: + return StatusCodeUnauthenticated, nil + } + return bound, nil +} + +// Bind binds each StatusCode in StatusCodes. +func (sc StatusCodes) Bind() (bound StatusCodes, err error) { + for i, code := range sc { + var bcode StatusCode + if bcode, err = code.Bind(); err != nil { + return nil, err + } + sc[i] = bcode + } + return sc, nil +} + +// Bind expands environment variables for KubernetesKind. +func (kk KubernetesKind) Bind() (bound KubernetesKind, err error) { + switch trimStringForCompare(config.GetActualValue(kk)) { + case "configmap", "config", "cm": + return ConfigMap, nil + case "cronjob", "cron", "cj": + return CronJob, nil + case "daemonset", "daemon", "ds": + return DaemonSet, nil + case "deployment", "deploy", "dep": + return Deployment, nil + case "job", "jb": + return Job, nil + case "pod", "pd": + return Pod, nil + case "secret", "sec": + return Secret, nil + case "service", "svc": + return Service, nil + case "statefulset", "stateful", "sts": + return StatefulSet, nil + } + return bound, nil +} + +// Bind expands environment variables for KubernetesAction. +func (ka KubernetesAction) Bind() (bound KubernetesAction, err error) { + switch trimStringForCompare(config.GetActualValue(ka)) { + case "rollout", "roll", "r": + return KubernetesActionRollout, nil + case "delete", "del", "d": + return KubernetesActionDelete, nil + case "get", "g": + return KubernetesActionGet, nil + case "exec", "e": + return KubernetesActionExec, nil + case "apply", "a": + return KubernetesActionApply, nil + case "create", "c": + return KubernetesActionCreate, nil + case "patch", "p": + return KubernetesActionPatch, nil + case "scale", "s": + return KubernetesActionScale, nil + } + return bound, nil +} + +// Bind binds and validates the KubernetesConfig. 
+func (k *KubernetesConfig) Bind() (bound *KubernetesConfig, err error) { + if k == nil { + return nil, errors.Wrap(errors.ErrInvalidConfig, "missing required fields on KubernetesConfig") + } + k.Namespace = config.GetActualValue(k.Namespace) + k.Name = config.GetActualValue(k.Name) + if k.Action, err = k.Action.Bind(); err != nil { + return nil, err + } + if k.Kind, err = k.Kind.Bind(); err != nil { + return nil, err + } + if k.Namespace == "" || k.Name == "" || k.Action == "" || k.Kind == "" { + return nil, errors.Errorf("kubernetes config: namespace: %s, name: %s, action: %s, and kind: %s must be provided", + k.Namespace, k.Name, k.Action, k.Kind) + } + return k, nil +} + +// Bind binds and validates the Kubernetes configuration. +func (k *Kubernetes) Bind() (bound *Kubernetes, err error) { + if k == nil { + return nil, errors.Wrap(errors.ErrInvalidConfig, "missing required fields on Kubernetes") + } + k.KubeConfig = config.GetActualValue(k.KubeConfig) + if k.KubeConfig == "" { + log.Warn("Kubernetes.KubeConfig is empty; please check your configuration") + } else if !file.Exists(k.KubeConfig) { + log.Warn("Kubernetes: kubeconfig file does not exist: ", k.KubeConfig) + } + if k.PortForward != nil { + if k.PortForward, err = k.PortForward.Bind(); err != nil { + return nil, err + } + } + return k, nil +} + +// Bind binds and validates the PortForward configuration. +func (pf *PortForward) Bind() (bound *PortForward, err error) { + if pf == nil { + return nil, errors.Wrap(errors.ErrInvalidConfig, "missing required fields on PortForward") + } + pf.ServiceName = config.GetActualValue(pf.ServiceName) + pf.Namespace = config.GetActualValue(pf.Namespace) + if pf.ServiceName == "" { + return nil, errors.New("portforward: service name cannot be empty") + } + if pf.Namespace == "" { + return nil, errors.New("portforward: namespace cannot be empty") + } + if _, err = pf.TargetPort.Bind(); err != nil { + return nil, err + } + if _, err = pf.LocalPort.Bind(); err != nil { + return nil, err + } + if pf.TargetPort.Port() == 0 { + pf.TargetPort = localPort + } + if pf.LocalPort.Port() == 0 { + pf.LocalPort = localPort + } + return pf, nil +} + +// Bind expands environment variables for Port. +func (p *Port) Bind() (bound *Port, err error) { + port := config.GetActualValue(*p) + return &port, nil +} + +// Bind binds and validates the Dataset configuration. +func (d *Dataset) Bind() (bound *Dataset, err error) { + if d == nil { + return nil, errors.Wrap(errors.ErrInvalidConfig, "missing required fields on Dataset") + } + d.Name = config.GetActualValue(d.Name) + if d.Name == "" || !file.Exists(d.Name) { + return nil, errors.Errorf("dataset name: %s cannot be empty", d.Name) + } + return d, nil +} + +//////////////////////////////////////////////////////////////////////////////// +// Func Section +//////////////////////////////////////////////////////////////////////////////// + +// Timing interface provides access to time configuration values. +type Timing interface { + GetDelay() timeutil.DurationString + GetWait() timeutil.DurationString + GetTimeout() timeutil.DurationString +} + +// GetDelay returns the Delay value from TimeConfig. +func (t *TimeConfig) GetDelay() timeutil.DurationString { + if t == nil { + return "" + } + return t.Delay +} + +// GetWait returns the Wait value from TimeConfig. +func (t *TimeConfig) GetWait() timeutil.DurationString { + if t == nil { + return "" + } + return t.Wait +} + +// GetTimeout returns the Timeout value from TimeConfig. 
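PortForward.Bind falls back to the default port whenever a configured port string does not parse to a non-zero uint16; Port.Port (defined further down) does the parsing. A small stdlib-only sketch of that fallback, with 8081 taken from the localPort constant in this file:

package main

import (
	"fmt"
	"strconv"
)

const defaultLocalPort = "8081" // mirrors the localPort constant in config.go

// portOf parses a port string into a uint16, returning 0 on any error,
// which the caller treats as "use the default".
func portOf(s string) uint16 {
	p, err := strconv.ParseUint(s, 10, 16)
	if err != nil {
		return 0
	}
	return uint16(p)
}

func resolvePort(configured string) uint16 {
	if portOf(configured) == 0 {
		configured = defaultLocalPort
	}
	return portOf(configured)
}

func main() {
	fmt.Println(resolvePort(""))      // 8081
	fmt.Println(resolvePort("70000")) // 8081 (out of uint16 range)
	fmt.Println(resolvePort("8200"))  // 8200
}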
+func (t *TimeConfig) GetTimeout() timeutil.DurationString { + if t == nil { + return "" + } + return t.Timeout +} + +// Equals compares StatusCode with a given string ignoring case. +func (sc StatusCode) Equals(c string) bool { + bound, _ := sc.Bind() // error ignored as Bind never errors + return strings.EqualFold(bound.String(), c) +} + +func (sc StatusCode) Status() codes.Code { + switch trimStringForCompare(sc) { + case StatusCodeOK: + return codes.OK + case StatusCodeCanceled: + return codes.Canceled + case StatusCodeUnknown: + return codes.Unknown + case StatusCodeInvalidArgument: + return codes.InvalidArgument + case StatusCodeDeadlineExceeded: + return codes.DeadlineExceeded + case StatusCodeNotFound: + return codes.NotFound + case StatusCodeAlreadyExists: + return codes.AlreadyExists + case StatusCodePermissionDenied: + return codes.PermissionDenied + case StatusCodeResourceExhausted: + return codes.ResourceExhausted + case StatusCodeFailedPrecondition: + return codes.FailedPrecondition + case StatusCodeAborted: + return codes.Aborted + case StatusCodeOutOfRange: + return codes.OutOfRange + case StatusCodeUnimplemented: + return codes.Unimplemented + case StatusCodeInternal: + return codes.Internal + case StatusCodeUnavailable: + return codes.Unavailable + case StatusCodeDataLoss: + return codes.DataLoss + case StatusCodeUnauthenticated: + return codes.Unauthenticated + } + return codes.Unknown +} + +// String returns the string representation of StatusCode. +func (sc StatusCode) String() string { + return string(sc) +} + +// Equals checks if any StatusCode in StatusCodes equals the given string. +func (sc StatusCodes) Equals(c string) bool { + for _, s := range sc { + if s.Equals(c) { + return true + } + } + return false +} + +// Port returns the numeric value of the Port. +func (p Port) Port() uint16 { + bp, _ := p.Bind() // error ignored as Bind never errors + port, err := strconv.ParseUint(string(*bp), 10, 16) + if err != nil { + return 0 + } + return uint16(port) +} + +// ////////////////////////////////////////////////////////////////////////////// +// Const Section +// ////////////////////////////////////////////////////////////////////////////// +const ( + localPort Port = "8081" + defaultTopK = uint32(10) + defaultTimeout = timeutil.DurationString("3s") +) + +//////////////////////////////////////////////////////////////////////////////// +// Load Function +//////////////////////////////////////////////////////////////////////////////// + +// Load reads the configuration from the specified file path, +// binds and validates the configuration, and returns the complete Data configuration. 
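StatusCodes.Equals is what lets a test plan declare that, for example, not_found or already_exists responses are acceptable for a given execution. A standalone sketch of the same check against a gRPC error using the upstream grpc-go status/codes packages (the e2e code itself goes through the internal wrappers):

package main

import (
	"fmt"
	"strings"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// expected mimics the config.StatusCodes semantics: a case-insensitive match
// of the gRPC code name against an allow-list taken from the test plan.
func expected(allowed []string, err error) bool {
	st, ok := status.FromError(err)
	if !ok || st == nil {
		return false
	}
	for _, a := range allowed {
		if strings.EqualFold(strings.ReplaceAll(a, "_", ""), st.Code().String()) {
			return true
		}
	}
	return false
}

func main() {
	err := status.Error(codes.AlreadyExists, "vector already exists")
	fmt.Println(expected([]string{"ok", "already_exists"}, err)) // true
	fmt.Println(expected([]string{"ok"}, err))                   // false
}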
+func Load(path string) (cfg *Data, err error) { + log.Debugf("loading test client configuration from %s", path) + cfg = new(Data) + if err = config.Read(path, &cfg); err != nil { + return + } + if cfg == nil { + err = errors.ErrInvalidConfig + return + } + if cfg, err = cfg.Bind(); err != nil { + return + } + log.Debug(config.ToRawYaml(cfg)) + return +} + +var reps = strings.NewReplacer(" ", "", "-", "", "_", "", ":", "", ";", "", ",", "", ".", "") + +func trimStringForCompare[S ~string](str S) S { + return S(reps.Replace(string(str))) +} diff --git a/tests/v2/e2e/config/enums.go b/tests/v2/e2e/config/enums.go new file mode 100644 index 0000000000..f961cdb909 --- /dev/null +++ b/tests/v2/e2e/config/enums.go @@ -0,0 +1,112 @@ +//go:build e2e + +// +// Copyright (C) 2019-2025 vdaas.org vald team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Package config provides configuration types and logic for loading and binding configuration values. +// This file includes detailed Bind methods for all configuration types with extensive comments. +package config + +type OperationType string + +const ( + OpSearch OperationType = "search" + OpSearchByID OperationType = "search_by_id" + OpLinearSearch OperationType = "linear_search" + OpLinearSearchByID OperationType = "linear_search_by_id" + + OpInsert OperationType = "insert" + OpUpdate OperationType = "update" + OpUpsert OperationType = "upsert" + OpRemove OperationType = "remove" + OpRemoveByTimestamp OperationType = "remove_by_timestamp" + + OpObject OperationType = "object" + OpListObject OperationType = "list_object" + OpTimestamp OperationType = "timestamp" + OpExists OperationType = "exists" + + OpIndexInfo OperationType = "index_info" + OpIndexDetail OperationType = "index_detail" + OpIndexStatistics OperationType = "index_statistics" + OpIndexStatisticsDetail OperationType = "index_statistics_detail" + OpIndexProperty OperationType = "index_property" + OpFlush OperationType = "flush" + + OpKubernetes OperationType = "kubernetes" + OpClient OperationType = "client" + OpWait OperationType = "wait" +) + +type StatusCode string + +type StatusCodes []StatusCode + +const ( + StatusCodeOK StatusCode = "ok" + StatusCodeCanceled StatusCode = "canceled" + StatusCodeUnknown StatusCode = "unknown" + StatusCodeInvalidArgument StatusCode = "invalidargument" + StatusCodeDeadlineExceeded StatusCode = "deadlineexceeded" + StatusCodeNotFound StatusCode = "notfound" + StatusCodeAlreadyExists StatusCode = "alreadyexists" + StatusCodePermissionDenied StatusCode = "permissiondenied" + StatusCodeResourceExhausted StatusCode = "resourceexhausted" + StatusCodeFailedPrecondition StatusCode = "failedprecondition" + StatusCodeAborted StatusCode = "aborted" + StatusCodeOutOfRange StatusCode = "outofrange" + StatusCodeUnimplemented StatusCode = "unimplemented" + StatusCodeInternal StatusCode = "internal" + StatusCodeUnavailable StatusCode = "unavailable" + StatusCodeDataLoss StatusCode = "dataloss" + StatusCodeUnauthenticated StatusCode = 
"unauthenticated" +) + +type OperationMode string + +const ( + OperationUnary OperationMode = "unary" + OperationStream OperationMode = "stream" + OperationMultiple OperationMode = "multiple" + OperationOther OperationMode = "other" +) + +type KubernetesAction string + +const ( + KubernetesActionRollout KubernetesAction = "rollout" + KubernetesActionDelete KubernetesAction = "delete" + KubernetesActionGet KubernetesAction = "get" + KubernetesActionExec KubernetesAction = "exec" + KubernetesActionApply KubernetesAction = "apply" + KubernetesActionCreate KubernetesAction = "create" + KubernetesActionPatch KubernetesAction = "patch" + KubernetesActionScale KubernetesAction = "scale" +) + +type KubernetesKind string + +const ( + ConfigMap KubernetesKind = "configmap" + CronJob KubernetesKind = "cronjob" + DaemonSet KubernetesKind = "daemonset" + Deployment KubernetesKind = "deployment" + Job KubernetesKind = "job" + Pod KubernetesKind = "pod" + Secret KubernetesKind = "secret" + Service KubernetesKind = "service" + StatefulSet KubernetesKind = "statefulset" +) diff --git a/tests/v2/e2e/crud/crud_test.go b/tests/v2/e2e/crud/crud_test.go new file mode 100644 index 0000000000..54bce79675 --- /dev/null +++ b/tests/v2/e2e/crud/crud_test.go @@ -0,0 +1,68 @@ +//go:build e2e + +// +// Copyright (C) 2019-2025 vdaas.org vald team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// package crud provides e2e tests using ann-benchmarks datasets +package crud + +import ( + "os" + "testing" + + "github.com/vdaas/vald/internal/log" + "github.com/vdaas/vald/internal/params" + "github.com/vdaas/vald/internal/strings" + "github.com/vdaas/vald/tests/v2/e2e/config" + "github.com/vdaas/vald/tests/v2/e2e/hdf5" +) + +var ( + cfg *config.Data + ds *hdf5.Dataset +) + +func TestMain(m *testing.M) { + var err error + p, fail, err := params.New( + params.WithName("vald/e2e"), + params.WithOverrideDefault(true), + params.WithArgumentFilters( + func(s string) bool { + return strings.HasPrefix(s, "-test.") + }, + ), + ).Parse() + if fail || err != nil || p.ConfigFilePath() == "" { + log.Fatalf("failed to parse the parameters: %v", err) + } + + if testing.Short() { + log.Info("skipping this pkg test when -short because e2e test takes a long time") + os.Exit(0) + } + + cfg, err = config.Load(p.ConfigFilePath()) + if err != nil { + log.Fatalf("failed to load config: %v", err) + } + log.Init(log.WithLevel(cfg.Logging.Level), log.WithFormat(cfg.Logging.Format)) + ds, err = hdf5.HDF5ToDataset(cfg.Dataset.Name) + if err != nil { + log.Fatalf("failed to load dataset: %v", err) + } + os.Exit(m.Run()) +} diff --git a/tests/v2/e2e/crud/dataset_test.go b/tests/v2/e2e/crud/dataset_test.go new file mode 100644 index 0000000000..ea832e9a5c --- /dev/null +++ b/tests/v2/e2e/crud/dataset_test.go @@ -0,0 +1,46 @@ +//go:build e2e + +// +// Copyright (C) 2019-2025 vdaas.org vald team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// package crud provides e2e tests using ann-benchmarks datasets +package crud + +import ( + "testing" + + "github.com/vdaas/vald/internal/iter" + "github.com/vdaas/vald/tests/v2/e2e/config" +) + +// -------------------------------------------------- +// Dataset Slices Construction Using Cycle Iterators | +// -------------------------------------------------- + +// getDatasetSlices constructs iterators for the Train, Test, and Neighbors datasets. +// For Train and Test, if more samples are requested than are available, +// a NoiseModifier (via a noiseGenerator) is used to add noise on‑the‑fly. +func getDatasetSlices( + t *testing.T, e *config.Execution, +) (train, test iter.Cycle[[][]float32, []float32], neighbors iter.Cycle[[][]int, []int]) { + t.Helper() + if ds == nil || e == nil || e.BaseConfig == nil { + return nil, nil, nil + } + return ds.TrainCycle(e.Num, e.Offset), + ds.TestCycle(e.Num, e.Offset), + ds.NeighborsCycle(e.Num, e.Offset) +} diff --git a/tests/v2/e2e/crud/grpc_test.go b/tests/v2/e2e/crud/grpc_test.go new file mode 100644 index 0000000000..9d10218dc8 --- /dev/null +++ b/tests/v2/e2e/crud/grpc_test.go @@ -0,0 +1,215 @@ +//go:build e2e + +// +// Copyright (C) 2019-2025 vdaas.org vald team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Package crud provides end-to-end tests using ann-benchmarks datasets. +package crud + +import ( + "context" + "slices" + "strconv" + "sync/atomic" + "testing" + + "github.com/vdaas/vald/internal/iter" + "github.com/vdaas/vald/internal/net/grpc" + "github.com/vdaas/vald/internal/net/grpc/proto" + "github.com/vdaas/vald/internal/net/grpc/status" + "github.com/vdaas/vald/internal/sync/errgroup" + "github.com/vdaas/vald/tests/v2/e2e/config" +) + +// Type aliases for generic search functions. +type ( + // grpcCall is a generic function type for making gRPC calls. + grpcCall[Q, R proto.Message] func(ctx context.Context, query Q, opts ...grpc.CallOption) (response R, err error) + // newStream is a generic type for functions that create a new gRPC stream. + newStream[S grpc.ClientStream] func(ctx context.Context, opts ...grpc.CallOption) (S, error) + // newRequest is a function type that creates a new request. + newRequest[Q proto.Message] func(t *testing.T, idx uint64, id string, vec []float32, e *config.Execution) Q + // newMultiRequest is a generic type for functions that build bulk search requests. + newMultiRequest[R, S proto.Message] func(t *testing.T, reqs []R) S + // callback is a function type that processes the response and error from a gRPC call. 
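getDatasetSlices hands each execution a cycle over the dataset: a view of num vectors starting at offset that wraps around when more samples are requested than were loaded (the internal iter.Cycle; per the comment above, noise is mixed in for train/test when wrapping). A rough stdlib-only sketch of the wrap-around indexing; it is an approximation of that behaviour, not the real implementation, and omits the noise generation.

package main

import "fmt"

// cycle is a fixed-size window over data that wraps modulo len(data).
type cycle struct {
	data   [][]float32
	num    uint64
	offset uint64
}

func (c cycle) Len() uint64 { return c.num }

// At returns the i-th element of the window, wrapping around the dataset.
func (c cycle) At(i uint64) []float32 {
	return c.data[(c.offset+i)%uint64(len(c.data))]
}

func main() {
	data := [][]float32{{0}, {1}, {2}}
	c := cycle{data: data, num: 5, offset: 2}
	for i := uint64(0); i < c.Len(); i++ {
		fmt.Println(i, c.At(i)) // dataset indices 2, 0, 1, 2, 0
	}
}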
+ callback[R proto.Message] func(t *testing.T, idx uint64, res R, err error) bool +) + +// handleGRPCCallError centralizes the gRPC error handling and logging. +// It compares the error's status code with the expected codes from the plan. +// If the error is expected, it logs a message; otherwise, it logs an error. +func handleGRPCCallError(t *testing.T, err error, plan *config.Execution) { + t.Helper() + if err != nil { + if st, ok := status.FromError(err); ok && st != nil { + if len(plan.ExpectedStatusCodes) != 0 && !plan.ExpectedStatusCodes.Equals(st.Code().String()) { + t.Errorf("unexpected error: %v", st) + } + return + } + t.Errorf("failed to execute gRPC call error: %v", err) + } +} + +func single[Q, R proto.Message]( + t *testing.T, + ctx context.Context, + idx uint64, + plan *config.Execution, + req Q, + call grpcCall[Q, R], + callback ...callback[R], +) { + t.Helper() + if plan.BaseConfig != nil && plan.BaseConfig.Limiter != nil { + plan.BaseConfig.Limiter.Wait(ctx) + } + // Execute the modify gRPC call. + res, err := call(ctx, req) + if err != nil { + // Handle the error using the centralized error handler. + handleGRPCCallError(t, err, plan) + return + } + + for _, cb := range callback { + if cb != nil { + if !cb(t, idx, res, err) { + return + } + } + } + return +} + +func unary[Q, R proto.Message]( + t *testing.T, + ctx context.Context, + data iter.Cycle[[][]float32, []float32], + plan *config.Execution, + call grpcCall[Q, R], + newReq newRequest[Q], + callback ...callback[R], +) { + t.Helper() + // Create an error group to manage concurrent requests. + eg, ctx := errgroup.New(ctx) + // Set the concurrency limit from the plan configuration. + if plan != nil && plan.BaseConfig != nil { + // Set the concurrency limit from the plan configuration. + eg.SetLimit(int(plan.Parallelism)) + } + for i, vec := range data.Seq2(ctx) { + // Copy id to avoid data race. + idx := i + // Execute request in a goroutine. + eg.Go(func() error { + single(t, ctx, idx, plan, newReq(t, idx, strconv.FormatUint(idx, 10), vec, plan), call, callback...) + return nil + }) + } + // Wait for all goroutines to complete. + eg.Wait() +} + +func multi[Q, M, R proto.Message]( + t *testing.T, + ctx context.Context, + data iter.Cycle[[][]float32, []float32], + plan *config.Execution, + call grpcCall[M, R], + addReqs newRequest[Q], + toReq newMultiRequest[Q, M], + callbacks ...callback[R], +) { + t.Helper() + eg, ctx := errgroup.New(ctx) + // Set the concurrency limit from the plan configuration. + if plan != nil && plan.BaseConfig != nil { + // Set the concurrency limit from the plan configuration. + eg.SetLimit(int(plan.Parallelism)) + } + // Initialize a slice to hold the bulk requests. + reqs := make([]Q, 0, plan.BulkSize) + for i, vec := range data.Seq2(ctx) { + id := strconv.FormatUint(i, 10) + // Append a new request to the bulk slice using the provided builder. + reqs = append(reqs, addReqs(t, i, id, vec, plan)) + // If the bulk size is reached, send the batch. + if len(reqs) >= int(plan.BulkSize) { + // Capture the current batch. + batch := slices.Clone(reqs) + idx := i + // Meset the bulk request slice for the next batch. + reqs = reqs[:0] + eg.Go(func() error { + single(t, ctx, idx, plan, toReq(t, batch), call, callbacks...) + return nil + }) + } + } + eg.Go(func() error { + single(t, ctx, data.Len(), plan, toReq(t, reqs), call, callbacks...) 
+ return nil + }) + eg.Wait() +} + +func stream[S grpc.ClientStream, Q, R proto.Message]( + t *testing.T, + ctx context.Context, + data iter.Cycle[[][]float32, []float32], + plan *config.Execution, + newStream newStream[S], + newReq newRequest[Q], + callbacks ...callback[R], +) { + t.Helper() + // Create a new stream using the provided stream function. + stream, err := newStream(ctx) + if err != nil { + t.Error(err) + return + } + // qidx tracks the current index within the modify configuration slice. + // idx tracks the current vector index. + var idx atomic.Uint64 + var sidx atomic.Uint64 + // Use a bidirectional stream client to send requests and receive responses. + err = grpc.BidirectionalStreamClient(stream, int(plan.Parallelism), func() *Q { + // If we have processed all vectors, return nil to close the stream. + if idx.Load() >= data.Len() { + return nil + } + // Build the modify configuration and return the request. + req := newReq(t, idx.Load(), strconv.FormatUint(idx.Load(), 10), data.At(idx.Load()), plan) + idx.Add(1) + return &req + }, func(res *R, err error) bool { + id := sidx.Add(1) - 1 + for _, cb := range callbacks { + if cb != nil { + if !cb(t, id, *res, err) { + return false + } + } + } + return true + }) + if err != nil { + t.Errorf("failed to complete stream: %v", err) + } +} diff --git a/tests/v2/e2e/crud/index_test.go b/tests/v2/e2e/crud/index_test.go new file mode 100644 index 0000000000..1cc89968b5 --- /dev/null +++ b/tests/v2/e2e/crud/index_test.go @@ -0,0 +1,62 @@ +//go:build e2e + +// +// Copyright (C) 2019-2025 vdaas.org vald team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// package crud provides end-to-end tests using ann-benchmarks datasets. 
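The unary and multi helpers above bound their fan-out with an error group whose limit comes from plan.Parallelism. A trimmed-down sketch of that pattern using the upstream golang.org/x/sync/errgroup package; the tests use the internal wrapper, whose constructor differs slightly, so this is an analogue rather than the exact code.

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	eg, ctx := errgroup.WithContext(context.Background())
	eg.SetLimit(4) // plays the role of plan.Parallelism in the e2e helpers

	for i := 0; i < 16; i++ {
		i := i
		eg.Go(func() error {
			// Each goroutine would issue one gRPC request here; in the real
			// helpers errors are reported via testing.T instead of returned.
			_ = ctx
			fmt.Println("request", i)
			return nil
		})
	}
	_ = eg.Wait()
}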
+package crud + +import ( + "context" + "testing" + + "github.com/vdaas/vald/apis/grpc/v1/payload" + "github.com/vdaas/vald/internal/log" + "github.com/vdaas/vald/internal/net/grpc/proto" + "github.com/vdaas/vald/tests/v2/e2e/config" +) + +func indexCallBack[R proto.Message](res R, err error) bool { + if err != nil { + return true + } + log.Infof("response: %v", res) + return true +} + +func (r *runner) processIndex(t *testing.T, ctx context.Context, plan *config.Execution) { + t.Helper() + if plan == nil { + t.Fatalf("index operation plan is nil") + return + } + switch plan.Type { + case config.OpIndexInfo: + single(t, ctx, 0, plan, new(payload.Empty), r.client.IndexInfo) + case config.OpIndexDetail: + single(t, ctx, 0, plan, new(payload.Empty), r.client.IndexDetail) + case config.OpIndexStatistics: + single(t, ctx, 0, plan, new(payload.Empty), r.client.IndexStatistics) + case config.OpIndexStatisticsDetail: + single(t, ctx, 0, plan, new(payload.Empty), r.client.IndexStatisticsDetail) + case config.OpIndexProperty: + single(t, ctx, 0, plan, new(payload.Empty), r.client.IndexProperty) + case config.OpFlush: + single(t, ctx, 0, plan, new(payload.Flush_Request), r.client.Flush) + default: + t.Fatalf("unsupported index operation: %s", plan.Type) + } +} diff --git a/tests/v2/e2e/crud/modification_test.go b/tests/v2/e2e/crud/modification_test.go new file mode 100644 index 0000000000..bcf382251c --- /dev/null +++ b/tests/v2/e2e/crud/modification_test.go @@ -0,0 +1,184 @@ +//go:build e2e + +// +// Copyright (C) 2019-2025 vdaas.org vald team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// package crud provides end-to-end tests using ann-benchmarks datasets. +package crud + +import ( + "context" + "testing" + "time" + + "github.com/vdaas/vald/apis/grpc/v1/payload" + "github.com/vdaas/vald/apis/grpc/v1/vald" + "github.com/vdaas/vald/internal/iter" + "github.com/vdaas/vald/tests/v2/e2e/config" +) + +// Predefined request builder functions for unary modify requests. 
+var ( + insertRequest newRequest[*payload.Insert_Request] = func(t *testing.T, idx uint64, id string, vec []float32, plan *config.Execution) *payload.Insert_Request { + ts, skip := toModificationConfig(plan) + return &payload.Insert_Request{ + Vector: &payload.Object_Vector{ + Id: id, + Vector: vec, + Timestamp: ts, + }, + Config: &payload.Insert_Config{ + Timestamp: ts, + SkipStrictExistCheck: skip, + }, + } + } + insertMultipleRequest newMultiRequest[*payload.Insert_Request, *payload.Insert_MultiRequest] = func(t *testing.T, reqs []*payload.Insert_Request) *payload.Insert_MultiRequest { + return &payload.Insert_MultiRequest{ + Requests: reqs, + } + } + updateRequest newRequest[*payload.Update_Request] = func(t *testing.T, idx uint64, id string, vec []float32, plan *config.Execution) *payload.Update_Request { + ts, skip := toModificationConfig(plan) + return &payload.Update_Request{ + Vector: &payload.Object_Vector{ + Id: id, + Vector: vec, + Timestamp: ts, + }, + Config: &payload.Update_Config{ + Timestamp: ts, + SkipStrictExistCheck: skip, + }, + } + } + updateMultipleRequest newMultiRequest[*payload.Update_Request, *payload.Update_MultiRequest] = func(t *testing.T, reqs []*payload.Update_Request) *payload.Update_MultiRequest { + return &payload.Update_MultiRequest{ + Requests: reqs, + } + } + upsertRequest newRequest[*payload.Upsert_Request] = func(t *testing.T, idx uint64, id string, vec []float32, plan *config.Execution) *payload.Upsert_Request { + ts, skip := toModificationConfig(plan) + return &payload.Upsert_Request{ + Vector: &payload.Object_Vector{ + Id: id, + Vector: vec, + Timestamp: ts, + }, + Config: &payload.Upsert_Config{ + Timestamp: ts, + SkipStrictExistCheck: skip, + }, + } + } + upsertMultipleRequest newMultiRequest[*payload.Upsert_Request, *payload.Upsert_MultiRequest] = func(t *testing.T, reqs []*payload.Upsert_Request) *payload.Upsert_MultiRequest { + return &payload.Upsert_MultiRequest{ + Requests: reqs, + } + } + removeRequest newRequest[*payload.Remove_Request] = func(t *testing.T, idx uint64, id string, vec []float32, plan *config.Execution) *payload.Remove_Request { + ts, skip := toModificationConfig(plan) + return &payload.Remove_Request{ + Id: &payload.Object_ID{ + Id: id, + }, + Config: &payload.Remove_Config{ + Timestamp: ts, + SkipStrictExistCheck: skip, + }, + } + } + removeMultipleRequest newMultiRequest[*payload.Remove_Request, *payload.Remove_MultiRequest] = func(t *testing.T, reqs []*payload.Remove_Request) *payload.Remove_MultiRequest { + return &payload.Remove_MultiRequest{ + Requests: reqs, + } + } + removeByTimestampRequest newRequest[*payload.Remove_TimestampRequest] = func(t *testing.T, idx uint64, id string, vec []float32, plan *config.Execution) *payload.Remove_TimestampRequest { + ts, _ := toModificationConfig(plan) + if ts == 0 { + ts = time.Now().UnixNano() + } + return &payload.Remove_TimestampRequest{ + Timestamps: []*payload.Remove_Timestamp{ + { + Timestamp: ts, + Operator: payload.Remove_Timestamp_Le, + }, + }, + } + } +) + +func (r *runner) processModification( + t *testing.T, + ctx context.Context, + train iter.Cycle[[][]float32, []float32], + plan *config.Execution, +) { + t.Helper() + if plan == nil { + t.Fatal("modification plan is nil") + return + } + switch plan.Type { + case config.OpInsert: + switch plan.Mode { + case config.OperationUnary, config.OperationOther: + unary(t, ctx, train, plan, r.client.Insert, insertRequest) + case config.OperationMultiple: + multi(t, ctx, train, plan, r.client.MultiInsert, insertRequest, 
insertMultipleRequest) + case config.OperationStream: + stream[vald.Insert_StreamInsertClient, *payload.Insert_Request, *payload.Object_Location](t, ctx, train, plan, r.client.StreamInsert, insertRequest) + } + case config.OpUpdate: + switch plan.Mode { + case config.OperationUnary, config.OperationOther: + unary(t, ctx, train, plan, r.client.Update, updateRequest) + case config.OperationMultiple: + multi(t, ctx, train, plan, r.client.MultiUpdate, updateRequest, updateMultipleRequest) + case config.OperationStream: + stream[vald.Update_StreamUpdateClient, *payload.Update_Request, *payload.Object_Location](t, ctx, train, plan, r.client.StreamUpdate, updateRequest) + } + case config.OpUpsert: + switch plan.Mode { + case config.OperationUnary, config.OperationOther: + unary(t, ctx, train, plan, r.client.Upsert, upsertRequest) + case config.OperationMultiple: + multi(t, ctx, train, plan, r.client.MultiUpsert, upsertRequest, upsertMultipleRequest) + case config.OperationStream: + stream[vald.Upsert_StreamUpsertClient, *payload.Upsert_Request, *payload.Object_Location](t, ctx, train, plan, r.client.StreamUpsert, upsertRequest) + } + case config.OpRemove: + switch plan.Mode { + case config.OperationUnary, config.OperationOther: + unary(t, ctx, train, plan, r.client.Remove, removeRequest) + case config.OperationMultiple: + multi(t, ctx, train, plan, r.client.MultiRemove, removeRequest, removeMultipleRequest) + case config.OperationStream: + stream[vald.Remove_StreamRemoveClient, *payload.Remove_Request, *payload.Object_Location](t, ctx, train, plan, r.client.StreamRemove, removeRequest) + } + case config.OpRemoveByTimestamp: + single(t, ctx, 0, plan, removeByTimestampRequest(t, 0, "", nil, plan), r.client.RemoveByTimestamp) + } +} + +func toModificationConfig(plan *config.Execution) (ts int64, skip bool) { + if plan != nil && plan.Modification != nil { + ts = plan.Modification.Timestamp + skip = plan.Modification.SkipStrictExistCheck + } + return ts, skip +} diff --git a/tests/v2/e2e/crud/object_test.go b/tests/v2/e2e/crud/object_test.go new file mode 100644 index 0000000000..5a54109a2a --- /dev/null +++ b/tests/v2/e2e/crud/object_test.go @@ -0,0 +1,124 @@ +//go:build e2e + +// +// Copyright (C) 2019-2025 vdaas.org vald team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// package crud provides end-to-end tests using ann-benchmarks datasets. 
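Every modify builder above copies the same two knobs, timestamp and skip_strict_exist_check, from the execution's modification block into the request config via toModificationConfig. A minimal sketch of one resulting request, reusing the payload types already imported by these tests; the ID and vector values are illustrative only.

package main

import (
	"fmt"
	"time"

	"github.com/vdaas/vald/apis/grpc/v1/payload"
)

func main() {
	ts := time.Now().UnixNano()
	req := &payload.Insert_Request{
		Vector: &payload.Object_Vector{
			Id:        "42",
			Vector:    []float32{0.1, 0.2, 0.3},
			Timestamp: ts,
		},
		Config: &payload.Insert_Config{
			Timestamp:            ts,
			SkipStrictExistCheck: false,
		},
	}
	fmt.Println(req.GetVector().GetId(), req.GetConfig().GetTimestamp())
}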
+package crud + +import ( + "context" + "testing" + + "github.com/vdaas/vald/apis/grpc/v1/payload" + "github.com/vdaas/vald/apis/grpc/v1/vald" + "github.com/vdaas/vald/internal/io" + "github.com/vdaas/vald/internal/iter" + "github.com/vdaas/vald/internal/net/grpc/codes" + "github.com/vdaas/vald/tests/v2/e2e/config" +) + +var ( + objectRequest newRequest[*payload.Object_VectorRequest] = func(t *testing.T, _ uint64, id string, _ []float32, _ *config.Execution) *payload.Object_VectorRequest { + return &payload.Object_VectorRequest{ + Id: existsRequest(t, 0, id, nil, nil), + } + } + + existsRequest newRequest[*payload.Object_ID] = func(t *testing.T, _ uint64, id string, _ []float32, _ *config.Execution) *payload.Object_ID { + return &payload.Object_ID{ + Id: id, + } + } + + timestampRequest newRequest[*payload.Object_TimestampRequest] = func(t *testing.T, _ uint64, id string, _ []float32, _ *config.Execution) *payload.Object_TimestampRequest { + return &payload.Object_TimestampRequest{ + Id: existsRequest(t, 0, id, nil, nil), + } + } +) + +func (r *runner) processObject( + t *testing.T, + ctx context.Context, + train iter.Cycle[[][]float32, []float32], + plan *config.Execution, +) { + t.Helper() + if plan == nil { + t.Fatal("object operation plan is nil") + return + } + switch plan.Type { + case config.OpObject: + switch plan.Mode { + case config.OperationUnary, config.OperationOther: + unary(t, ctx, train, plan, r.client.GetObject, objectRequest) + case config.OperationMultiple: + t.Errorf("unsupported Object operation %s for %s", plan.Mode, plan.Type) + case config.OperationStream: + stream[vald.Object_StreamGetObjectClient, *payload.Object_VectorRequest, *payload.Object_Vector](t, ctx, train, plan, r.client.StreamGetObject, objectRequest) + } + case config.OpTimestamp: + switch plan.Mode { + case config.OperationUnary, config.OperationOther: + unary(t, ctx, train, plan, r.client.GetTimestamp, timestampRequest) + case config.OperationMultiple, config.OperationStream: + t.Errorf("unsupported Timestamp operation %s for %s", plan.Mode, plan.Type) + } + case config.OpExists: + switch plan.Mode { + case config.OperationUnary, config.OperationOther: + unary(t, ctx, train, plan, r.client.Exists, existsRequest) + case config.OperationMultiple, config.OperationStream: + t.Errorf("unsupported Exists operation %s for %s", plan.Mode, plan.Type) + } + case config.OpListObject: + switch plan.Mode { + case config.OperationMultiple, config.OperationStream: + t.Errorf("unsupported ListObject operation %s for %s", plan.Mode, plan.Type) + case config.OperationUnary, config.OperationOther: + stream, err := r.client.StreamListObject(ctx, new(payload.Object_List_Request)) + if err != nil { + t.Error(err) + return + } + cnt := uint64(0) + defer stream.CloseSend() + for { + cnt++ + res, err := stream.Recv() + if err != nil { + if err == io.EOF { + return + } + if plan.ExpectedStatusCodes != nil && plan.ExpectedStatusCodes.Equals(codes.ToString(res.GetStatus().GetCode())) { + t.Logf("expected error: %v", err) + } else { + t.Errorf("unexpected error: %v", err) + } + + break + } + t.Logf("successfully get vector %v", res.GetVector()) + if cnt >= train.Len() { + return + } + + } + } + } +} diff --git a/tests/v2/e2e/crud/search_test.go b/tests/v2/e2e/crud/search_test.go new file mode 100644 index 0000000000..73a7b0c703 --- /dev/null +++ b/tests/v2/e2e/crud/search_test.go @@ -0,0 +1,259 @@ +//go:build e2e + +// +// Copyright (C) 2019-2025 vdaas.org vald team +// +// Licensed under the Apache License, Version 2.0 (the 
"License"); +// You may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// package crud provides end-to-end tests using ann-benchmarks datasets. +package crud + +import ( + "context" + "strconv" + "testing" + "time" + + "github.com/vdaas/vald/apis/grpc/v1/payload" + "github.com/vdaas/vald/internal/iter" + "github.com/vdaas/vald/internal/net/grpc/proto" + "github.com/vdaas/vald/internal/strings" + "github.com/vdaas/vald/tests/v2/e2e/config" + "google.golang.org/protobuf/types/known/wrapperspb" +) + +// recall calculates the recall ratio by comparing the list of result IDs +// with the expected neighbors provided as a slice of integers. +// It returns the ratio of matching neighbor IDs to the total number of expected neighbors. +func recall(t *testing.T, resultIDs []string, neighbors []int) float64 { + t.Helper() + // Create a set of expected neighbor IDs for fast lookup. + ns := make(map[string]struct{}) + for _, n := range neighbors { + ns[strconv.Itoa(n)] = struct{}{} + } + + // Count how many resultIDs exist in the set of expected neighbor IDs. + var count int + for _, r := range resultIDs { + if _, ok := ns[r]; ok { + count++ + } + } + // Return the recall as a ratio. + return float64(count) / float64(len(neighbors)) +} + +// calculateRecall extracts the topK result IDs from the search response and computes the recall. +// It uses the provided index to select the expected neighbor IDs from a global source (ds.Neighbors). +func calculateRecall(t *testing.T, neighbors []int, res *payload.Search_Response) float64 { + t.Helper() + // Extract the IDs from the results. + topKIDs := make([]string, 0, len(res.GetResults())) + for _, d := range res.GetResults() { + topKIDs = append(topKIDs, d.GetId()) + } + + // If no results are returned, log an error. + if len(topKIDs) == 0 { + t.Errorf("empty result is returned for test ID %s: %#v", res.GetRequestId(), topKIDs) + return 0 + } + // ds.Neighbors is assumed to be defined globally with expected neighbor IDs. + return recall(t, topKIDs, neighbors[:len(topKIDs)]) +} + +// newSearchConfig creates a new Search_Config instance based on the provided search query and test ID. +// It parses the timeout string into nanoseconds, sets a default timeout if needed, and conditionally sets the ratio. +func newSearchConfig(t *testing.T, id string, query *config.SearchQuery) *payload.Search_Config { + t.Helper() + if query == nil { + t.Errorf("search query is nil") + } + return &payload.Search_Config{ + // The RequestId is composed of the test ID and the name of the aggregation algorithm. + RequestId: id + "-" + payload.Search_AggregationAlgorithm_name[int32(query.Algorithm)], + Num: query.K, + Radius: query.Radius, + Epsilon: query.Epsilon, + // Parse the timeout value; use 1 second as default if parsing fails or timeout is empty. 
+ Timeout: func() int64 { + if query.Timeout != "" { + timeout, err := query.Timeout.Duration() + if err == nil { + return timeout.Nanoseconds() + } + } + return time.Second.Nanoseconds() + }(), + AggregationAlgorithm: query.Algorithm, + MinNum: query.MinNum, + // Conditionally set the ratio if it is non-zero. + Ratio: func() *wrapperspb.FloatValue { + if query.Ratio != 0 { + return wrapperspb.Float(query.Ratio) + } + return nil + }(), + Nprobe: query.Nprobe, + } +} + +// newSearchRequest is a generic type for functions that create search requests. +type newSearchRequest[R proto.Message] func(id string, vec []float32, scfg *payload.Search_Config) R + +// Predefined request builder functions for unary and multi search requests. +var ( + // searchRequest builds a Search_Request given a vector and search configuration. + // The id parameter is ignored in this case. + searchRequest newRequest[*payload.Search_Request] = func(t *testing.T, _ uint64, id string, vec []float32, e *config.Execution) *payload.Search_Request { + return &payload.Search_Request{ + Vector: vec, + Config: newSearchConfig(t, id, e.Search), + } + } + + // searchIDRequest builds a Search_IDRequest given an id and search configuration. + // The vector is ignored for search-by-ID requests. + searchIDRequest newRequest[*payload.Search_IDRequest] = func(t *testing.T, _ uint64, id string, _ []float32, e *config.Execution) *payload.Search_IDRequest { + return &payload.Search_IDRequest{ + Id: id, + Config: newSearchConfig(t, id, e.Search), + } + } + + // searchMultiRequest builds a Search_MultiRequest from a slice of Search_Request. + searchMultiRequest newMultiRequest[*payload.Search_Request, *payload.Search_MultiRequest] = func(t *testing.T, reqs []*payload.Search_Request) *payload.Search_MultiRequest { + return &payload.Search_MultiRequest{ + Requests: reqs, + } + } + + // searchMultiIDRequest builds a Search_MultiIDRequest from a slice of Search_IDRequest. + searchMultiIDRequest newMultiRequest[*payload.Search_IDRequest, *payload.Search_MultiIDRequest] = func(t *testing.T, reqs []*payload.Search_IDRequest) *payload.Search_MultiIDRequest { + return &payload.Search_MultiIDRequest{ + Requests: reqs, + } + } +) + +// processSearch dispatches the search operation based on the type and mode specified in the plan. +// It supports unary, multiple (bulk), and stream operations for both vector search and search-by-ID. +func (r *runner) processSearch( + t *testing.T, + ctx context.Context, + test, train iter.Cycle[[][]float32, []float32], + neighbors iter.Cycle[[][]int, []int], + plan *config.Execution, +) { + t.Helper() + if plan == nil { + t.Fatal("search operation plan is nil") + return + } + + if plan.BaseConfig == nil { + t.Fatal("base configuration is nil") + return + } + if plan.Search == nil { + t.Fatal("search configuration is nil") + return + } + + switch plan.Type { + case config.OpSearch: + switch plan.Mode { + case config.OperationUnary, config.OperationOther: + // For unary search requests, use the generic unarySearch function with the searchRequest builder. + unary(t, ctx, test, plan, r.client.Search, searchRequest, checkUnarySearchResponse(neighbors)) + case config.OperationMultiple: + // For bulk search requests, use the generic multiSearch function with searchRequest and searchMultiRequest builders. 
+ multi(t, ctx, test, plan, r.client.MultiSearch, searchRequest, searchMultiRequest, checkMultiSearchResponse(neighbors)) + case config.OperationStream: + // For streaming search requests, use the generic streamSearch function with the searchRequest builder. + stream(t, ctx, test, plan, r.client.StreamSearch, searchRequest, checkStreamSearchResponse(neighbors)) + } + case config.OpSearchByID: + switch plan.Mode { + case config.OperationUnary, config.OperationOther: + unary(t, ctx, train, plan, r.client.SearchByID, searchIDRequest, checkUnarySearchResponse(neighbors)) + case config.OperationMultiple: + multi(t, ctx, train, plan, r.client.MultiSearchByID, searchIDRequest, searchMultiIDRequest, checkMultiSearchResponse(neighbors)) + case config.OperationStream: + stream(t, ctx, train, plan, r.client.StreamSearchByID, searchIDRequest, checkStreamSearchResponse(neighbors)) + } + case config.OpLinearSearch: + switch plan.Mode { + case config.OperationUnary, config.OperationOther: + unary(t, ctx, test, plan, r.client.LinearSearch, searchRequest, checkUnarySearchResponse(neighbors)) + case config.OperationMultiple: + multi(t, ctx, test, plan, r.client.MultiLinearSearch, searchRequest, searchMultiRequest, checkMultiSearchResponse(neighbors)) + case config.OperationStream: + stream(t, ctx, test, plan, r.client.StreamLinearSearch, searchRequest, checkStreamSearchResponse(neighbors)) + } + case config.OpLinearSearchByID: + switch plan.Mode { + case config.OperationUnary, config.OperationOther: + unary(t, ctx, test, plan, r.client.LinearSearchByID, searchIDRequest, checkUnarySearchResponse(neighbors)) + case config.OperationMultiple: + multi(t, ctx, train, plan, r.client.MultiLinearSearchByID, searchIDRequest, searchMultiIDRequest, checkMultiSearchResponse(neighbors)) + case config.OperationStream: + stream(t, ctx, train, plan, r.client.StreamLinearSearchByID, searchIDRequest, checkStreamSearchResponse(neighbors)) + } + } +} + +func checkUnarySearchResponse( + neighbors iter.Cycle[[][]int, []int], +) func(t *testing.T, idx uint64, res *payload.Search_Response, err error) bool { + return func(t *testing.T, idx uint64, res *payload.Search_Response, err error) bool { + rc := calculateRecall(t, neighbors.At(idx), res) + t.Logf("request id %s searched recall: %f, payload %s", res.GetRequestId(), rc, res.String()) + return true + } +} + +func checkMultiSearchResponse( + neighbors iter.Cycle[[][]int, []int], +) func(t *testing.T, idx uint64, res *payload.Search_Responses, err error) bool { + return func(t *testing.T, idx uint64, res *payload.Search_Responses, err error) bool { + // For each response in the bulk response, log the recall. 
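+		// Each response carries its own RequestId (per-query index plus algorithm name),
+		// so the index is recovered from the RequestId rather than from the bulk index idx.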
+ for _, r := range res.GetResponses() { + if !checkUnarySearchResponse(neighbors)(t, getIndexFromSearchResponse(t, r), r, err) { + return false + } + } + return true + } +} + +func checkStreamSearchResponse( + neighbors iter.Cycle[[][]int, []int], +) func(t *testing.T, idx uint64, res *payload.Search_Response, err error) bool { + return func(t *testing.T, idx uint64, res *payload.Search_Response, err error) bool { + return checkUnarySearchResponse(neighbors)(t, getIndexFromSearchResponse(t, res), res, err) + } +} + +func getIndexFromSearchResponse(t *testing.T, res *payload.Search_Response) (idx uint64) { + t.Helper() + id, _, _ := strings.Cut(res.GetRequestId(), "-") + var err error + idx, err = strconv.ParseUint(id, 10, 64) + if err != nil { + t.Error(err) + } + return idx +} diff --git a/tests/v2/e2e/crud/strategy_test.go b/tests/v2/e2e/crud/strategy_test.go new file mode 100644 index 0000000000..03266b1890 --- /dev/null +++ b/tests/v2/e2e/crud/strategy_test.go @@ -0,0 +1,338 @@ +//go:build e2e + +// +// Copyright (C) 2019-2025 vdaas.org vald team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// package crud provides e2e tests using ann-benchmarks datasets +package crud + +import ( + "context" + "fmt" + "testing" + "time" + + agent "github.com/vdaas/vald/internal/client/v1/client/agent/core" + "github.com/vdaas/vald/internal/client/v1/client/vald" + "github.com/vdaas/vald/internal/errors" + "github.com/vdaas/vald/internal/log" + "github.com/vdaas/vald/internal/net/grpc" + "github.com/vdaas/vald/internal/sync/errgroup" + "github.com/vdaas/vald/tests/v2/e2e/config" + k8s "github.com/vdaas/vald/tests/v2/e2e/kubernetes" + "github.com/vdaas/vald/tests/v2/e2e/kubernetes/portforward" + "google.golang.org/grpc/metadata" +) + +type runner struct { + rootCtx context.Context + client vald.Client + aclient agent.Client + k8s k8s.Client +} + +func TestE2EStrategy(t *testing.T) { + if cfg == nil || cfg.Strategies == nil { + t.Fatal("test setting or strategies is nil, please add test configuration yaml file by -config option") + } + + ctx, cancel := context.WithCancel(t.Context()) + defer cancel() + + var err error + r := new(runner) + if cfg.Kubernetes != nil { + r.k8s, err = k8s.NewClient(cfg.Kubernetes.KubeConfig, "") + if err != nil { + t.Errorf("failed to create kubernetes client: %v", err) + } + if cfg.Kubernetes.PortForward.Enabled { + if r.k8s == nil { + t.Fatal("kubernetes client is nil") + } + + pfd, err := portforward.New( + portforward.WithAddress("localhost"), + portforward.WithClient(r.k8s), + portforward.WithNamespace(cfg.Kubernetes.PortForward.Namespace), + portforward.WithServiceName(cfg.Kubernetes.PortForward.ServiceName), + portforward.WithPorts(map[uint16]uint16{ + cfg.Kubernetes.PortForward.LocalPort.Port(): cfg.Kubernetes.PortForward.TargetPort.Port(), + }), + ) + if err != nil { + if pfd != nil { + pfd.Stop() + } + t.Fatalf("failed to portforward: %v", err) + } + defer pfd.Stop() + _, err = pfd.Start(ctx) + if err != nil { + if pfd != nil { + pfd.Stop() + } + 
t.Fatalf("failed to portforward: %v", err) + } + } + } + + r.client, ctx, err = newClient(t, ctx, cfg.Metadata) + if err != nil { + t.Fatalf("failed to create client: %v", err) + } + if r.client == nil { + t.Fatal("gRPC E2E client is nil") + } + ech, err := r.client.Start(ctx) + if err != nil { + t.Fatalf("failed to start client: %v", err) + } + + r.aclient, err = agent.New(agent.WithValdClient(r.client)) + if err != nil { + t.Fatalf("failed to create agent client: %v", err) + } + + go func() { + select { + case <-ctx.Done(): + return + case err := <-ech: + if err != nil { + t.Errorf("client daemon returned error: %v", err) + } + } + }() + defer func() { + err = r.client.Stop(ctx) + if err != nil { + t.Errorf("failed to stop client: %v", err) + } + }() + t.Logf("connected addrs: %v", r.client.GRPCClient().ConnectedAddrs()) + + for i, st := range cfg.Strategies { + r.processStrategy(t, ctx, i, st) + } +} + +func (r *runner) processStrategy(t *testing.T, ctx context.Context, idx int, st *config.Strategy) { + t.Helper() + if r == nil || st == nil { + return + } + + t.Run(fmt.Sprintf("#%d: strategy=%s", idx, st.Name), func(tt *testing.T) { + if err := executeWithTimings(tt, ctx, st, st.Name, "strategy", func(ttt *testing.T, ctx context.Context) error { + eg, egctx := errgroup.New(ctx) + if st.Concurrency > 0 { + eg.SetLimit(int(st.Concurrency)) + ttt.Logf("concurrency is set to %d, the operations will execute concurrently with limit (%d)", st.Concurrency, st.Concurrency) + } else { + ttt.Logf("concurrency is not set, the operations will execute concurrently with no limit (%d)", len(st.Operations)) + } + + for i, op := range st.Operations { + if op != nil { + i, op := i, op + eg.Go(func() error { + r.processOperation(ttt, egctx, i, op) + return nil + }) + } + } + + return eg.Wait() + }); err != nil { + tt.Errorf("failed to process operations: %v", err) + } + }) +} + +func (r *runner) processOperation( + t *testing.T, ctx context.Context, idx int, op *config.Operation, +) { + t.Helper() + if r == nil || op == nil { + return + } + + t.Run(fmt.Sprintf("#%d: operation=%s", idx, op.Name), func(tt *testing.T) { + if err := executeWithTimings(tt, ctx, op, op.Name, "operation", func(ttt *testing.T, ctx context.Context) error { + ttt.Helper() + for i, e := range op.Executions { + r.processExecution(ttt, ctx, i, e) + } + return nil + }); err != nil { + tt.Errorf("failed to process operation: %v", err) + } + }) +} + +func (r *runner) processExecution(t *testing.T, ctx context.Context, idx int, e *config.Execution) { + t.Helper() + if r == nil || e == nil { + return + } + + t.Run(fmt.Sprintf("#%d: execution=%s type=%s mode=%s", idx, e.Name, e.Type, e.Mode), func(tt *testing.T) { + if err := executeWithTimings(tt, ctx, e, e.Name, "execution", func(ttt *testing.T, ctx context.Context) error { + switch e.Type { + case config.OpSearch, + config.OpSearchByID, + config.OpLinearSearch, + config.OpLinearSearchByID, + config.OpInsert, + config.OpUpdate, + config.OpUpsert, + config.OpRemove, + config.OpRemoveByTimestamp, + config.OpObject, + config.OpListObject, + config.OpTimestamp, + config.OpExists: + train, test, neighbors := getDatasetSlices(ttt, e) + if e.BaseConfig != nil { + log.Infof("started execution name: %s, type: %s, mode: %s, execution: %d, num: %d, offset: %d", + e.Name, e.Type, e.Mode, idx, e.Num, e.Offset) + defer log.Infof("finished execution name: %s type: %s, mode: %s, execution: %d, num: %d, offset: %d", + e.Name, e.Type, e.Mode, idx, e.Num, e.Offset) + } + switch e.Type { + case 
config.OpSearch, + config.OpSearchByID, + config.OpLinearSearch, + config.OpLinearSearchByID: + r.processSearch(ttt, ctx, train, test, neighbors, e) + case config.OpInsert, + config.OpUpdate, + config.OpUpsert, + config.OpRemove, + config.OpRemoveByTimestamp: + r.processModification(ttt, ctx, train, e) + case config.OpObject, + config.OpListObject, + config.OpTimestamp, + config.OpExists: + r.processObject(ttt, ctx, train, e) + } + case config.OpIndexInfo, + config.OpIndexDetail, + config.OpIndexStatistics, + config.OpIndexStatisticsDetail, + config.OpIndexProperty, + config.OpFlush: + log.Infof("type: %s, mode: %s, execution: %d", e.Type, e.Mode, idx) + r.processIndex(ttt, ctx, e) + case config.OpKubernetes: + // TODO implement kubernetes operation here, eg. delete pod, rollout restart, etc. + case config.OpClient: + // TODO implement gRPC client operation here, eg. start, stop, etc. + case config.OpWait: + // do nothing + default: + ttt.Errorf("unsupported operation type: %s detected during execution %d", e.Type, idx) + } + return nil + }); err != nil { + tt.Errorf("failed to process execution: %v", err) + } + }) +} + +func executeWithTimings[T config.Timing]( + t *testing.T, + ctx context.Context, + cfg T, + name, prefix string, + fn func(*testing.T, context.Context) error, +) error { + t.Helper() + if delay := cfg.GetDelay(); delay != "" { + dur, err := delay.Duration() + if err != nil { + t.Errorf("failed to parse delay duration: %s, error: %v", delay, err) + } + if dur > 0 { + log.Infof("delay is set to %s, this %s/%s will start after %s", delay, prefix, name, dur.String()) + select { + case <-ctx.Done(): + return ctx.Err() + case <-time.After(dur): + } + } + } + + var cancel context.CancelFunc = func() {} + if timeout := cfg.GetTimeout(); timeout != "" { + dur, err := timeout.Duration() + if err != nil { + t.Errorf("failed to parse timeout duration: %s, error: %v", timeout, err) + } + if dur > 0 { + t.Logf("timeout is set to %s, this %s/%s will stop after %s", timeout, prefix, name, dur.String()) + ctx, cancel = context.WithTimeout(ctx, dur) + } + } + defer cancel() + + err := fn(t, ctx) + + if wait := cfg.GetWait(); wait != "" { + dur, werr := wait.Duration() + if werr != nil { + t.Errorf("failed to parse wait duration: %s, error: %v", wait, werr) + return err + } + if dur > 0 { + log.Infof("\"%s.wait: %s\", wait configuration detected, this %s/%s is already finished, but will wait for %s", prefix, wait, prefix, name, dur.String()) + select { + case <-ctx.Done(): + return ctx.Err() + case <-time.After(dur): + } + } + } + + return err +} + +func newClient( + t *testing.T, ctx context.Context, meta map[string]string, +) (client vald.Client, mctx context.Context, err error) { + t.Helper() + if cfg == nil || cfg.Target == nil { + return nil, nil, errors.ErrGRPCTargetAddrNotFound + } + gopts, err := cfg.Target.Opts() + if err != nil { + return nil, nil, err + } + client, err = vald.New( + vald.WithClient( + grpc.New(gopts...), + ), + ) + if err != nil { + return nil, nil, err + } + if meta != nil { + mctx = metadata.NewOutgoingContext(ctx, metadata.New(meta)) + } + return client, mctx, nil +} diff --git a/tests/v2/e2e/hdf5/hdf5.go b/tests/v2/e2e/hdf5/hdf5.go new file mode 100644 index 0000000000..5cf4b834b1 --- /dev/null +++ b/tests/v2/e2e/hdf5/hdf5.go @@ -0,0 +1,127 @@ +//go:build e2e + +// +// Copyright (C) 2019-2025 vdaas.org vald team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// package hdf5 provides hdf5 utilities for e2e testing +package hdf5 + +import ( + "github.com/vdaas/vald/internal/iter" + "github.com/vdaas/vald/internal/sync" + "github.com/vdaas/vald/internal/test/data/vector/noise" + "gonum.org/v1/hdf5" +) + +type Dataset struct { + Train [][]float32 + Test [][]float32 + Neighbors [][]int + once sync.Once + noiseFunc noise.Func + maxLen uint64 +} + +func (d *Dataset) TrainCycle(num, offset uint64) iter.Cycle[[][]float32, []float32] { + if num > d.maxLen && d.noiseFunc == nil { + d.InitNoiseFunc(num) + } + return iter.NewCycle(d.Train, num, offset, d.noiseFunc) +} + +func (d *Dataset) TestCycle(num, offset uint64) iter.Cycle[[][]float32, []float32] { + if num > d.maxLen && d.noiseFunc == nil { + d.InitNoiseFunc(num) + } + return iter.NewCycle(d.Test, num, offset, d.noiseFunc) +} + +func (d *Dataset) NeighborsCycle(num, offset uint64) iter.Cycle[[][]int, []int] { + return iter.NewCycle(d.Neighbors, num, offset, nil) +} + +func (d *Dataset) InitNoiseFunc(num uint64, opts ...noise.Option) noise.Func { + if num > d.maxLen && d.noiseFunc == nil { + d.once.Do(func() { + data := d.Train + if len(data) == 0 || len(d.Test) > len(data) { + data = d.Test + } + d.noiseFunc = noise.New(data, num, opts...).Mod() + }) + } + return d.noiseFunc +} + +func HDF5ToDataset(name string) (*Dataset, error) { + file, err := hdf5.OpenFile(name, hdf5.F_ACC_RDONLY) + if err != nil { + return nil, err + } + + defer file.Close() + + train, err := ReadDataset[float32](file, "train") + if err != nil { + return nil, err + } + + test, err := ReadDataset[float32](file, "test") + if err != nil { + return nil, err + } + + neighbors, err := ReadDataset[int](file, "neighbors") + if err != nil { + return nil, err + } + + return &Dataset{ + Train: train, + Test: test, + Neighbors: neighbors, + maxLen: uint64(max(len(train), len(test), len(neighbors))), + }, nil +} + +func ReadDataset[T any](file *hdf5.File, name string) ([][]T, error) { + data, err := file.OpenDataset(name) + if err != nil { + return nil, err + } + defer data.Close() + + dataspace := data.Space() + defer dataspace.Close() + + dims, _, err := dataspace.SimpleExtentDims() + if err != nil { + return nil, err + } + height, width := int(dims[0]), int(dims[1]) + + rawFloats := make([]T, dataspace.SimpleExtentNPoints()) + if err := data.Read(&rawFloats); err != nil { + return nil, err + } + + vecs := make([][]T, height) + for i := 0; i < height; i++ { + vecs[i] = rawFloats[i*width : i*width+width] + } + + return vecs, nil +} diff --git a/tests/v2/e2e/kubernetes/client.go b/tests/v2/e2e/kubernetes/client.go new file mode 100644 index 0000000000..498fdfb855 --- /dev/null +++ b/tests/v2/e2e/kubernetes/client.go @@ -0,0 +1,124 @@ +//go:build e2e + +// +// Copyright (C) 2019-2025 vdaas.org vald team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// package kubernetes provides kubernetes client functions +package kubernetes + +import ( + "os" + + "github.com/vdaas/vald/internal/errors" + "github.com/vdaas/vald/internal/file" + "github.com/vdaas/vald/internal/log" + "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" + kclient "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/manager" +) + +type Client interface { + GetClientSet() kubernetes.Interface + GetRESRConfig() *rest.Config +} + +type client struct { + rest *rest.Config + clientset *kubernetes.Clientset + manager manager.Manager + client kclient.WithWatch +} + +func NewClient(kubeConfig, currentContext string) (c Client, err error) { + if kubeConfig == "" { + kubeConfig = os.Getenv(clientcmd.RecommendedConfigPathEnvVar) + if kubeConfig == "" { + if file.Exists(clientcmd.RecommendedHomeFile) { + kubeConfig = clientcmd.RecommendedHomeFile + } + if kubeConfig == "" { + c, err = inClusterConfigClient() + if err != nil { + return nil, err + } + return c, nil + } + } + } + + cfg, err := clientcmd.NewNonInteractiveDeferredLoadingClientConfig( + &clientcmd.ClientConfigLoadingRules{ExplicitPath: kubeConfig}, + &clientcmd.ConfigOverrides{ + ClusterInfo: clientcmdapi.Cluster{}, + CurrentContext: currentContext, + }).ClientConfig() + if err != nil { + log.Debugf("failed to build config from kubeConfig path %s,\terror: %v", kubeConfig, err) + var ierr error + c, ierr = inClusterConfigClient() + if ierr != nil { + return nil, errors.Join(err, ierr) + } + return c, nil + } + + c, err = newClient(cfg) + if err != nil { + log.Debugf("failed to build config from kubeConfig path %s,\terror: %v", kubeConfig, err) + var ierr error + c, ierr = inClusterConfigClient() + if ierr != nil { + return nil, errors.Join(err, ierr) + } + } + return c, nil +} + +func newClient(cfg *rest.Config) (Client, error) { + if cfg.QPS == 0.0 { + cfg.QPS = 20.0 + } + if cfg.Burst == 0 { + cfg.Burst = 30 + } + clientset, err := kubernetes.NewForConfig(cfg) + if err != nil { + return nil, err + } + return &client{ + rest: cfg, + clientset: clientset, + }, nil +} + +func inClusterConfigClient() (Client, error) { + cfg, err := rest.InClusterConfig() + if err != nil { + return nil, err + } + return newClient(cfg) +} + +func (c *client) GetClientSet() kubernetes.Interface { + return c.clientset +} + +func (c *client) GetRESRConfig() *rest.Config { + return c.rest +} diff --git a/tests/v2/e2e/kubernetes/portforward/option.go b/tests/v2/e2e/kubernetes/portforward/option.go new file mode 100644 index 0000000000..c5f6ba4556 --- /dev/null +++ b/tests/v2/e2e/kubernetes/portforward/option.go @@ -0,0 +1,101 @@ +//go:build e2e + +// +// Copyright (C) 2019-2025 vdaas.org vald team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// package portforward provides a persistent port forwarding daemon for Kubernetes services. +package portforward + +import ( + "net/http" + + "github.com/vdaas/vald/internal/backoff" + "github.com/vdaas/vald/internal/sync/errgroup" + k8s "github.com/vdaas/vald/tests/v2/e2e/kubernetes" +) + +// Option represents the functional option for backoff. +type Option func(*portForward) + +var defaultOptions = []Option{ + WithHTTPClient(http.DefaultClient), + WithBackoff(backoff.New()), + WithNamespace("default"), +} + +func WithClient(client k8s.Client) Option { + return func(pf *portForward) { + if client != nil { + pf.client = client + } + } +} + +func WithBackoff(bo backoff.Backoff) Option { + return func(pf *portForward) { + if bo != nil { + pf.backoff = bo + } + } +} + +func WithErrGroup(eg errgroup.Group) Option { + return func(pf *portForward) { + if eg != nil { + pf.eg = eg + } + } +} + +func WithNamespace(ns string) Option { + return func(pf *portForward) { + if ns != "" { + pf.namespace = ns + } + } +} + +func WithServiceName(name string) Option { + return func(pf *portForward) { + if name != "" { + pf.serviceName = name + } + } +} + +func WithAddress(addrs ...string) Option { + return func(pf *portForward) { + if addrs != nil { + pf.addresses = addrs + } + } +} + +func WithHTTPClient(c *http.Client) Option { + return func(pf *portForward) { + if c != nil { + pf.httpClient = c + } + } +} + +func WithPorts(pairs map[uint16]uint16) Option { + return func(pf *portForward) { + if pairs != nil { + pf.ports = pairs + } + } +} diff --git a/tests/v2/e2e/kubernetes/portforward/portforward.go b/tests/v2/e2e/kubernetes/portforward/portforward.go new file mode 100644 index 0000000000..7b20d548bc --- /dev/null +++ b/tests/v2/e2e/kubernetes/portforward/portforward.go @@ -0,0 +1,424 @@ +//go:build e2e + +// +// Copyright (C) 2019-2025 vdaas.org vald team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// package portforward provides a persistent port forwarding daemon for Kubernetes services. 
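+//
+// # Example
+//
+// A minimal usage sketch; the service name and port pair below are illustrative
+// placeholders, and client / ctx are assumed to be created elsewhere
+// (e.g. via kubernetes.NewClient and the test context):
+// ```go
+//
+// pf, err := portforward.New(
+// 	portforward.WithClient(client),
+// 	portforward.WithNamespace("default"),
+// 	portforward.WithServiceName("vald-lb-gateway"),
+// 	portforward.WithAddress("localhost"),
+// 	portforward.WithPorts(map[uint16]uint16{8081: 8081}), // local port -> target pod port
+// )
+// if err != nil {
+// 	return err
+// }
+//
+// ech, err := pf.Start(ctx) // ech reports runtime errors until Stop closes it
+// if err != nil {
+// 	pf.Stop()
+// 	return err
+// }
+// defer pf.Stop()
+//
+// ```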
+package portforward + +import ( + "context" + "fmt" + "net/http" + "os" + "slices" + "sync" + "sync/atomic" + + "github.com/vdaas/vald/internal/backoff" + "github.com/vdaas/vald/internal/errors" + "github.com/vdaas/vald/internal/log" + "github.com/vdaas/vald/internal/safety" + "github.com/vdaas/vald/internal/sync/errgroup" + k8s "github.com/vdaas/vald/tests/v2/e2e/kubernetes" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + watch "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/tools/portforward" + "k8s.io/client-go/transport/spdy" +) + +// Forwarder defines the interface for a persistent port forwarding daemon. +type Forwarder interface { + // Start launches the port forward daemon and returns an error channel (named "ech") + // to report runtime issues. + Start(ctx context.Context) (<-chan error, error) + // Stop gracefully terminates the port forwarding daemon. + Stop() error +} + +// portForward is the concrete implementation of the Forwarder interface. +// It holds all configuration and state required to run the persistent port forward daemon. +type portForward struct { + // Client provides access to the Kubernetes API. + client k8s.Client + + // EndpointsClient is used to watch the Endpoints resource. + eclient k8s.EndpointClient + + // Backoff settings for the connection loop. + backoff backoff.Backoff + + // errgroup is used to manage the lifecycle of the daemon goroutines. + eg errgroup.Group + + // Namespace where the service and pods reside. + namespace string + // ServiceName is the target service name used to fetch endpoints. + serviceName string + // Addresses are the local bind addresses for the port forward. + addresses []string + // Ports maps local ports to target pod ports. + ports map[uint16]uint16 + + // HTTP client used for SPDY transport. + httpClient *http.Client + + // targets holds the current list of available pod names (extracted from Endpoints). + targets []string + // current is used for efficient round-robin selection. + current atomic.Uint64 // using atomic operations for concurrent safety + + // cancel cancels the overall port forward daemon context. + cancel context.CancelFunc + + // ech is the error channel used to report errors during runtime. + ech chan error + + // mu protects access to the targets slice. + mu sync.RWMutex + + healthy atomic.Bool +} + +// NewForwarder creates a new instance of a Forwarder with default backoff settings. +func New(opts ...Option) (Forwarder, error) { + pf := new(portForward) + for _, opt := range append(defaultOptions, opts...) { + opt(pf) + } + if pf.client == nil { + return nil, errors.ErrKubernetesClientNotFound + } + if pf.namespace == "" { + return nil, errors.ErrUndefinedNamespace + } + if pf.serviceName == "" { + return nil, errors.ErrUndefinedService + } + if len(pf.addresses) == 0 { + return nil, errors.ErrPortForwardAddressNotFound + } + if len(pf.ports) == 0 { + return nil, errors.ErrPortForwardPortPairNotFound + } + pf.eclient = k8s.Endpoints(pf.client, pf.namespace) + + if pf.httpClient == nil { + pf.httpClient = http.DefaultClient + } + return pf, nil +} + +// updateTargets safely replaces the current target pod list and resets the round-robin counter. +func (pf *portForward) updateTargets(pods []string) { + pods = slices.Clip(pods) + slices.Sort(pods) + pods = slices.Compact(pods) + pf.mu.Lock() + pf.targets = pods + pf.mu.Unlock() + pf.current.Store(0) +} + +// getNextPod returns a pod name using a counter-based round-robin strategy. 
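+// With targets [pod-a, pod-b, pod-c] (illustrative names), successive calls yield pod-a, pod-b, pod-c, pod-a, and so on.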
+// It uses an atomic counter with a modulus operation to avoid slice rotation. +func (pf *portForward) getNextPod() (string, error) { + idx := pf.current.Add(1) + pf.mu.RLock() + defer pf.mu.RUnlock() + if len(pf.targets) == 0 { + return "", fmt.Errorf("no available pods") + } + pod := pf.targets[int(idx-1)%len(pf.targets)] + return pod, nil +} + +// Start launches the port forwarding daemon. +// It starts two goroutines: +// 1. runEndpointWatcher: continuously monitors Endpoints and updates the target pod list. +// 2. runConnectionLoop: repeatedly establishes port forwarding using round-robin selection with exponential backoff. +func (pf *portForward) Start(ctx context.Context) (<-chan error, error) { + // Create a cancelable context for the entire daemon. + ctx, pf.cancel = context.WithCancel(ctx) + + // Initialize the error channel (named "ech"). + pf.ech = make(chan error, 2) + + if pf.eg == nil { + pf.eg, ctx = errgroup.New(ctx) + } + + // Perform an initial update of the target pod list. + pf.loadTargets(ctx) + + // Start the endpoints watcher goroutine. + pf.eg.Go(safety.RecoverFunc(func() (err error) { + // Create a watch on the Endpoints resource using the short syntax. + watcher, err := pf.endpointsWatcher(ctx) + if err != nil { + return err + } + defer watcher.Stop() + + // Process events from the watcher channel. + for { + select { + case <-ctx.Done(): + return ctx.Err() + case _, ok := <-watcher.ResultChan(): + if !ok { + watcher.Stop() + log.Error("endpoints watcher channel closed, restarting watcher") + watcher, err = pf.endpointsWatcher(ctx) + if err != nil { + select { + case <-ctx.Done(): + return ctx.Err() + case pf.ech <- err: + } + return err + } + } else { + // On any event, update the target pod list. + pf.loadTargets(ctx) + } + } + } + })) + + pf.eg.Go(safety.RecoverFunc(func() (err error) { + for { + select { + case <-ctx.Done(): + return ctx.Err() + default: + if pf.backoff != nil { + _, err = pf.backoff.Do(ctx, func(ctx context.Context) (any, bool, error) { + return nil, true, pf.portForwardToService(ctx) + }) + } else { + err = pf.portForwardToService(ctx) + } + if err != nil { + if !errors.Is(err, context.Canceled) && !errors.Is(err, context.DeadlineExceeded) { + log.Errorf("port forward connection loop ended with error: %v", err) + } + select { + case <-ctx.Done(): + return ctx.Err() + case pf.ech <- err: + } + } + } + } + })) + + for { + select { + case <-ctx.Done(): + return pf.ech, ctx.Err() + default: + if pf.healthy.Load() { + return pf.ech, nil + } + } + } +} + +// Stop gracefully terminates the port forwarding daemon by canceling the context +// and waiting for all goroutines to finish. +func (pf *portForward) Stop() (err error) { + if pf.cancel != nil { + pf.cancel() + } + err = pf.eg.Wait() + close(pf.ech) + return err +} + +func (pf *portForward) endpointsWatcher(ctx context.Context) (w watch.Interface, err error) { + w, err = pf.eclient.Watch(ctx, metav1.ListOptions{ + FieldSelector: fmt.Sprintf("metadata.name=%s", pf.serviceName), + }) + if err != nil { + log.Errorf("failed to watch endpoints for service %s: %v", pf.serviceName, err) + return nil, err + } + return w, err +} + +// loadTargets retrieves the current Endpoints for the service, +// extracts the associated pod names, and updates the internal targets. 
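+// Endpoint addresses whose TargetRef is not a Pod are ignored.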
+func (pf *portForward) loadTargets(ctx context.Context) { + endpoints, err := pf.eclient.Get(ctx, pf.serviceName, metav1.GetOptions{}) + if err != nil { + log.Errorf("failed to get endpoints for service %s: %v", pf.serviceName, err) + return + } + pods := make([]string, 0, len(endpoints.Subsets)) + for _, subset := range endpoints.Subsets { + for _, addr := range subset.Addresses { + if addr.TargetRef != nil && addr.TargetRef.Kind == "Pod" { + pods = append(pods, addr.TargetRef.Name) + } + } + } + if len(pods) == 0 { + log.Errorf("no pods found in endpoints for service %s", pf.serviceName) + return + } + pf.updateTargets(pods) +} + +// portForwardToServicePod +func (pf *portForward) portForwardToService(ctx context.Context) (err error) { + // Retrieve the next available pod. + podName, err := pf.getNextPod() + if err != nil || podName == "" { + log.Errorf("Port forward connection failed: %v", err) + return errors.ErrNoAvailablePods + } + + var cancel context.CancelFunc + ctx, cancel = context.WithCancel(ctx) + defer cancel() + + log.Infof("Attempting port forward to pod: %s on %v:%v", podName, pf.addresses, pf.ports) + // Create an inner context for this port forward session. + stop, ech, err := PortforwardExtended(ctx, pf.client, pf.namespace, podName, pf.addresses, pf.ports, pf.httpClient) + if err != nil { + log.Errorf("Failed to establish port forward to pod %s: %v", podName, err) + if stop != nil { + stop() + } + return err + } + defer stop() + + pf.healthy.Store(true) + defer pf.healthy.Store(false) + + log.Infof("successfully established port forward to pod %s", podName) + + // Wait for the port forward session to end or the context to be cancelled. + select { + case err = <-ech: + if err != nil { + log.Errorf("Port forward session ended with error on pod %s: %v", podName, err) + return err + } + log.Infof("Port forward session ended normally on pod %s", podName) + return nil + case <-ctx.Done(): + return ctx.Err() + } +} + +// PortforwardExtended establishes port forwarding for a specific pod. +// It is used internally by the connection loop. +func PortforwardExtended( + ctx context.Context, + c k8s.Client, + namespace, podName string, + addresses []string, + ports map[uint16]uint16, + hc *http.Client, +) (cancel context.CancelFunc, errorChan <-chan error, err error) { + if c == nil { + return cancel, nil, errors.ErrKubernetesClientNotFound + } + // Create a cancelable context for the port forward session. + ctx, cancel = context.WithCancel(ctx) + + //tctx, tcancel := context.WithTimeout(ctx, time.Second*30) + //_, ok, err := k8s.WaitForStatus(tctx, k8s.Pod(c, namespace), podName, k8s.StatusAvailable) + //tcancel() + //if !ok || err != nil { + // return cancel, nil, errors.Join(err, errors.ErrPodIsNotRunning(namespace, podName)) + //} + //log.Debugf("pod %s is running", podName) + + if hc == nil { + hc = http.DefaultClient + } + + // Set up the SPDY round tripper required for port forwarding. + transport, upgrader, err := spdy.RoundTripperFor(c.GetRESRConfig()) + if err != nil { + return cancel, nil, err + } + hc.Transport = transport + + if addresses == nil { + return cancel, nil, errors.ErrPortForwardAddressNotFound + } + + // Build port pairs in the format "local:target". 
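+	// For example, ports map[uint16]uint16{8081: 8081} (illustrative values) becomes the single pair "8081:8081".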
+ portPairs := make([]string, 0, len(ports)) + for local, target := range ports { + portPairs = append(portPairs, fmt.Sprintf("%d:%d", local, target)) + } + if len(portPairs) == 0 { + return cancel, nil, errors.ErrPortForwardPortPairNotFound + } + slices.Sort(portPairs) + portPairs = slices.Clip(slices.Compact(portPairs)) + slices.Sort(addresses) + addresses = slices.Clip(slices.Compact(addresses)) + + // Create a channel to signal when the port forwarder is ready. + readyChan := make(chan struct{}) + + // Construct the URL for the pod's portforward subresource. + // Create a new port forwarder instance. + pf, err := portforward.NewOnAddresses( + spdy.NewDialer(upgrader, hc, http.MethodPost, c.GetClientSet().CoreV1().RESTClient().Post(). + Resource("pods"). + Namespace(namespace). + Name(podName). + SubResource("portforward").URL()), + addresses, portPairs, ctx.Done(), readyChan, os.Stdout, os.Stderr, + ) + if err != nil { + log.Errorf("failed to create port forwarder, addresses: %v, portPairs: %v, error: %v", addresses, portPairs, err) + return cancel, nil, err + } + + // Prepare the error channel (named "ech") to report errors. + ech := make(chan error, 1) + errgroup.Go(safety.RecoverFunc(func() (err error) { + defer cancel() + defer close(ech) + log.Debugf("port forwarder starting on %v:%v", addresses, portPairs) + // ForwardPorts blocks until the session ends. + if err = pf.ForwardPorts(); err != nil { + select { + case <-ctx.Done(): + case ech <- err: + } + } + return nil + })) + + // Wait until the port forwarder signals readiness or context cancellation. + select { + case <-ctx.Done(): + return cancel, ech, ctx.Err() + case <-readyChan: + log.Debugf("port forwarder ready for pod %s on %v", podName, portPairs) + return cancel, ech, nil + } +} diff --git a/tests/v2/e2e/kubernetes/resources.go b/tests/v2/e2e/kubernetes/resources.go new file mode 100644 index 0000000000..08bbe5a8a0 --- /dev/null +++ b/tests/v2/e2e/kubernetes/resources.go @@ -0,0 +1,608 @@ +//go:build e2e + +// +// Copyright (C) 2019-2025 vdaas.org vald team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +// package kubernetes provides kubernetes e2e tests +package kubernetes + +import ( + "context" + + "github.com/vdaas/vald/internal/errors" + "github.com/vdaas/vald/internal/sync" + appsv1 "k8s.io/api/apps/v1" + autoscalingv1 "k8s.io/api/autoscaling/v1" + batchv1 "k8s.io/api/batch/v1" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + applyconfigurationsappsv1 "k8s.io/client-go/applyconfigurations/apps/v1" + applyconfigurationsautoscalingv1 "k8s.io/client-go/applyconfigurations/autoscaling/v1" + applyconfigurationsbatchv1 "k8s.io/client-go/applyconfigurations/batch/v1" + applyconfigurationscorev1 "k8s.io/client-go/applyconfigurations/core/v1" + kclient "sigs.k8s.io/controller-runtime/pkg/client" +) + +type ( + Object = kclient.Object + ObjectList = runtime.Object + NamedObject interface { + comparable + GetName() *string + } +) + +type ResourceInterface[T Object, L ObjectList, C NamedObject] interface { + Get(ctx context.Context, name string, opts metav1.GetOptions) (T, error) + List(ctx context.Context, opts metav1.ListOptions) (L, error) + + Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) + + Create(ctx context.Context, resource T, opts metav1.CreateOptions) (T, error) + + Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error + + Update(ctx context.Context, resource T, opts metav1.UpdateOptions) (T, error) + + Apply(ctx context.Context, resource C, opts metav1.ApplyOptions) (result T, err error) + Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts metav1.PatchOptions, subresources ...string) (result T, err error) +} + +type ExtResourceInterface[T Object] interface { + DeleteCollection(ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions) error + UpdateStatus(ctx context.Context, resource T, opts metav1.UpdateOptions) (T, error) +} + +type ScaleInterface interface { + GetScale(ctx context.Context, resourceName string, options metav1.GetOptions) (*autoscalingv1.Scale, error) + UpdateScale(ctx context.Context, resourceName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions) (*autoscalingv1.Scale, error) + ApplyScale(ctx context.Context, resourceName string, scale *applyconfigurationsautoscalingv1.ScaleApplyConfiguration, opts metav1.ApplyOptions) (*autoscalingv1.Scale, error) +} + +type PodExtendInterface interface { + UpdateEphemeralContainers(ctx context.Context, podName string, pod *corev1.Pod, opts metav1.UpdateOptions) (*corev1.Pod, error) + UpdateResize(ctx context.Context, podName string, pod *corev1.Pod, opts metav1.UpdateOptions) (*corev1.Pod, error) +} + +type PodTemplateInterface[T Object] interface { + GetPodTemplate(obj T) (*corev1.PodTemplateSpec, error) + SetPodTemplate(obj T, pt *corev1.PodTemplateSpec) (T, error) +} + +type PodAnnotationInterface[T Object] interface { + GetPodAnnotations(ctx context.Context, name string, opts metav1.GetOptions) (map[string]string, error) + SetPodAnnotations(ctx context.Context, name string, annotations map[string]string, gopts metav1.GetOptions, uopts metav1.UpdateOptions) (T, error) +} + +type ClientControlInterface[T Object, L ObjectList, C NamedObject] interface { + GetInterface() ResourceInterface[T, L, C] + SetInterface(c ResourceInterface[T, L, C]) + + GetClient() Client + SetClient(c Client) + + GetNamespace() string + SetNamespace(namespace string) +} + +type ResourceClient[T 
Object, L ObjectList, C NamedObject] interface { + ResourceInterface[T, L, C] + ClientControlInterface[T, L, C] +} + +type WorkloadResourceClient[T Object, L ObjectList, C NamedObject] interface { + ResourceClient[T, L, C] + ExtResourceInterface[T] +} + +type WorkloadControllerResourceClient[T Object, L ObjectList, C NamedObject] interface { + WorkloadResourceClient[T, L, C] + PodTemplateInterface[T] + PodAnnotationInterface[T] +} + +type ( + PodClient interface { + WorkloadResourceClient[*corev1.Pod, *corev1.PodList, *applyconfigurationscorev1.PodApplyConfiguration] + PodExtendInterface + } + DeploymentClient interface { + WorkloadControllerResourceClient[*appsv1.Deployment, *appsv1.DeploymentList, *applyconfigurationsappsv1.DeploymentApplyConfiguration] + ScaleInterface + } + StatefulSetClient interface { + WorkloadControllerResourceClient[*appsv1.StatefulSet, *appsv1.StatefulSetList, *applyconfigurationsappsv1.StatefulSetApplyConfiguration] + ScaleInterface + } + DaemonSetClient = WorkloadControllerResourceClient[*appsv1.DaemonSet, *appsv1.DaemonSetList, *applyconfigurationsappsv1.DaemonSetApplyConfiguration] + JobClient = WorkloadControllerResourceClient[*batchv1.Job, *batchv1.JobList, *applyconfigurationsbatchv1.JobApplyConfiguration] + CronJobClient = WorkloadControllerResourceClient[*batchv1.CronJob, *batchv1.CronJobList, *applyconfigurationsbatchv1.CronJobApplyConfiguration] + ServiceClient = ResourceClient[*corev1.Service, *corev1.ServiceList, *applyconfigurationscorev1.ServiceApplyConfiguration] + SecretClient = ResourceClient[*corev1.Secret, *corev1.SecretList, *applyconfigurationscorev1.SecretApplyConfiguration] + ConfigMapClient = ResourceClient[*corev1.ConfigMap, *corev1.ConfigMapList, *applyconfigurationscorev1.ConfigMapApplyConfiguration] + PersistentVolumeClaimClient = ResourceClient[*corev1.PersistentVolumeClaim, *corev1.PersistentVolumeClaimList, *applyconfigurationscorev1.PersistentVolumeClaimApplyConfiguration] + PersistentVolumeClient = ResourceClient[*corev1.PersistentVolume, *corev1.PersistentVolumeList, *applyconfigurationscorev1.PersistentVolumeApplyConfiguration] + EndpointClient = ResourceClient[*corev1.Endpoints, *corev1.EndpointsList, *applyconfigurationscorev1.EndpointsApplyConfiguration] +) + +type ( + pod = baseClient[*corev1.Pod, *corev1.PodList, *applyconfigurationscorev1.PodApplyConfiguration] + deployment = baseClient[*appsv1.Deployment, *appsv1.DeploymentList, *applyconfigurationsappsv1.DeploymentApplyConfiguration] + daemonSet = baseClient[*appsv1.DaemonSet, *appsv1.DaemonSetList, *applyconfigurationsappsv1.DaemonSetApplyConfiguration] + statefulSet = baseClient[*appsv1.StatefulSet, *appsv1.StatefulSetList, *applyconfigurationsappsv1.StatefulSetApplyConfiguration] + job = baseClient[*batchv1.Job, *batchv1.JobList, *applyconfigurationsbatchv1.JobApplyConfiguration] + cronJob = baseClient[*batchv1.CronJob, *batchv1.CronJobList, *applyconfigurationsbatchv1.CronJobApplyConfiguration] + service = baseClient[*corev1.Service, *corev1.ServiceList, *applyconfigurationscorev1.ServiceApplyConfiguration] + secret = baseClient[*corev1.Secret, *corev1.SecretList, *applyconfigurationscorev1.SecretApplyConfiguration] + configMap = baseClient[*corev1.ConfigMap, *corev1.ConfigMapList, *applyconfigurationscorev1.ConfigMapApplyConfiguration] + pvc = baseClient[*corev1.PersistentVolumeClaim, *corev1.PersistentVolumeClaimList, *applyconfigurationscorev1.PersistentVolumeClaimApplyConfiguration] + pv = baseClient[*corev1.PersistentVolume, *corev1.PersistentVolumeList, 
*applyconfigurationscorev1.PersistentVolumeApplyConfiguration] + endponts = baseClient[*corev1.Endpoints, *corev1.EndpointsList, *applyconfigurationscorev1.EndpointsApplyConfiguration] +) + +var ( + _ PodClient = (*pod)(nil) + _ DeploymentClient = (*deployment)(nil) + _ DaemonSetClient = (*daemonSet)(nil) + _ StatefulSetClient = (*statefulSet)(nil) + _ JobClient = (*job)(nil) + _ CronJobClient = (*cronJob)(nil) + _ ServiceClient = (*service)(nil) + _ SecretClient = (*secret)(nil) + _ ConfigMapClient = (*configMap)(nil) + _ PersistentVolumeClaimClient = (*pvc)(nil) + _ PersistentVolumeClient = (*pv)(nil) + _ EndpointClient = (*endponts)(nil) +) + +func Pod(c Client, namespace string) PodClient { + if c == nil { + return nil + } + return &pod{ + Interface: c.GetClientSet().CoreV1().Pods(namespace), + Client: c, + Namespace: namespace, + } +} + +func Deployment(c Client, namespace string) DeploymentClient { + return &deployment{ + Interface: c.GetClientSet().AppsV1().Deployments(namespace), + Client: c, + getPodTemplate: func(t *appsv1.Deployment) *corev1.PodTemplateSpec { + return &t.Spec.Template + }, + setPodTemplate: func(t *appsv1.Deployment, pt *corev1.PodTemplateSpec) *appsv1.Deployment { + t.Spec.Template = *pt + return t + }, + Namespace: namespace, + } +} + +func DaemonSet(c Client, namespace string) DaemonSetClient { + return &daemonSet{ + Interface: c.GetClientSet().AppsV1().DaemonSets(namespace), + Client: c, + getPodTemplate: func(t *appsv1.DaemonSet) *corev1.PodTemplateSpec { + return &t.Spec.Template + }, + setPodTemplate: func(t *appsv1.DaemonSet, pt *corev1.PodTemplateSpec) *appsv1.DaemonSet { + t.Spec.Template = *pt + return t + }, + Namespace: namespace, + } +} + +func StatefulSet(c Client, namespace string) StatefulSetClient { + return &statefulSet{ + Interface: c.GetClientSet().AppsV1().StatefulSets(namespace), + Client: c, + getPodTemplate: func(t *appsv1.StatefulSet) *corev1.PodTemplateSpec { + return &t.Spec.Template + }, + setPodTemplate: func(t *appsv1.StatefulSet, pt *corev1.PodTemplateSpec) *appsv1.StatefulSet { + t.Spec.Template = *pt + return t + }, + Namespace: namespace, + } +} + +func Job(c Client, namespace string) JobClient { + return &job{ + Interface: c.GetClientSet().BatchV1().Jobs(namespace), + Client: c, + getPodTemplate: func(t *batchv1.Job) *corev1.PodTemplateSpec { + return &t.Spec.Template + }, + setPodTemplate: func(t *batchv1.Job, pt *corev1.PodTemplateSpec) *batchv1.Job { + t.Spec.Template = *pt + return t + }, + Namespace: namespace, + } +} + +func CronJob(c Client, namespace string) CronJobClient { + return &cronJob{ + Interface: c.GetClientSet().BatchV1().CronJobs(namespace), + Client: c, + getPodTemplate: func(t *batchv1.CronJob) *corev1.PodTemplateSpec { + return &t.Spec.JobTemplate.Spec.Template + }, + setPodTemplate: func(t *batchv1.CronJob, pt *corev1.PodTemplateSpec) *batchv1.CronJob { + t.Spec.JobTemplate.Spec.Template = *pt + return t + }, + Namespace: namespace, + } +} + +func Service(c Client, namespace string) ServiceClient { + return &service{ + Interface: c.GetClientSet().CoreV1().Services(namespace), + Client: c, + Namespace: namespace, + } +} + +func Secret(c Client, namespace string) SecretClient { + return &secret{ + Interface: c.GetClientSet().CoreV1().Secrets(namespace), + Client: c, + Namespace: namespace, + } +} + +func ConfigMap(c Client, namespace string) ConfigMapClient { + return &configMap{ + Interface: c.GetClientSet().CoreV1().ConfigMaps(namespace), + Client: c, + Namespace: namespace, + } +} + +func 
PersistentVolumeClaim(c Client, namespace string) PersistentVolumeClaimClient { + return &pvc{ + Interface: c.GetClientSet().CoreV1().PersistentVolumeClaims(namespace), + Client: c, + Namespace: namespace, + } +} + +func PersistentVolume(c Client) PersistentVolumeClient { + return &pv{ + Interface: c.GetClientSet().CoreV1().PersistentVolumes(), + Client: c, + } +} + +func Endpoints(c Client, namespace string) EndpointClient { + return &endponts{ + Interface: c.GetClientSet().CoreV1().Endpoints(namespace), + Client: c, + Namespace: namespace, + } +} + +type baseClient[T Object, L ObjectList, C NamedObject] struct { + Interface ResourceInterface[T, L, C] + Client Client + getPodTemplate func(t T) *corev1.PodTemplateSpec + setPodTemplate func(t T, pt *corev1.PodTemplateSpec) T + Namespace string + mu sync.RWMutex +} + +func (b *baseClient[T, L, C]) Create( + ctx context.Context, resource T, opts metav1.CreateOptions, +) (t T, err error) { + b.mu.RLock() + defer b.mu.RUnlock() + if b == nil || b.Interface == nil { + return t, errors.ErrKubernetesClientNotFound + } + return b.Interface.Create(ctx, resource, opts) +} + +func (b *baseClient[T, L, C]) Update( + ctx context.Context, resource T, opts metav1.UpdateOptions, +) (t T, err error) { + b.mu.RLock() + defer b.mu.RUnlock() + if b == nil || b.Interface == nil { + return t, errors.ErrKubernetesClientNotFound + } + return b.Interface.Update(ctx, resource, opts) +} + +func (b *baseClient[T, L, C]) UpdateStatus( + ctx context.Context, resource T, opts metav1.UpdateOptions, +) (t T, err error) { + b.mu.RLock() + defer b.mu.RUnlock() + if b == nil || b.Interface == nil { + return t, errors.ErrKubernetesClientNotFound + } + if eri, ok := b.Interface.(ExtResourceInterface[T]); ok { + return eri.UpdateStatus(ctx, resource, opts) + } + return t, errors.ErrUnimplemented("UpdateStatus") +} + +func (b *baseClient[T, L, C]) Delete( + ctx context.Context, name string, opts metav1.DeleteOptions, +) error { + b.mu.RLock() + defer b.mu.RUnlock() + if b == nil || b.Interface == nil { + return errors.ErrKubernetesClientNotFound + } + return b.Interface.Delete(ctx, name, opts) +} + +func (b *baseClient[T, L, C]) DeleteCollection( + ctx context.Context, opts metav1.DeleteOptions, listOpts metav1.ListOptions, +) error { + b.mu.RLock() + defer b.mu.RUnlock() + if b == nil || b.Interface == nil { + return errors.ErrKubernetesClientNotFound + } + if eri, ok := b.Interface.(ExtResourceInterface[T]); ok { + return eri.DeleteCollection(ctx, opts, listOpts) + } + return errors.ErrUnimplemented("DeleteCollection") +} + +func (b *baseClient[T, L, C]) Get( + ctx context.Context, name string, opts metav1.GetOptions, +) (t T, err error) { + b.mu.RLock() + defer b.mu.RUnlock() + if b == nil || b.Interface == nil { + return t, errors.ErrKubernetesClientNotFound + } + return b.Interface.Get(ctx, name, opts) +} + +func (b *baseClient[T, L, C]) List(ctx context.Context, opts metav1.ListOptions) (l L, err error) { + b.mu.RLock() + defer b.mu.RUnlock() + if b == nil || b.Interface == nil { + return l, errors.ErrKubernetesClientNotFound + } + return b.Interface.List(ctx, opts) +} + +func (b *baseClient[T, L, C]) Watch( + ctx context.Context, opts metav1.ListOptions, +) (w watch.Interface, err error) { + b.mu.RLock() + defer b.mu.RUnlock() + if b == nil || b.Interface == nil { + return w, errors.ErrKubernetesClientNotFound + } + return b.Interface.Watch(ctx, opts) +} + +func (b *baseClient[T, L, C]) Patch( + ctx context.Context, + name string, + pt types.PatchType, + data []byte, + 
opts metav1.PatchOptions, + subresources ...string, +) (t T, err error) { + b.mu.RLock() + defer b.mu.RUnlock() + if b == nil || b.Interface == nil { + return t, errors.ErrKubernetesClientNotFound + } + return b.Interface.Patch(ctx, name, pt, data, opts, subresources...) +} + +func (b *baseClient[T, L, C]) Apply( + ctx context.Context, resource C, opts metav1.ApplyOptions, +) (t T, err error) { + b.mu.RLock() + defer b.mu.RUnlock() + if b == nil || b.Interface == nil { + return t, errors.ErrKubernetesClientNotFound + } + return b.Interface.Apply(ctx, resource, opts) +} + +func (b *baseClient[T, L, C]) UpdateEphemeralContainers( + ctx context.Context, podName string, pod *corev1.Pod, opts metav1.UpdateOptions, +) (*corev1.Pod, error) { + b.mu.RLock() + defer b.mu.RUnlock() + if b == nil || b.Interface == nil { + return nil, errors.ErrKubernetesClientNotFound + } + if pc, ok := b.Interface.(PodExtendInterface); ok { + return pc.UpdateEphemeralContainers(ctx, podName, pod, opts) + } + return nil, errors.ErrUnimplemented("UpdateEphemeralContainers") +} + +func (b *baseClient[T, L, C]) UpdateResize( + ctx context.Context, podName string, pod *corev1.Pod, opts metav1.UpdateOptions, +) (*corev1.Pod, error) { + b.mu.RLock() + defer b.mu.RUnlock() + if b == nil || b.Interface == nil { + return nil, errors.ErrKubernetesClientNotFound + } + if pc, ok := b.Interface.(PodExtendInterface); ok { + return pc.UpdateResize(ctx, podName, pod, opts) + } + return nil, errors.ErrUnimplemented("UpdateResize") +} + +func (b *baseClient[T, L, C]) GetScale( + ctx context.Context, resourceName string, options metav1.GetOptions, +) (*autoscalingv1.Scale, error) { + b.mu.RLock() + defer b.mu.RUnlock() + if b == nil || b.Interface == nil { + return nil, errors.ErrKubernetesClientNotFound + } + if sc, ok := b.Interface.(ScaleInterface); ok { + return sc.GetScale(ctx, resourceName, options) + } + return nil, errors.ErrUnimplemented("GetScale") +} + +func (b *baseClient[T, L, C]) UpdateScale( + ctx context.Context, resourceName string, scale *autoscalingv1.Scale, opts metav1.UpdateOptions, +) (*autoscalingv1.Scale, error) { + b.mu.RLock() + defer b.mu.RUnlock() + if b == nil || b.Interface == nil { + return nil, errors.ErrKubernetesClientNotFound + } + if sc, ok := b.Interface.(ScaleInterface); ok { + return sc.UpdateScale(ctx, resourceName, scale, opts) + } + return nil, errors.ErrUnimplemented("UpdateScale") +} + +func (b *baseClient[T, L, C]) ApplyScale( + ctx context.Context, + resourceName string, + scale *applyconfigurationsautoscalingv1.ScaleApplyConfiguration, + opts metav1.ApplyOptions, +) (*autoscalingv1.Scale, error) { + b.mu.RLock() + defer b.mu.RUnlock() + if b == nil || b.Interface == nil { + return nil, errors.ErrKubernetesClientNotFound + } + if sc, ok := b.Interface.(ScaleInterface); ok { + return sc.ApplyScale(ctx, resourceName, scale, opts) + } + return nil, errors.ErrUnimplemented("ApplyScale") +} + +func (b *baseClient[T, L, C]) GetInterface() ResourceInterface[T, L, C] { + b.mu.RLock() + defer b.mu.RUnlock() + return b.Interface +} + +func (b *baseClient[T, L, C]) SetInterface(c ResourceInterface[T, L, C]) { + b.mu.Lock() + b.Interface = c + b.mu.Unlock() +} + +func (b *baseClient[T, L, C]) GetClient() Client { + b.mu.RLock() + defer b.mu.RUnlock() + return b.Client +} + +func (b *baseClient[T, L, C]) SetClient(c Client) { + b.mu.Lock() + b.Client = c + b.mu.Unlock() +} + +func (b *baseClient[T, L, C]) GetNamespace() string { + b.mu.RLock() + defer b.mu.RUnlock() + return b.Namespace +} + +func (b 
*baseClient[T, L, C]) SetNamespace(namespace string) { + b.mu.Lock() + b.Namespace = namespace + b.mu.Unlock() +} + +func (b *baseClient[T, L, C]) GetPodTemplate(obj T) (*corev1.PodTemplateSpec, error) { + b.mu.RLock() + defer b.mu.RUnlock() + if b.getPodTemplate == nil { + return nil, errors.ErrPodTemplateNotFound + } + return b.getPodTemplate(obj), nil +} + +func (b *baseClient[T, L, C]) SetPodTemplate(obj T, pt *corev1.PodTemplateSpec) (T, error) { + b.mu.RLock() + defer b.mu.RUnlock() + if b.setPodTemplate == nil { + return obj, errors.ErrPodTemplateNotFound + } + return b.setPodTemplate(obj, pt), nil +} + +func (b *baseClient[T, L, C]) GetPodAnnotations( + ctx context.Context, name string, opts metav1.GetOptions, +) (map[string]string, error) { + obj, err := b.Get(ctx, name, opts) + if err != nil { + return nil, err + } + tmpl, err := b.GetPodTemplate(obj) + if err != nil { + return nil, err + } + if tmpl == nil || tmpl.Annotations == nil { + return nil, errors.ErrPodTemplateNotFound + } + return tmpl.Annotations, nil +} + +func (b *baseClient[T, L, C]) SetPodAnnotations( + ctx context.Context, + name string, + annotations map[string]string, + gopts metav1.GetOptions, + uopts metav1.UpdateOptions, +) (T, error) { + obj, err := b.Get(ctx, name, gopts) + if err != nil { + return obj, err + } + tmpl, err := b.GetPodTemplate(obj) + if err != nil { + return obj, err + } + if tmpl == nil { + return obj, errors.ErrPodTemplateNotFound + } + if tmpl.Annotations == nil { + tmpl.Annotations = make(map[string]string, len(annotations)) + } + for key, val := range annotations { + tmpl.Annotations[key] = val + } + obj, err = b.SetPodTemplate(obj, tmpl) + if err != nil { + return obj, err + } + return b.Update(ctx, obj, uopts) +} diff --git a/tests/v2/e2e/kubernetes/rollout.go b/tests/v2/e2e/kubernetes/rollout.go new file mode 100644 index 0000000000..8d685edd73 --- /dev/null +++ b/tests/v2/e2e/kubernetes/rollout.go @@ -0,0 +1,66 @@ +//go:build e2e + +// +// Copyright (C) 2019-2025 vdaas.org vald team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// package kubernetes provides kubernetes e2e tests +package kubernetes + +import ( + "context" + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/client-go/util/retry" +) + +const ( + rolloutAnnotationKey = "kubectl.kubernetes.io/restartedAt" +) + +// RolloutRestart restarts a kubernetes resources (Deployment, DaemonSet, StatefulSet, Job, CronJob). 
+// +// # Example +// ```go +// +// client, err := kubernetes.NewClient("/path/to/kubeconfig", "current context") // create a kubernetes client +// if err != nil { +// return err +// } +// +// deploymentClient := kubernetes.Deployment(client, "default") // create a deployment client +// err = kubernetes.RolloutRestart(ctx, deploymentClient, "some deployment") // restart the deployment +// if err != nil { +// return err +// } +// +// statefulSetClient := kubernetes.StatefulSet(client, "default") // create a statefulset client +// err = kubernetes.RolloutRestart(ctx, statefulSetClient, "some statefulset") // restart the statefulset +// if err != nil { +// return err +// } +// +// ``` +func RolloutRestart[T Object, L ObjectList, C NamedObject, I WorkloadControllerResourceClient[T, L, C]]( + ctx context.Context, client I, name string, +) error { + return retry.RetryOnConflict(retry.DefaultRetry, func() (err error) { + _, err = client.SetPodAnnotations(ctx, name, map[string]string{ + rolloutAnnotationKey: time.Now().UTC().Format(time.RFC3339), + }, metav1.GetOptions{}, metav1.UpdateOptions{}) + return err + }) +} diff --git a/tests/v2/e2e/kubernetes/status.go b/tests/v2/e2e/kubernetes/status.go new file mode 100644 index 0000000000..276066c6c5 --- /dev/null +++ b/tests/v2/e2e/kubernetes/status.go @@ -0,0 +1,517 @@ +//go:build e2e + +// +// Copyright (C) 2019-2025 vdaas.org vald team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// You may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// https://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// package kubernetes provides kubernetes e2e tests +package kubernetes + +import ( + "context" + "fmt" + "slices" + "time" + + "github.com/vdaas/vald/internal/errors" + appsv1 "k8s.io/api/apps/v1" // For Deployment, StatefulSet, DaemonSet + batchv1 "k8s.io/api/batch/v1" // For Job and CronJob + corev1 "k8s.io/api/core/v1" // For Pod, PersistentVolumeClaim, Service + networkingv1 "k8s.io/api/networking/v1" // For Ingress + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// -------------------------------------------------------------------------------- +// ResourceStatus enum with extended states for detailed status reporting. 
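+// The subset of values that can actually be reported for each resource kind is listed in PossibleStatuses below.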
+// -------------------------------------------------------------------------------- +type ResourceStatus int + +const ( + StatusUnknown ResourceStatus = iota // Unknown state + StatusPending // Resource is initializing or waiting for update observation + StatusUpdating // Resource is in the process of updating/rolling out changes + StatusAvailable // Resource is fully operational + StatusDegraded // Resource is operational but with some issues + StatusFailed // Resource has failed + StatusCompleted // For jobs: execution completed successfully + StatusScheduled // For jobs: scheduled but not yet started + StatusScaling // Resource is scaling up or down + StatusPaused // Resource update/rollout is paused + StatusTerminating // Resource (e.g., Pod) is in the process of termination + StatusNotReady // Resource (e.g., Pod) is running but not yet ready + StatusBound // PVC is bound to a volume + StatusLoadBalancing // Service is still provisioning a load balancer +) + +// Human-readable mapping for ResourceStatus values. +var ResourceStatusMap = map[ResourceStatus]string{ + StatusUnknown: "Unknown state", + StatusPending: "Initializing or waiting for update observation", + StatusUpdating: "Updating / Rolling out a new version", + StatusAvailable: "Fully operational", + StatusDegraded: "Degraded state", + StatusFailed: "Failed", + StatusCompleted: "Completed successfully", + StatusScheduled: "Scheduled but not started", + StatusScaling: "Scaling in progress", + StatusPaused: "Rollout paused", + StatusTerminating: "Terminating", + StatusNotReady: "Running but not ready", + StatusBound: "PVC is bound", + StatusLoadBalancing: "Load balancer provisioning in progress", +} + +// -------------------------------------------------------------------------------- +// WaitForStatus waits for a Kubernetes resource to reach a specific status. +// The function checks the status of the resource at regular intervals and returns +// the object, a boolean indicating if the status matched, and an error (if any). +// The function supports Deployment, StatefulSet, DaemonSet, Job, CronJob, Pod, +// PersistentVolumeClaim, Service, and Ingress. +// -------------------------------------------------------------------------------- +func WaitForStatus[T Object, L ObjectList, C NamedObject, I ResourceInterface[T, L, C]]( + ctx context.Context, client I, name string, statuses ...ResourceStatus, +) (obj T, matched bool, err error) { + if !slices.ContainsFunc(PossibleStatuses(obj), func(st ResourceStatus) bool { + return slices.Contains(statuses, st) + }) { + return obj, false, errors.ErrStatusPatternNeverMatched + } + + ticker := time.NewTicker(5 * time.Second) + defer ticker.Stop() + for { + select { + case <-ctx.Done(): + return obj, false, ctx.Err() + case <-ticker.C: + obj, err = client.Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return obj, false, err + } + status, info, err := CheckResourceState(obj) + if err != nil { + return obj, false, errors.Wrap(err, info) + } + for _, st := range statuses { + if st == status { + return obj, true, nil + } + } + } + } +} + +// -------------------------------------------------------------------------------- +// PossibleStatuses returns a list of possible ResourceStatus values for a given object. +// The function supports Deployment, StatefulSet, DaemonSet, Job, CronJob, Pod, +// PersistentVolumeClaim, Service, and Ingress. 
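+//
+// WaitForStatus uses this helper to reject target statuses that can never be reached for the
+// given resource type (returning errors.ErrStatusPatternNeverMatched).
+//
+// # Example
+//
+// A minimal sketch of waiting for a Deployment to become available. The client construction,
+// context, and deployment name are illustrative assumptions.
+// ```go
+//
+//	deploymentClient := kubernetes.Deployment(client, "default") // create a deployment client
+//	deploy, matched, err := kubernetes.WaitForStatus(ctx, deploymentClient, "example-deployment", kubernetes.StatusAvailable)
+//	if err != nil {
+//		return err
+//	}
+//	if matched {
+//		log.Printf("deployment %s is fully available", deploy.GetName())
+//	}
+//
+// ```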
+// --------------------------------------------------------------------------------
+func PossibleStatuses[T Object](obj T) []ResourceStatus {
+	switch any(obj).(type) {
+	case *appsv1.Deployment:
+		return []ResourceStatus{StatusPending, StatusUpdating, StatusAvailable, StatusDegraded, StatusFailed, StatusPaused}
+	case *appsv1.StatefulSet:
+		return []ResourceStatus{StatusPending, StatusUpdating, StatusAvailable, StatusDegraded, StatusFailed}
+	case *appsv1.DaemonSet:
+		return []ResourceStatus{StatusPending, StatusUpdating, StatusAvailable, StatusDegraded, StatusFailed}
+	case *batchv1.Job:
+		return []ResourceStatus{StatusUpdating, StatusFailed, StatusCompleted, StatusScheduled}
+	case *batchv1.CronJob:
+		return []ResourceStatus{StatusPaused, StatusPending, StatusAvailable}
+	case *corev1.Pod:
+		return []ResourceStatus{StatusUnknown, StatusAvailable, StatusPending, StatusCompleted, StatusFailed, StatusTerminating, StatusNotReady}
+	case *corev1.PersistentVolumeClaim:
+		return []ResourceStatus{StatusUnknown, StatusPending, StatusFailed, StatusBound}
+	case *corev1.Service:
+		return []ResourceStatus{StatusAvailable, StatusLoadBalancing}
+	case *networkingv1.Ingress:
+		return []ResourceStatus{StatusPending, StatusAvailable}
+	default:
+		return []ResourceStatus{StatusUnknown}
+	}
+}
+
+// --------------------------------------------------------------------------------
+// CheckResourceState determines the detailed state of a Kubernetes resource.
+// It returns a ResourceStatus enum, a detailed string message, and an error (if any).
+// This function supports Deployment, StatefulSet, DaemonSet, Job, CronJob, Pod,
+// PersistentVolumeClaim, Service, and Ingress.
+// --------------------------------------------------------------------------------
+func CheckResourceState[T Object](obj T) (ResourceStatus, string, error) {
+	switch res := any(obj).(type) {
+	case *appsv1.Deployment:
+		return evaluateDeployment(res)
+	case *appsv1.StatefulSet:
+		return evaluateStatefulSet(res)
+	case *appsv1.DaemonSet:
+		return evaluateDaemonSet(res)
+	case *batchv1.Job:
+		return evaluateJob(res)
+	case *batchv1.CronJob:
+		return evaluateCronJob(res)
+	case *corev1.Pod:
+		return evaluatePod(res)
+	case *corev1.PersistentVolumeClaim:
+		return evaluatePVC(res)
+	case *corev1.Service:
+		return evaluateService(res)
+	case *networkingv1.Ingress:
+		return evaluateIngress(res)
+	default:
+		return StatusUnknown, "Unsupported resource type", errors.ErrUnsupportedKubernetesResourceType(obj)
+	}
+}
+
+// --------------------------------------------------------------------------------
+// evaluateDeployment evaluates the status of a Deployment resource.
+// It checks:
+// - Generation vs ObservedGeneration
+// - Spec.Replicas (desired) vs Status.Replicas, UpdatedReplicas, and AvailableReplicas
+// - Conditions: DeploymentProgressing and DeploymentAvailable
+// - Whether the deployment is paused
+// --------------------------------------------------------------------------------
+func evaluateDeployment(deploy *appsv1.Deployment) (ResourceStatus, string, error) {
+	desired := int32(1)
+	if deploy.Spec.Replicas != nil {
+		desired = *deploy.Spec.Replicas
+	}
+
+	// Build a detailed status string.
+ statusDetails := fmt.Sprintf("Name: %s, Generation: %d, ObservedGeneration: %d, Spec.Replicas: %d, Status.Replicas: %d, UpdatedReplicas: %d, AvailableReplicas: %d.", + deploy.GetName(), deploy.GetGeneration(), deploy.Status.ObservedGeneration, desired, deploy.Status.Replicas, deploy.Status.UpdatedReplicas, deploy.Status.AvailableReplicas) + + // Check if the Deployment is paused. + if deploy.Spec.Paused { + statusDetails += "Deployment is paused." + return StatusPaused, statusDetails, nil + } + + // Ensure the controller has observed the latest update. + if deploy.Status.ObservedGeneration < deploy.Generation { + statusDetails += "Update not yet observed by controller." + return StatusPending, statusDetails, nil + } + + // Inspect Deployment conditions. + var progressingCond *appsv1.DeploymentCondition + var availableCond *appsv1.DeploymentCondition + for _, cond := range deploy.Status.Conditions { + if cond.Type == appsv1.DeploymentProgressing { + progressingCond = &cond + } else if cond.Type == appsv1.DeploymentAvailable { + availableCond = &cond + } + } + if progressingCond != nil { + statusDetails += fmt.Sprintf("Progressing condition: %s, Status: %s.", progressingCond.Reason, progressingCond.Status) + if progressingCond.Status == corev1.ConditionFalse { + return StatusFailed, statusDetails, nil + } + } + if availableCond != nil { + statusDetails += fmt.Sprintf("Available condition: %s, Status: %s.", availableCond.Reason, availableCond.Status) + if availableCond.Status == corev1.ConditionFalse { + return StatusDegraded, statusDetails, nil + } + } + + // Check if the number of updated and available replicas meets the desired count. + if deploy.Status.UpdatedReplicas < desired { + statusDetails += fmt.Sprintf("Only %d out of %d replicas updated.", deploy.Status.UpdatedReplicas, desired) + return StatusUpdating, statusDetails, nil + } + if deploy.Status.UpdatedReplicas < deploy.Status.Replicas { + statusDetails += fmt.Sprintf("There are %d total replicas but only %d replicas updated.", deploy.Status.UpdatedReplicas, deploy.Status.Replicas) + return StatusUpdating, statusDetails, nil + } + if deploy.Status.AvailableReplicas < desired { + statusDetails += fmt.Sprintf("Only %d out of %d replicas available.", deploy.Status.AvailableReplicas, desired) + return StatusDegraded, statusDetails, nil + } + + statusDetails += "Deployment is fully operational." + return StatusAvailable, statusDetails, nil +} + +// -------------------------------------------------------------------------------- +// evaluateStatefulSet evaluates the status of a StatefulSet resource. 
+// It checks: +// - Generation vs ObservedGeneration +// - Spec.Replicas (desired) vs UpdatedReplicas, CurrentReplicas, and ReadyReplicas +// - Whether UpdateRevision equals CurrentRevision +// -------------------------------------------------------------------------------- +func evaluateStatefulSet(sts *appsv1.StatefulSet) (ResourceStatus, string, error) { + desired := int32(1) + if sts.Spec.Replicas != nil { + desired = *sts.Spec.Replicas + } + + statusDetails := fmt.Sprintf( + "Name: %s, Generation: %d, ObservedGeneration: %d, Spec.Replicas: %d, CurrentReplicas: %d, UpdatedReplicas: %d, ReadyReplicas: %d, CurrentRevision: %s, UpdateRevision: %s.", + sts.GetName(), + sts.GetGeneration(), + sts.Status.ObservedGeneration, + desired, + sts.Status.CurrentReplicas, + sts.Status.UpdatedReplicas, + sts.Status.ReadyReplicas, + sts.Status.CurrentRevision, + sts.Status.UpdateRevision, + ) + + if sts.Status.ObservedGeneration < sts.Generation { + statusDetails += "Update not yet observed by controller." + return StatusPending, statusDetails, nil + } + + if sts.Status.UpdatedReplicas < desired { + statusDetails += fmt.Sprintf("Only %d out of %d replicas updated.", sts.Status.UpdatedReplicas, desired) + return StatusUpdating, statusDetails, nil + } + + if sts.Status.CurrentReplicas < desired { + statusDetails += fmt.Sprintf("Only %d out of %d replicas are currently running.", sts.Status.CurrentReplicas, desired) + return StatusUpdating, statusDetails, nil + } + + if sts.Status.ReadyReplicas < desired { + statusDetails += fmt.Sprintf("Only %d out of %d replicas are ready.", sts.Status.ReadyReplicas, desired) + return StatusDegraded, statusDetails, nil + } + + if sts.Status.UpdateRevision != sts.Status.CurrentRevision { + statusDetails += fmt.Sprintf("Revision mismatch: CurrentRevision=%s, UpdateRevision=%s.", sts.Status.CurrentRevision, sts.Status.UpdateRevision) + return StatusUpdating, statusDetails, nil + } + + statusDetails += "StatefulSet is fully operational." + return StatusAvailable, statusDetails, nil +} + +// -------------------------------------------------------------------------------- +// evaluateDaemonSet evaluates the status of a DaemonSet resource. +// It checks: +// - Generation vs ObservedGeneration +// - DesiredNumberScheduled vs UpdatedNumberScheduled and NumberAvailable +// - Conditions if available for additional insights +// -------------------------------------------------------------------------------- +func evaluateDaemonSet(ds *appsv1.DaemonSet) (ResourceStatus, string, error) { + statusDetails := fmt.Sprintf("Name: %s, Generation: %d, ObservedGeneration: %d, DesiredNumberScheduled: %d, UpdatedNumberScheduled: %d, NumberAvailable: %d, NumberReady: %d.", + ds.GetName(), ds.GetGeneration(), ds.Status.ObservedGeneration, ds.Status.DesiredNumberScheduled, ds.Status.UpdatedNumberScheduled, ds.Status.NumberAvailable, ds.Status.NumberReady) + + if ds.Status.ObservedGeneration < ds.Generation { + statusDetails += "Update not yet observed by controller." + return StatusPending, statusDetails, nil + } + + // Check DaemonSet conditions if present (not always available) + for _, cond := range ds.Status.Conditions { + // Using a generic condition check similar to DeploymentProgressing. 
+		if cond.Status == corev1.ConditionFalse {
+			statusDetails += fmt.Sprintf("Condition %s is false: %s.", cond.Type, cond.Reason)
+			return StatusFailed, statusDetails, nil
+		}
+	}
+
+	if ds.Status.UpdatedNumberScheduled < ds.Status.DesiredNumberScheduled {
+		statusDetails += fmt.Sprintf("Only %d out of %d pods updated.", ds.Status.UpdatedNumberScheduled, ds.Status.DesiredNumberScheduled)
+		return StatusUpdating, statusDetails, nil
+	}
+
+	if ds.Status.NumberAvailable < ds.Status.DesiredNumberScheduled {
+		statusDetails += fmt.Sprintf("Only %d out of %d pods available.", ds.Status.NumberAvailable, ds.Status.DesiredNumberScheduled)
+		return StatusDegraded, statusDetails, nil
+	}
+
+	statusDetails += "DaemonSet is fully operational."
+	return StatusAvailable, statusDetails, nil
+}
+
+// --------------------------------------------------------------------------------
+// evaluateJob evaluates the status of a Job resource.
+// It checks:
+// - Active, Succeeded, and Failed counts
+// - Conditions (e.g., JobFailed, JobComplete)
+// --------------------------------------------------------------------------------
+func evaluateJob(job *batchv1.Job) (ResourceStatus, string, error) {
+	statusDetails := fmt.Sprintf("Name: %s, Active: %d, Succeeded: %d, Failed: %d.", job.GetName(), job.Status.Active, job.Status.Succeeded, job.Status.Failed)
+
+	// Check job conditions for additional information.
+	for _, cond := range job.Status.Conditions {
+		statusDetails += fmt.Sprintf("Condition Type: %s, Status: %s, Reason: %s.", cond.Type, cond.Status, cond.Reason)
+		if cond.Type == batchv1.JobFailed && cond.Status == corev1.ConditionTrue {
+			return StatusFailed, statusDetails, nil
+		}
+		if cond.Type == batchv1.JobComplete && cond.Status == corev1.ConditionTrue {
+			return StatusCompleted, statusDetails, nil
+		}
+	}
+
+	if job.Status.Succeeded > 0 {
+		statusDetails += "Job has succeeded."
+		return StatusCompleted, statusDetails, nil
+	}
+	if job.Status.Active > 0 {
+		statusDetails += "Job is currently running."
+		return StatusUpdating, statusDetails, nil
+	}
+
+	// If none of the above, the job may be scheduled but not yet started.
+	statusDetails += "Job is scheduled but not yet started."
+	return StatusScheduled, statusDetails, nil
+}
+
+// --------------------------------------------------------------------------------
+// evaluateCronJob evaluates the status of a CronJob resource.
+// It checks:
+// - Whether the CronJob is suspended
+// - LastScheduleTime and its timing
+// --------------------------------------------------------------------------------
+func evaluateCronJob(cronjob *batchv1.CronJob) (ResourceStatus, string, error) {
+	statusDetails := fmt.Sprintf("CronJob Name: %s.", cronjob.GetName())
+
+	// Check if the CronJob is suspended.
+	if cronjob.Spec.Suspend != nil && *cronjob.Spec.Suspend {
+		statusDetails += "CronJob is suspended."
+		return StatusPaused, statusDetails, nil
+	}
+
+	// Check the last schedule time.
+	if cronjob.Status.LastScheduleTime == nil {
+		statusDetails += "CronJob has not yet scheduled any jobs."
+		return StatusPending, statusDetails, nil
+	}
+
+	lastSchedule := cronjob.Status.LastScheduleTime.Format(time.RFC3339)
+	statusDetails += fmt.Sprintf("Last scheduled at %s.", lastSchedule)
+	return StatusAvailable, statusDetails, nil
+}
+
+// --------------------------------------------------------------------------------
+// evaluatePod evaluates the status of a Pod resource.
+// It checks: +// - Pod Phase (Pending, Running, Succeeded, Failed, Unknown) +// - Pod conditions for readiness (especially PodReady) +// - DeletionTimestamp to detect termination +// -------------------------------------------------------------------------------- +func evaluatePod(pod *corev1.Pod) (ResourceStatus, string, error) { + statusDetails := fmt.Sprintf("Pod %s Phase: %s.", pod.GetName(), pod.Status.Phase) + + // Check if the pod is being terminated. + if pod.DeletionTimestamp != nil { + statusDetails += fmt.Sprintf("Pod is terminating (DeletionTimestamp: %s).", pod.DeletionTimestamp.Format(time.RFC3339)) + return StatusTerminating, statusDetails, nil + } + + // Evaluate based on pod phase. + switch pod.Status.Phase { + case corev1.PodPending: + statusDetails += "Pod is pending scheduling or initialization." + return StatusPending, statusDetails, nil + case corev1.PodRunning: + // Check PodReady condition. + ready := false + for _, cond := range pod.Status.Conditions { + if cond.Type == corev1.PodReady { + ready = (cond.Status == corev1.ConditionTrue) + statusDetails += fmt.Sprintf("PodReady condition: %s (Reason: %s).", cond.Status, cond.Reason) + } + } + if !ready { + statusDetails += "Pod is running but not ready." + return StatusNotReady, statusDetails, nil + } + statusDetails += "Pod is running and ready." + return StatusAvailable, statusDetails, nil + case corev1.PodSucceeded: + statusDetails += "Pod has completed successfully." + return StatusCompleted, statusDetails, nil + case corev1.PodFailed: + statusDetails += "Pod execution has failed." + return StatusFailed, statusDetails, nil + case corev1.PodUnknown: + statusDetails += "Pod status is unknown." + return StatusUnknown, statusDetails, nil + default: + statusDetails += "Pod status unrecognized." + return StatusUnknown, statusDetails, nil + } +} + +// -------------------------------------------------------------------------------- +// evaluatePVC evaluates the status of a PersistentVolumeClaim (PVC). +// It checks: +// - PVC Phase (Bound, Pending, Lost) +// - Provides details on volume binding. +// -------------------------------------------------------------------------------- +func evaluatePVC(pvc *corev1.PersistentVolumeClaim) (ResourceStatus, string, error) { + statusDetails := fmt.Sprintf("PVC %s Phase: %s.", pvc.GetName(), pvc.Status.Phase) + switch pvc.Status.Phase { + case corev1.ClaimBound: + statusDetails += fmt.Sprintf("PVC is bound to volume: %s.", pvc.Spec.VolumeName) + return StatusBound, statusDetails, nil + case corev1.ClaimPending: + statusDetails += "PVC is pending binding to a volume." + return StatusPending, statusDetails, nil + case corev1.ClaimLost: + statusDetails += "PVC has lost its bound volume." + return StatusFailed, statusDetails, nil + default: + statusDetails += "PVC status unrecognized." + return StatusUnknown, statusDetails, nil + } +} + +// -------------------------------------------------------------------------------- +// evaluateService evaluates the status of a Service resource. +// It checks: +// - Service Type (ClusterIP, NodePort, LoadBalancer, ExternalName) +// - For LoadBalancer services, it verifies if ingress information is available. +// -------------------------------------------------------------------------------- +func evaluateService(svc *corev1.Service) (ResourceStatus, string, error) { + statusDetails := fmt.Sprintf("Name %s, Service Type: %s. 
ClusterIP: %s.", svc.GetName(), svc.Spec.Type, svc.Spec.ClusterIP) + if svc.Spec.Type == corev1.ServiceTypeLoadBalancer { + if len(svc.Status.LoadBalancer.Ingress) == 0 { + statusDetails += "LoadBalancer ingress not yet assigned." + return StatusLoadBalancing, statusDetails, nil + } + ingresses := "" + for _, ingress := range svc.Status.LoadBalancer.Ingress { + ingresses += ingress.IP + " " + ingress.Hostname + " " + } + statusDetails += fmt.Sprintf("LoadBalancer ingress assigned: %s.", ingresses) + } + return StatusAvailable, statusDetails + "Service is operational.", nil +} + +// -------------------------------------------------------------------------------- +// evaluateIngress evaluates the status of an Ingress resource. +// It checks: +// - Whether the Ingress has been assigned an external IP (via LoadBalancer) +// - Provides details on the number of ingress points. +// -------------------------------------------------------------------------------- +func evaluateIngress(ing *networkingv1.Ingress) (ResourceStatus, string, error) { + statusDetails := fmt.Sprintf("Ingress Name: %s.", ing.GetName()) + if len(ing.Status.LoadBalancer.Ingress) == 0 { + statusDetails += "No external ingress IP assigned yet." + return StatusPending, statusDetails, nil + } + ingresses := "" + for _, lb := range ing.Status.LoadBalancer.Ingress { + ingresses += lb.IP + " " + lb.Hostname + " " + } + statusDetails += fmt.Sprintf("External ingress IP(s) assigned: %s.", ingresses) + return StatusAvailable, statusDetails, nil +}
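+
+// Putting the pieces together: a minimal end-to-end sketch that restarts a Deployment via
+// RolloutRestart (rollout.go) and then blocks until the new revision is fully available via
+// WaitForStatus. The kubeconfig path, context name, namespace, and deployment name are
+// illustrative assumptions.
+// ```go
+//
+//	client, err := kubernetes.NewClient("/path/to/kubeconfig", "current context")
+//	if err != nil {
+//		return err
+//	}
+//	deploymentClient := kubernetes.Deployment(client, "default")
+//	if err := kubernetes.RolloutRestart(ctx, deploymentClient, "example-deployment"); err != nil {
+//		return err
+//	}
+//	deploy, matched, err := kubernetes.WaitForStatus(ctx, deploymentClient, "example-deployment", kubernetes.StatusAvailable)
+//	if err != nil {
+//		return err
+//	}
+//	if matched {
+//		log.Printf("deployment %s rolled out successfully", deploy.GetName())
+//	}
+//
+// ```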