# NOTE(review): the lines below were web-page scrape residue (GitHub UI
# chrome and PR merge banner text), not part of the workflow. Commented out
# so the file remains valid YAML; original text preserved for traceability.
# Skip to content
# Merge pull request #1619 from roboflow/feature/improvements-and-fixes… #1057
---
# Code quality & regression test workflow for the Roboflow GPU inference
# server, executed on a self-hosted NVIDIA T4 runner.
#
# Triggers:
#   - push to `main` (runs every suite)
#   - manual `workflow_dispatch` with an optional `test_name` choice to run
#     a single suite; the empty-string choice (the default) runs all suites.
#
# Each suite follows the same start-docker / run-pytest / stop-docker
# pattern, gated by the same `test_name` condition on all three steps.
name: Code Quality & Regression Tests - NVIDIA T4
permissions:
  contents: read
on:
  push:
    branches: [main]
  workflow_dispatch:
    inputs:
      test_name:
        description: 'Name of specific test to run. Leave empty to run all tests.'
        required: false
        default: ''
        type: choice
        options:
          - ''
          - regression_without_torch
          - regression_with_torch
          - paligemma
          - florence
          - sam2
          - moondream2
jobs:
  build:
    # Skip when executed locally via `act` (which sets github.event.act).
    if: ${{ !github.event.act }}
    runs-on: Roboflow-GPU-VM-Runner
    timeout-minutes: 120
    steps:
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v2
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
      - name: 🛎️ Checkout
        uses: actions/checkout@v3
        with:
          # head_ref is empty on push events; checkout then falls back to
          # the triggering commit.
          ref: ${{ github.head_ref }}
      - name: 🦾 Install dependencies
        run: |
          python3 -m venv .venv
          source .venv/bin/activate
          python3 -m pip install --upgrade pip
          python3 -m pip install -r requirements/requirements.test.integration.txt -r requirements/requirements.test.unit.txt
      - name: 🔨 Build and Push Test Docker - GPU
        run: |
          docker build -t roboflow/roboflow-inference-server-gpu:test -f docker/dockerfiles/Dockerfile.onnx.gpu .
      - name: 🔋 Start Test Docker without Torch Preprocessing - GPU
        if: ${{ github.event.inputs.test_name == '' || github.event.inputs.test_name == 'regression_without_torch' }}
        run: |
          PORT=9101 USE_PYTORCH_FOR_PREPROCESSING=False INFERENCE_SERVER_REPO=roboflow-inference-server-gpu make start_test_docker_gpu
      - name: 🧪 Regression Tests without Torch Preprocessing - GPU
        id: regression_tests_without_torch
        if: ${{ github.event.inputs.test_name == '' || github.event.inputs.test_name == 'regression_without_torch' }}
        run: |
          source .venv/bin/activate
          MINIMUM_FPS=25 FUNCTIONAL=true PORT=9101 SKIP_LMM_TEST=True API_KEY=${{ secrets.API_KEY }} asl_instance_segmentation_API_KEY=${{ secrets.ASL_INSTANCE_SEGMENTATION_API_KEY }} asl_poly_instance_seg_API_KEY=${{ secrets.ASL_POLY_INSTANCE_SEG_API_KEY }} bccd_favz3_API_KEY=${{ secrets.BCCD_FAVZ3_API_KEY }} bccd_i4nym_API_KEY=${{ secrets.BCCD_I4NYM_API_KEY }} cats_and_dogs_smnpl_API_KEY=${{ secrets.CATS_AND_DOGS_SMNPL_API_KEY }} coins_xaz9i_API_KEY=${{ secrets.COINS_XAZ9I_API_KEY }} melee_API_KEY=${{ secrets.MELEE_API_KEY }} yolonas_test_API_KEY=${{ secrets.YOLONAS_TEST_API_KEY }} python3 -m pytest tests/inference/integration_tests/
      - name: 🧹 Cleanup Test Docker - GPU
        if: ${{ github.event.inputs.test_name == '' || github.event.inputs.test_name == 'regression_without_torch' }}
        run: make stop_test_docker
      - name: 🔋 Start Test Docker with Torch Preprocessing - GPU
        if: ${{ github.event.inputs.test_name == '' || github.event.inputs.test_name == 'regression_with_torch' }}
        run: |
          PORT=9101 USE_PYTORCH_FOR_PREPROCESSING=True INFERENCE_SERVER_REPO=roboflow-inference-server-gpu make start_test_docker_gpu
      - name: 🧪 Regression Tests with Torch Preprocessing - GPU
        id: regression_tests_with_torch
        if: ${{ github.event.inputs.test_name == '' || github.event.inputs.test_name == 'regression_with_torch' }}
        run: |
          source .venv/bin/activate
          MINIMUM_FPS=25 FUNCTIONAL=true PORT=9101 SKIP_LMM_TEST=True API_KEY=${{ secrets.API_KEY }} asl_instance_segmentation_API_KEY=${{ secrets.ASL_INSTANCE_SEGMENTATION_API_KEY }} asl_poly_instance_seg_API_KEY=${{ secrets.ASL_POLY_INSTANCE_SEG_API_KEY }} bccd_favz3_API_KEY=${{ secrets.BCCD_FAVZ3_API_KEY }} bccd_i4nym_API_KEY=${{ secrets.BCCD_I4NYM_API_KEY }} cats_and_dogs_smnpl_API_KEY=${{ secrets.CATS_AND_DOGS_SMNPL_API_KEY }} coins_xaz9i_API_KEY=${{ secrets.COINS_XAZ9I_API_KEY }} melee_API_KEY=${{ secrets.MELEE_API_KEY }} yolonas_test_API_KEY=${{ secrets.YOLONAS_TEST_API_KEY }} python3 -m pytest tests/inference/integration_tests/
      - name: 🧹 Cleanup Test Docker - GPU
        if: ${{ github.event.inputs.test_name == '' || github.event.inputs.test_name == 'regression_with_torch' }}
        run: make stop_test_docker
      - name: 🔋 Start Test Docker - GPU
        if: ${{ github.event.inputs.test_name == '' || github.event.inputs.test_name == 'paligemma' }}
        run: |
          PORT=9101 INFERENCE_SERVER_REPO=roboflow-inference-server-gpu make start_test_docker_gpu
      - name: 🧪 Regression Paligemma - GPU
        id: paligemma_tests
        if: ${{ github.event.inputs.test_name == '' || github.event.inputs.test_name == 'paligemma' }}
        run: |
          source .venv/bin/activate
          PORT=9101 melee_API_KEY=${{ secrets.MELEE_API_KEY }} python3 -m pytest tests/inference/integration_tests/test_paligemma.py
      - name: 🧹 Cleanup Test Docker - GPU
        if: ${{ github.event.inputs.test_name == '' || github.event.inputs.test_name == 'paligemma' }}
        run: make stop_test_docker
      - name: 🔋 Start Test Docker - GPU
        if: ${{ github.event.inputs.test_name == '' || github.event.inputs.test_name == 'florence' }}
        run: |
          PORT=9101 INFERENCE_SERVER_REPO=roboflow-inference-server-gpu make start_test_docker_gpu
      - name: 🧪 Regression Florence - GPU
        id: florence_tests
        if: ${{ github.event.inputs.test_name == '' || github.event.inputs.test_name == 'florence' }}
        run: |
          source .venv/bin/activate
          PORT=9101 melee_API_KEY=${{ secrets.MELEE_API_KEY }} python3 -m pytest tests/inference/integration_tests/test_florence.py
      - name: 🧹 Cleanup Test Docker - GPU
        if: ${{ github.event.inputs.test_name == '' || github.event.inputs.test_name == 'florence' }}
        run: make stop_test_docker
      - name: 🔋 Start Test Docker - SAM2
        if: ${{ github.event.inputs.test_name == '' || github.event.inputs.test_name == 'sam2' }}
        run: |
          PORT=9101 INFERENCE_SERVER_REPO=roboflow-inference-server-gpu make start_test_docker_gpu
      - name: 🧪 Regression Tests - SAM2
        id: sam2_tests
        if: ${{ github.event.inputs.test_name == '' || github.event.inputs.test_name == 'sam2' }}
        run: |
          source .venv/bin/activate
          PORT=9101 API_KEY=${{ secrets.API_KEY }} SKIP_SAM2_TESTS=False python3 -m pytest tests/inference/integration_tests/test_sam2.py
      - name: 🧹 Cleanup Test Docker - SAM2
        if: ${{ github.event.inputs.test_name == '' || github.event.inputs.test_name == 'sam2' }}
        run: make stop_test_docker
      # Qwen2.5 suite is disabled; kept for reference.
      # - name: 🔋 Start Test Docker - GPU
      #   if: ${{ github.event.inputs.test_name == '' || github.event.inputs.test_name == 'qwen25' }}
      #   run: |
      #     PORT=9101 INFERENCE_SERVER_REPO=roboflow-inference-server-gpu make start_test_docker_gpu
      # - name: 🧪 Regression Tests - Qwen2.5
      #   id: qwen25_tests
      #   if: ${{ github.event.inputs.test_name == '' || github.event.inputs.test_name == 'qwen25' }}
      #   run: |
      #     source .venv/bin/activate
      #     PORT=9101 API_KEY=${{ secrets.QWEN_API_KEY }} SKIP_QWEN25_TESTS=False python3 -m pytest tests/inference/integration_tests/qwen_test.py
      # - name: 🧹 Cleanup Test Docker - Qwen2.5
      #   if: ${{ github.event.inputs.test_name == '' || github.event.inputs.test_name == 'qwen25' }}
      #   run: make stop_test_docker
      - name: 🔋 Start Test Docker - MoonDream
        if: ${{ github.event.inputs.test_name == '' || github.event.inputs.test_name == 'moondream2' }}
        run: |
          PORT=9101 INFERENCE_SERVER_REPO=roboflow-inference-server-gpu make start_test_docker_gpu
      - name: 🧪 Regression Tests - MoonDream
        id: moondream_tests
        if: ${{ github.event.inputs.test_name == '' || github.event.inputs.test_name == 'moondream2' }}
        run: |
          source .venv/bin/activate
          PORT=9101 API_KEY=${{ secrets.API_KEY }} SKIP_MOONDREAM2_TEST=False python3 -m pytest tests/inference/integration_tests/test_moondream2.py
      - name: 🧹 Cleanup Test Docker - MoonDream
        if: ${{ github.event.inputs.test_name == '' || github.event.inputs.test_name == 'moondream2' }}
        run: make stop_test_docker
      - name: 🚨 Show server logs on error
        # Runs only when an earlier step failed; dumps the server container
        # logs for debugging, then tears the container down.
        if: failure()
        run: |
          docker logs inference-test
          make stop_test_docker