diff --git a/README.md b/README.md index 880b7758..f7ae8a4e 100644 --- a/README.md +++ b/README.md @@ -189,23 +189,23 @@ command, see [Environment Setup](#environment-setup). pip install "leads[standard]" ``` -If your platform does not support GPIO, use profile "no-gpio". - -```shell -pip install "leads[no-gpio]" -``` - If you only want the framework, run the following. ```shell pip install leads ``` -#### Verify +This table lists all installation profiles. -```shell -leads-vec info -``` +| Profile | Content | For | All Platforms | +|----------------------|-----------------------------------------------------------------|--------------------------------------------------|---------------| +| leads | Only the framework | LEADS Framework | ✓ | +| "leads[standard]" | The framework and necessary dependencies | LEADS Framework | ✓ | +| "leads[gpio]" | Everything "leads[standard]" has plus `lgpio` | LEADS Framework | ✗ | +| "leads[vec]" | Everything "leads[gpio]" has plus `pynput` | LEADS VeC | ✗ | +| "leads[vec-no-gpio]" | Everything "leads[standard]" has plus `pynput` | LEADS VeC (if you are not using any GPIO device) | ✓ | +| "leads[vec-rc]" | Everything "leads[standard]" has plus `"fastapi[standard]"` | LEADS VeC Remote Analyst | ✓ | +| "leads[vec-dp]" | Everything "leads[standard]" has plus `matplotlib` and `pyyaml` | LEADS VeC Data Processor | ✓ | ### Arduino @@ -225,6 +225,12 @@ the framework in your project. leads-vec run ``` +#### Verify + +```shell +leads-vec info +``` + #### Replay ```shell @@ -356,12 +362,6 @@ automatically calculate the best factor to keep the original proportion as desig ### Remote Analyst -The remote analyst requires additional dependencies. Install them through the following command. - -```shell -pip install "leads[all]" -``` - ```shell leads-vec-rc ``` @@ -394,6 +394,14 @@ If not specified, all configurations will be default values. To learn about the configuration file, read [Configurations](#configurations). 
+### Data Processor + +```shell +leads-vec-dp path/to/the/workflow.yml +``` + +To learn more about workflows, read [Workflows](#workflows). + ## Environment Setup This section helps you set up the identical environment we have for the VeC project. A more detailed guide of @@ -497,6 +505,62 @@ Note that a purely empty file could cause an error. | `data_dir` | `str` | Directory for the data recording system | Remote | `"data"` | | `save_data` | `bool` | `True`: save data; `False`: discard data | Remote | `False` | +## Workflows + +This only applies to LEADS VeC Data Processor. Please find a more detailed version +[here](https://leads-docs.projectneura.org/en/latest/vec/index.html#workflows). + +```yaml +dataset: "data/main.csv" +inferences: + repeat: 100 # default: 1 + enhanced: true # default: false + assume_initial_zeros: true # default: false + methods: + - safe-speed + - speed-by-acceleration + - speed-by-mileage + - speed-by-gps-ground-speed + - speed-by-gps-position + - forward-acceleration-by-speed + - milage-by-speed + - milage-by-gps-position + - visual-data-realignment-by-latency + +jobs: + - name: Task 1 + uses: bake + - name: Task 2 + uses: process + with: + lap_time_assertions: # default: [] + - 120 # lap 1 duration (seconds) + - 180 # lap 2 duration (seconds) + vehicle_hit_box: 5 # default: 3 + min_lap_time: 60 # default: 30 (seconds) + - name: Draw Lap 5 + uses: draw-lap + with: + lap_index: 4 # default: -1 + - name: Suggest on Lap 5 + uses: suggest-on-lap + with: + lap_index: 4 + - name: Draw Comparison of Laps + uses: draw-comparison-of-laps + with: + width: 0.5 # default: 0.3 + - name: Extract Video + uses: extract-video + with: + file: rear-view.mp4 # destination to save the video + tag: rear # front, left, right, or rear + - name: Save + uses: save-as + with: + file: data/new.csv +``` + ## Devices Module ### Example diff --git a/docs/LEADS.pptx b/docs/LEADS.pptx index 27bdf891..77d8f7e3 100644 Binary files a/docs/LEADS.pptx and b/docs/LEADS.pptx 
differ diff --git a/leads/data_persistence/analyzer/inference.py b/leads/data_persistence/analyzer/inference.py index 0fce4368..0c474ca2 100644 --- a/leads/data_persistence/analyzer/inference.py +++ b/leads/data_persistence/analyzer/inference.py @@ -245,7 +245,7 @@ def complete(self, *rows: dict[str, _Any], backward: bool = False) -> dict[str, original_target = target.copy() t_0, t = target["t"], base["t"] for channel in self._channels: - if (new_latency := t_0 - t + base[f"{channel}_view_latency"]) > 0: + if (new_latency := t - t_0 - base[f"{channel}_view_latency"]) < 0: continue target[f"{channel}_view_base64"] = base[f"{channel}_view_base64"] target[f"{channel}_view_latency"] = new_latency @@ -272,8 +272,9 @@ def merge(raw: dict[str, _Any], inferred: dict[str, _Any]) -> None: for key in inferred.keys(): raw[key] = inferred[key] - def _complete(self, inferences: tuple[Inference, ...], enhanced: bool, backward: bool) -> None: + def _complete(self, inferences: tuple[Inference, ...], enhanced: bool, backward: bool) -> int: num_rows = len(self._raw_data) + num_affected_rows = 0 for i in range(num_rows - 1, -1, -1) if backward else range(num_rows): for inference in inferences: p, f = inference.depth() @@ -287,7 +288,9 @@ def _complete(self, inferences: tuple[Inference, ...], enhanced: bool, backward: InferredDataset.merge(row, self._inferred_data[j]) d.append(row) if (r := inference.complete(*d, backward=backward)) is not None: + num_affected_rows += 1 InferredDataset.merge(self._inferred_data[i], r) + return num_affected_rows @_override def load(self) -> None: @@ -311,20 +314,20 @@ def assume_initial_zeros(self) -> None: injection["mileage"] = 0 InferredDataset.merge(row, injection) - def complete(self, *inferences: Inference, enhanced: bool = False, assume_initial_zeros: bool = False) -> None: + def complete(self, *inferences: Inference, enhanced: bool = False, assume_initial_zeros: bool = False) -> int: """ Infer the missing values in the dataset. 
:param inferences: the inferences to apply :param enhanced: True: use inferred data to infer other data; False: use only raw data to infer other data :param assume_initial_zeros: True: reasonably set any missing data in the first row to zero; False: no change + :return: the number of affected rows """ for inference in inferences: if not set(rh := inference.header()).issubset(ah := self.read_header()): raise KeyError(f"Inference {inference} requires header {rh} but the dataset only contains {ah}") if assume_initial_zeros: self.assume_initial_zeros() - self._complete(inferences, enhanced, False) - self._complete(inferences, enhanced, True) + n = self._complete(inferences, enhanced, False) + return n + self._complete(inferences, enhanced, True) @_override def __iter__(self) -> _Generator[dict[str, _Any], None, None]: diff --git a/leads/data_persistence/analyzer/processor.py b/leads/data_persistence/analyzer/processor.py index 348ad446..c8370910 100644 --- a/leads/data_persistence/analyzer/processor.py +++ b/leads/data_persistence/analyzer/processor.py @@ -10,7 +10,7 @@ from leads.data import dlat2meters, dlon2meters, format_duration from leads.data_persistence.analyzer.utils import time_invalid, speed_invalid, mileage_invalid, latitude_invalid, \ - longitude_invalid + longitude_invalid, latency_invalid from leads.data_persistence.core import CSVDataset, DEFAULT_HEADER from .._computational import sqrt as _sqrt @@ -38,6 +38,9 @@ def __init__(self, dataset: CSVDataset) -> None: self._gps_invalid_rows: list[int] = [] self._min_lat: float | None = None self._min_lon: float | None = None + # visual + self._min_latency: float | None = None + self._max_latency: float | None = None # process variables self._laps: list[tuple[int, int, int, float, float]] = [] @@ -69,8 +72,7 @@ def unit(row: dict[str, _Any], i: int) -> None: t = int(row["t"]) speed = row["speed"] mileage = row["mileage"] - if time_invalid(t) or speed_invalid( - speed) or mileage_invalid(mileage): + if time_invalid(t)
or speed_invalid(speed) or mileage_invalid(mileage): self._invalid_rows.append(i) return if self._start_time is None: @@ -94,6 +96,15 @@ def unit(row: dict[str, _Any], i: int) -> None: self._min_lon = lon self._gps_valid_count += 1 self._valid_rows_count += 1 + # visual + latencies = [row[key] for key in ("front_view_latency", "left_view_latency", "right_view_latency", + "rear_view_latency") if key in row.keys()] + latency = min(latencies) + if not latency_invalid(latency) and (self._min_latency is None or latency < self._min_latency): + self._min_latency = latency + latency = max(latencies) + if not latency_invalid(latency) and (self._max_latency is None or latency > self._max_latency): + self._max_latency = latency self.foreach(unit, False) if self._valid_rows_count == 0: @@ -107,7 +118,7 @@ def _hide_others(seq: _Sequence[_Any], limit: int) -> str: return f"[{", ".join(map(str, seq[:limit]))}, and {diff} others]" if (diff := len(seq) - limit) > 0 else str( seq) - def baking_results(self) -> tuple[str, str, str, str, str, str, str, str, str, str, str, str]: + def baking_results(self) -> tuple[str, str, str, str, str, str, str, str, str, str, str, str, str, str]: """ Get the results of the baking process. 
:return: the results in sentences @@ -129,7 +140,9 @@ def baking_results(self) -> tuple[str, str, str, str, str, str, str, str, str, s f"v\u2098\u2090\u2093: {self._max_speed:.2f} KM / H", f"v\u2090\u1D65\u1D4D: {self._avg_speed:.2f} KM / H", f"GPS Hit Rate: {100 * self._gps_valid_count / self._valid_rows_count:.2f}%", - f"GPS Skipped Rows: {Processor._hide_others(self._gps_invalid_rows, 5)}" + f"GPS Skipped Rows: {Processor._hide_others(self._gps_invalid_rows, 5)}", + "Min Video Latency: N/A" if self._min_latency is None else f"Min Video Latency: {self._min_latency:.2f} MS", + "Max Video Latency: N/A" if self._max_latency is None else f"Max Video Latency: {self._max_latency:.2f} MS" ) def erase_unit_cache(self) -> None: diff --git a/leads/data_persistence/analyzer/utils.py b/leads/data_persistence/analyzer/utils.py index 2277a9c2..a8e6fee9 100644 --- a/leads/data_persistence/analyzer/utils.py +++ b/leads/data_persistence/analyzer/utils.py @@ -9,23 +9,27 @@ def time_invalid(o: _Any) -> bool: def speed_invalid(o: _Any) -> bool: - return not isinstance(o, int | float) or o != o or o < 0 + return not isinstance(o, int | float) or o < 0 def acceleration_invalid(o: _Any) -> bool: - return not isinstance(o, int | float) or o != o + return not isinstance(o, int | float) def mileage_invalid(o: _Any) -> bool: - return not isinstance(o, int | float) or o != o + return not isinstance(o, int | float) def latitude_invalid(o: _Any) -> bool: - return not isinstance(o, int | float) or o != o or not -90 < o < 90 + return not isinstance(o, int | float) or not -90 < o < 90 def longitude_invalid(o: _Any) -> bool: - return not isinstance(o, int | float) or o != o or not -180 < o < 180 + return not isinstance(o, int | float) or not -180 < o < 180 + + +def latency_invalid(o: _Any) -> bool: + return not isinstance(o, int | float) def distance_between(lat_0: float, lon_0: float, lat: float, lon: float) -> float: diff --git a/leads/data_persistence/core.py b/leads/data_persistence/core.py 
index 236a39b5..7fe5f308 100644 --- a/leads/data_persistence/core.py +++ b/leads/data_persistence/core.py @@ -4,6 +4,8 @@ override as _override, Self as _Self, Iterator as _Iterator, Callable as _Callable, Iterable as _Iterable, \ Generator as _Generator, Any as _Any +from numpy import nan as _nan + from leads.types import Compressor as _Compressor, VisualHeader as _VisualHeader, VisualHeaderFull as _VisualHeaderFull from ._computational import mean as _mean, array as _array, norm as _norm, read_csv as _read_csv, \ DataFrame as _DataFrame, TextFileReader as _TextFileReader @@ -218,7 +220,7 @@ def __iter__(self) -> _Generator[dict[str, _Any], None, None]: except StopIteration: break for i in range(len(chunk)): - r = chunk.iloc[i].to_dict() + r = chunk.iloc[i].replace(_nan, None).to_dict() if self._contains_index: r.pop("index") yield r diff --git a/leads/dt/registry.py b/leads/dt/registry.py index 36cdd043..9603e097 100644 --- a/leads/dt/registry.py +++ b/leads/dt/registry.py @@ -61,7 +61,7 @@ def register_controller(tag: str, c: Controller, parent: str | None = None) -> N def has_controller(tag: str) -> bool: - return tag in _controllers.keys() + return tag in _controllers def get_controller(tag: str) -> Controller: @@ -79,7 +79,7 @@ def _register_device(prototype: type[Device], def has_device(tag: str) -> bool: - return tag in _devices.keys() + return tag in _devices def get_device(tag: str) -> Device: diff --git a/leads_vec/cli.py b/leads_vec/cli.py index 7c049816..de2c4f26 100644 --- a/leads_vec/cli.py +++ b/leads_vec/cli.py @@ -214,9 +214,9 @@ def render(manager: ContextManager) -> None: if cfg.comm_stream: manager["comm_stream_status"] = _Label(root, text="STM OFFLINE", text_color="gray", font=("Arial", cfg.font_size_small)) - i = 0 + j = 0 for system in SystemLiteral: - i += 1 + j += 1 system_lower = system.lower() manager[f"{system_lower}_status"] = _Label(root, text=f"{system} READY", text_color="green", font=("Arial", cfg.font_size_small)) diff --git 
a/leads_vec/run.py b/leads_vec/run.py index 48ebc1f1..5689bcaa 100644 --- a/leads_vec/run.py +++ b/leads_vec/run.py @@ -17,6 +17,7 @@ def run(config: str | None, devices: str, main: str, register: _Literal["systemd _create_service() _L.debug("Service registered") _L.debug(f"Service script is located at \"{_abspath(__file__)[:-6]}_bootloader/leads-vec.service.sh\"") + return 0 case "config": if _exists("config.json"): r = input("\"config.json\" already exists. Overwrite? (Y/n) >>>").lower() @@ -26,6 +27,7 @@ def run(config: str | None, devices: str, main: str, register: _Literal["systemd with open("config.json", "w") as f: f.write(str(Config({}))) _L.debug("Configuration file saved to \"config.json\"") + return 0 case "reverse_proxy": from ._bootloader import start_frpc as _start_frpc diff --git a/leads_vec_dp/__entry__.py b/leads_vec_dp/__entry__.py new file mode 100644 index 00000000..085666a1 --- /dev/null +++ b/leads_vec_dp/__entry__.py @@ -0,0 +1,13 @@ +from argparse import ArgumentParser as _ArgumentParser +from sys import exit as _exit + +from leads_vec_dp.run import run + + +def __entry__() -> None: + parser = _ArgumentParser(prog="LEADS VeC DP", + description="Lightweight Embedded Assisted Driving System VeC Data Processor", + epilog="GitHub: https://github.com/ProjectNeura/LEADS") + parser.add_argument("workflow", help="specify a workflow file") + args = parser.parse_args() + _exit(run(args.workflow)) diff --git a/leads_vec_dp/__init__.py b/leads_vec_dp/__init__.py new file mode 100644 index 00000000..8a221d84 --- /dev/null +++ b/leads_vec_dp/__init__.py @@ -0,0 +1,7 @@ +from importlib.util import find_spec as _find_spec + +if not _find_spec("yaml"): + raise ImportError("Please install `pyyaml` to run this module\n>>>pip install pyyaml") + +from leads_vec_dp.__entry__ import __entry__ +from leads_vec_dp.run import * diff --git a/leads_vec_dp/__main__.py b/leads_vec_dp/__main__.py new file mode 100644 index 00000000..cf452e20 --- /dev/null +++ 
b/leads_vec_dp/__main__.py @@ -0,0 +1,4 @@ +from leads_vec_dp.__entry__ import __entry__ + +if __name__ == "__main__": + __entry__() diff --git a/leads_vec_dp/run.py b/leads_vec_dp/run.py new file mode 100644 index 00000000..934c7285 --- /dev/null +++ b/leads_vec_dp/run.py @@ -0,0 +1,79 @@ +from atexit import register as _register +from typing import Any as _Any + +from yaml import load as _load, SafeLoader as _SafeLoader + +from leads import L as _L +from leads.data_persistence import CSVDataset as _CSVDataset +from leads.data_persistence.analyzer import InferredDataset as _InferredDataset, Inference as _Inference, \ + SafeSpeedInference as _SafeSpeedInference, SpeedInferenceByAcceleration as _SpeedInferenceByAcceleration, \ + SpeedInferenceByMileage as _SpeedInferenceByMileage, \ + SpeedInferenceByGPSGroundSpeed as _SpeedInferenceByGPSGroundSpeed, \ + SpeedInferenceByGPSPosition as _SpeedInferenceByGPSPosition, \ + ForwardAccelerationInferenceBySpeed as _ForwardAccelerationInferenceBySpeed, \ + MileageInferenceBySpeed as _MileageInferenceBySpeed, \ + MileageInferenceByGPSPosition as _MileageInferenceByGPSPosition, \ + VisualDataRealignmentByLatency as _VisualDataRealignmentByLatency +from leads.data_persistence.analyzer.processor import Processor as _Processor +from leads_video import extract_video as _extract_video + +INFERENCE_METHODS: dict[str, type[_Inference]] = { + "safe-speed": _SafeSpeedInference, + "speed-by-acceleration": _SpeedInferenceByAcceleration, + "speed-by-mileage": _SpeedInferenceByMileage, + "speed-by-gps-ground-speed": _SpeedInferenceByGPSGroundSpeed, + "speed-by-gps-position": _SpeedInferenceByGPSPosition, + "forward-acceleration-by-speed": _ForwardAccelerationInferenceBySpeed, + "milage-by-speed": _MileageInferenceBySpeed, + "milage-by-gps-position": _MileageInferenceByGPSPosition, + "visual-data-realignment-by-latency": _VisualDataRealignmentByLatency +} + + +def _optional_kwargs(source: dict[str, _Any], key: str) -> dict[str, _Any]: + 
return source[key] if key in source else {} + + +def run(target: str) -> int: + with open(target) as f: + target = _load(f.read(), _SafeLoader) + if "inferences" in target: + dataset = _InferredDataset(target["dataset"]) + inferences = target["inferences"] + methods = [] + for method in inferences["methods"]: + methods.append(INFERENCE_METHODS[method]()) + inferences.pop("methods") + repeat = 1 + if "repeat" in inferences: + repeat = inferences["repeat"] + inferences.pop("repeat") + for _ in range(repeat): + _L.info(f"Affected {(n := dataset.complete(*methods, **inferences))} row{"s" if n > 1 else ""}") + else: + dataset = _CSVDataset(target["dataset"]) + _register(dataset.close) + processor = _Processor(dataset) + for job in target["jobs"]: + _L.info(f"Executing job {job["name"]}...") + match job["uses"]: + case "bake": + processor.bake() + _L.info("Baking Results", *processor.baking_results(), sep="\n") + case "process": + processor.process(**_optional_kwargs(job, "with")) + _L.info("Results", *processor.results(), sep="\n") + case "draw-lap": + processor.draw_lap(**_optional_kwargs(job, "with")) + case "suggest-on-lap": + _L.info(*processor.suggest_on_lap(job["with"]["lap_index"]), sep="\n") + case "draw-comparison-of-laps": + processor.draw_comparison_of_laps(**_optional_kwargs(job, "with")) + case "extract-video": + _extract_video(dataset, file := job["with"]["file"], job["with"]["tag"]) + _L.info(f"Video saved as {file}") + case "save-as": + dataset.save(file := job["with"]["file"]) + _L.info(f"Dataset saved as {file}") + + return 0 diff --git a/leads_vec_rc/__entry__.py b/leads_vec_rc/__entry__.py index a1908fe0..abe757d4 100644 --- a/leads_vec_rc/__entry__.py +++ b/leads_vec_rc/__entry__.py @@ -16,4 +16,4 @@ def __entry__() -> None: _register_config(_load_config(args.config, Config) if args.config else Config({})) from leads_vec_rc.cli import app - _run(app, host="0.0.0.0", port=args.port, log_level="warning") \ No newline at end of file + _run(app, 
host="0.0.0.0", port=args.port, log_level="warning") diff --git a/leads_video/types.py b/leads_video/types.py deleted file mode 100644 index 6e29066c..00000000 --- a/leads_video/types.py +++ /dev/null @@ -1,3 +0,0 @@ -from typing import Literal as _Literal - -type VideoTag = _Literal["front_view_base64", "left_view_base64", "right_view_base64", "rear_view_base64"] diff --git a/leads_video/utils.py b/leads_video/utils.py index 3948212e..3395c9c3 100644 --- a/leads_video/utils.py +++ b/leads_video/utils.py @@ -1,7 +1,7 @@ from base64 import b64decode as _b64decode from binascii import Error as _BinasciiError from io import BytesIO as _BytesIO -from typing import Any as _Any +from typing import Any as _Any, Literal as _Literal from PIL.Image import Image as _Image, open as _open, UnidentifiedImageError as _UnidentifiedImageError from cv2 import VideoWriter as _VideoWriter, VideoWriter_fourcc as _VideoWriter_fourcc, cvtColor as _cvtColor, \ @@ -11,7 +11,6 @@ from leads import has_device as _has_device, get_device as _get_device from leads.data_persistence import CSVDataset as _CSVDataset from leads_video.camera import Camera -from leads_video.types import VideoTag as _VideoTag def get_camera(tag: str, required_type: type[Camera] = Camera) -> Camera | None: @@ -23,15 +22,16 @@ def get_camera(tag: str, required_type: type[Camera] = Camera) -> Camera | None: return cam -def _decode_frame(row: dict[str, _Any], tag: _VideoTag) -> _Image: +def _decode_frame(row: dict[str, _Any], tag: str) -> _Image: if not (frame := row[tag]): raise ValueError return _open(_BytesIO(_b64decode(frame))) -def extract_video(file: str, dataset: _CSVDataset, tag: _VideoTag) -> None: +def extract_video(dataset: _CSVDataset, file: str, tag: _Literal["front", "left", "right", "rear"]) -> None: if not file.endswith(".mp4"): file += ".mp4" + tag = f"{tag}_view_base64" prev_row = None resolution = None fps = 0 diff --git a/pyproject.toml b/pyproject.toml index f16ad5b5..168fee8b 100644 --- 
a/pyproject.toml +++ b/pyproject.toml @@ -16,25 +16,22 @@ dependencies = ["numpy", "pandas"] [project.optional-dependencies] standard = [ - "Pillow", "PySDL2", "customtkinter", "gpiozero", "lgpio", "opencv-python-headless", "pynmea2", "pynput", - "pysdl2-dll", "pyserial", "screeninfo" -] -no-gpio = [ - "Pillow", "PySDL2", "customtkinter", "gpiozero", "opencv-python-headless", "pynmea2", "pynput", "pysdl2-dll", - "pyserial", "screeninfo" -] -all = [ - "Pillow", "PySDL2", "customtkinter", "fastapi[standard]", "gpiozero", "lgpio", "opencv-python-headless", "pynmea2", - "pynput", "pysdl2-dll", "pyserial", "screeninfo" + "Pillow", "PySDL2", "customtkinter", "gpiozero", "opencv-python-headless", "pynmea2", "pysdl2-dll", "pyserial", + "screeninfo" ] +gpio = ["leads[standard]", "lgpio"] +vec = ["leads[gpio]", "pynput"] +vec-no-gpio = ["leads[standard]", "pynput"] +vec-rc = ["leads[standard]", "fastapi[standard]"] +vec-dp = ["leads[standard]", "matplotlib", "pyyaml"] [tool.hatch.build.targets.sdist] only-include = ["leads", "leads_arduino", "leads_audio", "leads_can", "leads_comm_serial", "leads_emulation", - "leads_gpio", "leads_gui", "leads_video", "leads_vec", "leads_vec_rc", "design", "docs"] + "leads_gpio", "leads_gui", "leads_video", "leads_vec", "leads_vec_rc", "leads_vec_dp", "design", "docs"] [tool.hatch.build.targets.wheel] packages = ["leads", "leads_arduino", "leads_audio", "leads_can", "leads_comm_serial", "leads_emulation", "leads_gpio", - "leads_gui", "leads_video", "leads_vec", "leads_vec_rc"] + "leads_gui", "leads_video", "leads_vec", "leads_vec_rc", "leads_vec_dp"] [project.urls] Homepage = "https://leads.projectneura.org" @@ -43,6 +40,7 @@ Repository = "https://github.com/ProjectNeura/LEADS" [project.scripts] leads-vec-rc = "leads_vec_rc:__entry__" +leads-vec-dp = "leads_vec_dp:__entry__" [project.gui-scripts] leads-vec = "leads_vec:__entry__"