From 04345a15ff754aa7ee5810fc2495f2fae7d4fef6 Mon Sep 17 00:00:00 2001 From: Yazhou Cao Date: Tue, 26 Mar 2024 15:34:26 -0700 Subject: [PATCH 1/2] Polish API docs, auto publish docs in CI --- .github/workflows/docs.yml | 48 +++++++++++ .gitignore | 1 + docs/_overrides/main.html | 5 ++ docs/api/agent.md | 13 +++ docs/api/data.md | 3 + docs/api/emb.md | 3 + docs/api/image_utils.md | 1 + docs/api/llm.md | 3 + docs/api/lmm.md | 3 + docs/api/tools.md | 5 ++ docs/index.md | 132 +++++++++++++++-------------- docs/old.md | 87 +++++++++++++++++++ mkdocs.yml | 43 ++++++++++ vision_agent/agent/easytool.py | 12 ++- vision_agent/agent/reflexion.py | 12 ++- vision_agent/agent/vision_agent.py | 12 ++- vision_agent/image_utils.py | 31 ++++++- vision_agent/tools/tools.py | 59 +++++++++++-- 18 files changed, 398 insertions(+), 75 deletions(-) create mode 100644 .github/workflows/docs.yml create mode 100644 docs/_overrides/main.html create mode 100644 docs/api/agent.md create mode 100644 docs/api/data.md create mode 100644 docs/api/emb.md create mode 100644 docs/api/image_utils.md create mode 100644 docs/api/llm.md create mode 100644 docs/api/lmm.md create mode 100644 docs/api/tools.md create mode 100644 docs/old.md create mode 100644 mkdocs.yml diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml new file mode 100644 index 00000000..c8dab4b2 --- /dev/null +++ b/.github/workflows/docs.yml @@ -0,0 +1,48 @@ +name: pdoc + +# build the documentation whenever there are new commits on main +on: + push: + branches: + - main + +# security: restrict permissions for CI jobs. +permissions: + contents: read + +jobs: + # Build the documentation and upload the static HTML files as an artifact. + build: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - uses: actions/setup-python@v4 + with: + python-version: 3.10.11 + + - uses: Gr1N/setup-poetry@v8 + with: + poetry-version: "1.2.2" + + - run: poetry install --all-extras + - run: mkdir -p docs-build + - run: poetry run mkdocs build -f mkdocs.yml -d docs-build/ + + - uses: actions/upload-pages-artifact@v1 + with: + path: docs-build/ + + # Deploy the artifact to GitHub pages. + # This is a separate job so that only actions/deploy-pages has the necessary permissions. 
+ deploy: + needs: build + runs-on: ubuntu-latest + permissions: + pages: write + id-token: write + environment: + name: github-pages + url: ${{ steps.deployment.outputs.page_url }} + steps: + - id: deployment + uses: actions/deploy-pages@v2 diff --git a/.gitignore b/.gitignore index 5faf2c08..4a5a9be5 100644 --- a/.gitignore +++ b/.gitignore @@ -89,6 +89,7 @@ MANIFEST examples/output tests/output docs-build +site # Local or WIP files local/ \ No newline at end of file diff --git a/docs/_overrides/main.html b/docs/_overrides/main.html new file mode 100644 index 00000000..73474d30 --- /dev/null +++ b/docs/_overrides/main.html @@ -0,0 +1,5 @@ +{% extends "base.html" %} + +{% block footer %} + {{ super() }} +{% endblock %} \ No newline at end of file diff --git a/docs/api/agent.md b/docs/api/agent.md new file mode 100644 index 00000000..2cfefd54 --- /dev/null +++ b/docs/api/agent.md @@ -0,0 +1,13 @@ +::: vision_agent.agent + +::: vision_agent.agent.agent + +::: vision_agent.agent.easytool + +::: vision_agent.agent.easytool_prompts + +::: vision_agent.agent.reflexion + +::: vision_agent.agent.reflexion_prompts + +::: vision_agent.agent.vision_agent diff --git a/docs/api/data.md b/docs/api/data.md new file mode 100644 index 00000000..2e157f7c --- /dev/null +++ b/docs/api/data.md @@ -0,0 +1,3 @@ +::: vision_agent.data + +::: vision_agent.data.data \ No newline at end of file diff --git a/docs/api/emb.md b/docs/api/emb.md new file mode 100644 index 00000000..1421ef8d --- /dev/null +++ b/docs/api/emb.md @@ -0,0 +1,3 @@ +::: vision_agent.emb + +::: vision_agent.emb.emb \ No newline at end of file diff --git a/docs/api/image_utils.md b/docs/api/image_utils.md new file mode 100644 index 00000000..f9ae1ed6 --- /dev/null +++ b/docs/api/image_utils.md @@ -0,0 +1 @@ +::: vision_agent.image_utils \ No newline at end of file diff --git a/docs/api/llm.md b/docs/api/llm.md new file mode 100644 index 00000000..0588e797 --- /dev/null +++ b/docs/api/llm.md @@ -0,0 +1,3 @@ +::: vision_agent.llm + +::: vision_agent.llm.llm \ No newline at end of file diff --git a/docs/api/lmm.md b/docs/api/lmm.md new file mode 100644 index 00000000..2c4f433a --- /dev/null +++ b/docs/api/lmm.md @@ -0,0 +1,3 @@ +::: vision_agent.lmm + +::: vision_agent.lmm.lmm \ No newline at end of file diff --git a/docs/api/tools.md b/docs/api/tools.md new file mode 100644 index 00000000..fa3fba93 --- /dev/null +++ b/docs/api/tools.md @@ -0,0 +1,5 @@ +::: vision_agent.tools + +::: vision_agent.tools.prompts + +::: vision_agent.tools.tools \ No newline at end of file diff --git a/docs/index.md b/docs/index.md index c585fd03..2a83bef7 100644 --- a/docs/index.md +++ b/docs/index.md @@ -1,87 +1,95 @@ -

+# 🔍🤖 Vision Agent -# Welcome to the Landing AI LMM Tools Documentation +Vision Agent is a library that helps you utilize agent frameworks for your vision tasks. +Many current vision problems can easily take hours or days to solve, you need to find the +right model, figure out how to use it, possibly write programming logic around it to +accomplish the task you want or even more expensive, train your own model. Vision Agent +aims to provide an in-seconds experience by allowing users to describe their problem in +text and utilizing agent frameworks to solve the task for them. Check out our discord +for updates and roadmaps! -This library provides a set of tools to help you build applications with Large Multimodal Model (LMM). - - -## Quick Start - -### Install -First, install the library: +## Getting Started +### Installation +To get started, you can install the library using pip: ```bash pip install vision-agent ``` -### LMMs -One of the problems of dealing with image data is it can be difficult to organize and -search. For example, you might have a bunch of pictures of houses and want to count how -many yellow houses you have, or how many houses with adobe roofs. The vision agent -library uses LMMs to help create tags or descriptions of images to allow you to search -over them, or use them in a database to carry out other operations. - -To get started, you can use an LMM to start generating text from images. The following -code will use the LLaVA-1.6 34B model to generate a description of the image you pass it. +Ensure you have an OpenAI API key and set it as an environment variable: -```python -import vision_agent as va - -model = va.lmm.get_lmm("llava") -model.generate("Describe this image", "image.png") ->>> "A yellow house with a green lawn." +```bash +export OPENAI_API_KEY="your-api-key" ``` -**WARNING** We are hosting the LLaVA-1.6 34B model, if it times out please wait ~3-5 -min for the server to warm up as it shuts down when usage is low. - -### DataStore -You can use the `DataStore` class to store your images, add new metadata to them such -as descriptions, and search over different columns. +### Vision Agents +You can interact with the agents as you would with any LLM or LMM model: ```python -import vision_agent as va -import pandas as pd - -df = pd.DataFrame({"image_paths": ["image1.png", "image2.png", "image3.png"]}) -ds = va.data.DataStore(df) -ds = ds.add_lmm(va.lmm.get_lmm("llava")) -ds = ds.add_embedder(va.emb.get_embedder("sentence-transformer")) - -ds = ds.add_column("descriptions", "Describe this image.") +>>> import vision_agent as va +>>> agent = VisionAgent() +>>> agent("How many apples are in this image?", image="apples.jpg") +"There are 2 apples in the image." ``` -This will use the prompt you passed, "Describe this image.", and the LMM to create a -new column of descriptions for your image. Your data will now contain a new column with -the descriptions of each image: +To better understand how the model came up with it's answer, you can also run it in +debug mode by passing in the verbose argument: -| image\_paths | image\_id | descriptions | -| --- | --- | --- | -| image1.png | 1 | "A yellow house with a green lawn." | -| image2.png | 2 | "A white house with a two door garage." | -| image3.png | 3 | "A wooden house in the middle of the forest." | +```python +>>> agent = VisionAgent(verbose=True) +``` -You can now create an index on the descriptions column and search over it to find images -that match your query. 
+You can also have it return the workflow it used to complete the task along with all +the individual steps and tools to get the answer: ```python -ds = ds.build_index("descriptions") -ds.search("A yellow house.", top_k=1) ->>> [{'image_paths': 'image1.png', 'image_id': 1, 'descriptions': 'A yellow house with a green lawn.'}] +>>> resp, workflow = agent.chat_with_workflow([{"role": "user", "content": "How many apples are in this image?"}], image="apples.jpg") +>>> print(workflow) +[{"task": "Count the number of apples using 'grounding_dino_'.", + "tool": "grounding_dino_", + "parameters": {"prompt": "apple", "image": "apples.jpg"}, + "call_results": [[ + { + "labels": ["apple", "apple"], + "scores": [0.99, 0.95], + "bboxes": [ + [0.58, 0.2, 0.72, 0.45], + [0.94, 0.57, 0.98, 0.66], + ] + } + ]], + "answer": "There are 2 apples in the image.", +}] ``` -You can also create other columns for you data such as `is_yellow`: +### Tools +There are a variety of tools for the model or the user to use. Some are executed locally +while others are hosted for you. You can also ask an LLM directly to build a tool for +you. For example: ```python -ds = ds.add_column("is_yellow", "Is the house in this image yellow? Please answer yes or no.") +>>> import vision_agent as va +>>> llm = va.llm.OpenAILLM() +>>> detector = llm.generate_detector("Can you build an apple detector for me?") +>>> detector("apples.jpg") +[{"labels": ["apple", "apple"], + "scores": [0.99, 0.95], + "bboxes": [ + [0.58, 0.2, 0.72, 0.45], + [0.94, 0.57, 0.98, 0.66], + ] +}] ``` -which would give you a dataset similar to this: +| Tool | Description | +| --- | --- | +| CLIP | CLIP is a tool that can classify or tag any image given a set of input classes or tags. | +| GroundingDINO | GroundingDINO is a tool that can detect arbitrary objects with inputs such as category names or referring expressions. | +| GroundingSAM | GroundingSAM is a tool that can detect and segment arbitrary objects with inputs such as category names or referring expressions. | +| Counter | Counter detects and counts the number of objects in an image given an input such as a category name or referring expression. | +| Crop | Crop crops an image given a bounding box and returns a file name of the cropped image. | +| BboxArea | BboxArea returns the area of the bounding box in pixels normalized to 2 decimal places. | +| SegArea | SegArea returns the area of the segmentation mask in pixels normalized to 2 decimal places. | + -| image\_paths | image\_id | descriptions | is\_yellow | -| --- | --- | --- | --- | -| image1.png | 1 | "A yellow house with a green lawn." | "yes" | -| image2.png | 2 | "A white house with a two door garage." | "no" | -| image3.png | 3 | "A wooden house in the middle of the forest." | "no" | +It also has a basic set of calculate tools such as add, subtract, multiply and divide. diff --git a/docs/old.md b/docs/old.md new file mode 100644 index 00000000..c585fd03 --- /dev/null +++ b/docs/old.md @@ -0,0 +1,87 @@ +

+ +# Welcome to the Landing AI LMM Tools Documentation + +This library provides a set of tools to help you build applications with Large Multimodal Model (LMM). + + +## Quick Start + +### Install +First, install the library: + +```bash +pip install vision-agent +``` + +### LMMs +One of the problems of dealing with image data is it can be difficult to organize and +search. For example, you might have a bunch of pictures of houses and want to count how +many yellow houses you have, or how many houses with adobe roofs. The vision agent +library uses LMMs to help create tags or descriptions of images to allow you to search +over them, or use them in a database to carry out other operations. + +To get started, you can use an LMM to start generating text from images. The following +code will use the LLaVA-1.6 34B model to generate a description of the image you pass it. + +```python +import vision_agent as va + +model = va.lmm.get_lmm("llava") +model.generate("Describe this image", "image.png") +>>> "A yellow house with a green lawn." +``` + +**WARNING** We are hosting the LLaVA-1.6 34B model, if it times out please wait ~3-5 +min for the server to warm up as it shuts down when usage is low. + +### DataStore +You can use the `DataStore` class to store your images, add new metadata to them such +as descriptions, and search over different columns. + +```python +import vision_agent as va +import pandas as pd + +df = pd.DataFrame({"image_paths": ["image1.png", "image2.png", "image3.png"]}) +ds = va.data.DataStore(df) +ds = ds.add_lmm(va.lmm.get_lmm("llava")) +ds = ds.add_embedder(va.emb.get_embedder("sentence-transformer")) + +ds = ds.add_column("descriptions", "Describe this image.") +``` + +This will use the prompt you passed, "Describe this image.", and the LMM to create a +new column of descriptions for your image. Your data will now contain a new column with +the descriptions of each image: + +| image\_paths | image\_id | descriptions | +| --- | --- | --- | +| image1.png | 1 | "A yellow house with a green lawn." | +| image2.png | 2 | "A white house with a two door garage." | +| image3.png | 3 | "A wooden house in the middle of the forest." | + +You can now create an index on the descriptions column and search over it to find images +that match your query. + +```python +ds = ds.build_index("descriptions") +ds.search("A yellow house.", top_k=1) +>>> [{'image_paths': 'image1.png', 'image_id': 1, 'descriptions': 'A yellow house with a green lawn.'}] +``` + +You can also create other columns for you data such as `is_yellow`: + +```python +ds = ds.add_column("is_yellow", "Is the house in this image yellow? Please answer yes or no.") +``` + +which would give you a dataset similar to this: + +| image\_paths | image\_id | descriptions | is\_yellow | +| --- | --- | --- | --- | +| image1.png | 1 | "A yellow house with a green lawn." | "yes" | +| image2.png | 2 | "A white house with a two door garage." | "no" | +| image3.png | 3 | "A wooden house in the middle of the forest." 
| "no" | diff --git a/mkdocs.yml b/mkdocs.yml new file mode 100644 index 00000000..9dd45949 --- /dev/null +++ b/mkdocs.yml @@ -0,0 +1,43 @@ +site_name: Landing AI Vision Agent Library Documentation +site_url: https://landing-ai.github.io/ +repo_url: https://github.com/landing-ai/vision-agent +edit_uri: edit/main/docs/ + + +theme: + name: "material" + custom_dir: docs/_overrides + features: + - content.code.copy + - content.code.annotate + - content.action.edit + +plugins: + - mkdocstrings + - search + +markdown_extensions: + # Syntax highlight + - pymdownx.highlight: + anchor_linenums: true + line_spans: __span + pygments_lang_class: true + - pymdownx.inlinehilite + - pymdownx.snippets + - pymdownx.superfences + + # Multiline note/warning/etc blocks (https://squidfunk.github.io/mkdocs-material/reference/admonitions) + - admonition + - pymdownx.details + +nav: + - Quick start: index.md + - APIs: + - vision_agent.agent: api/agent.md + - vision_agent.tools: api/tools.md + - vision_agent.llm: api/llm.md + - vision_agent.lmm: api/lmm.md + - vision_agent.data: api/data.md + - vision_agent.emb: api/emb.md + - vision_agent.image_utils: api/image_utils.md + - Old documentation: old.md diff --git a/vision_agent/agent/easytool.py b/vision_agent/agent/easytool.py index 83865bcb..72a6fd75 100644 --- a/vision_agent/agent/easytool.py +++ b/vision_agent/agent/easytool.py @@ -241,7 +241,8 @@ class EasyTool(Agent): based on the original implementation https://github.com/microsoft/JARVIS/tree/main/easytool from the funcQA code. - Examples:: + Example + ------- >>> from vision_agent.agent import EasyTool >>> agent = EasyTool() >>> resp = agent("If a car is traveling at 64 km/h, how many kilometers does it travel in 29 minutes?") @@ -273,6 +274,15 @@ def __call__( input: Union[List[Dict[str, str]], str], image: Optional[Union[str, Path]] = None, ) -> str: + """Invoke the vision agent. + + Parameters: + input: a prompt that describe the task or a conversation in the format of [{"role": "user", "content": "describe your task here..."}]. + image: the input image referenced in the prompt parameter. + + Returns: + A text response. + """ if isinstance(input, str): input = [{"role": "user", "content": input}] return self.chat(input, image=image) diff --git a/vision_agent/agent/reflexion.py b/vision_agent/agent/reflexion.py index e24fed83..ac7d77b6 100644 --- a/vision_agent/agent/reflexion.py +++ b/vision_agent/agent/reflexion.py @@ -68,7 +68,8 @@ class Reflexion(Agent): self_reflect_model. Using Reflexion with LMMs may not work well, if it gets it wrong the first time, chances are it can't actually see the thing you want it to see. - Examples:: + Example + ------- >>> from vision_agent.agent import Reflexion >>> agent = Reflexion() >>> question = "How many tires does a truck have?" @@ -139,6 +140,15 @@ def __call__( input: Union[str, List[Dict[str, str]]], image: Optional[Union[str, Path]] = None, ) -> str: + """Invoke the vision agent. + + Parameters: + input: a prompt that describe the task or a conversation in the format of [{"role": "user", "content": "describe your task here..."}]. + image: the input image referenced in the prompt parameter. + + Returns: + A text response. 
+ """ if isinstance(input, str): input = [{"role": "user", "content": input}] return self.chat(input, image) diff --git a/vision_agent/agent/vision_agent.py b/vision_agent/agent/vision_agent.py index 73a41184..670dc4d2 100644 --- a/vision_agent/agent/vision_agent.py +++ b/vision_agent/agent/vision_agent.py @@ -339,7 +339,8 @@ class VisionAgent(Agent): reflect on whether or not it was able to accomplish the task based off of the plan and final results, if not it will redo the task with this newly added reflection. - Examples:: + Example + ------- >>> from vision_agent.agent import VisionAgent >>> agent = VisionAgent() >>> resp = agent("If red tomatoes cost $5 each and yellow tomatoes cost $2.50 each, what is the total cost of all the tomatoes in the image?", image="tomatoes.jpg") @@ -371,6 +372,15 @@ def __call__( input: Union[List[Dict[str, str]], str], image: Optional[Union[str, Path]] = None, ) -> str: + """Invoke the vision agent. + + Parameters: + input: a prompt that describe the task or a conversation in the format of [{"role": "user", "content": "describe your task here..."}]. + image: the input image referenced in the prompt parameter. + + Returns: + The result of the vision agent in text. + """ if isinstance(input, str): input = [{"role": "user", "content": input}] return self.chat(input, image=image) diff --git a/vision_agent/image_utils.py b/vision_agent/image_utils.py index 9ad2bdaa..96699f96 100644 --- a/vision_agent/image_utils.py +++ b/vision_agent/image_utils.py @@ -1,3 +1,5 @@ +"""Utility functions for image processing.""" + import base64 from io import BytesIO from pathlib import Path @@ -9,6 +11,14 @@ def b64_to_pil(b64_str: str) -> ImageType: + """Convert a base64 string to a PIL Image. + + Parameters: + b64_str: the base64 encoded image + + Returns: + The decoded PIL Image + """ # , can't be encoded in b64 data so must be part of prefix if "," in b64_str: b64_str = b64_str.split(",")[1] @@ -16,16 +26,29 @@ def b64_to_pil(b64_str: str) -> ImageType: def get_image_size(data: Union[str, Path, np.ndarray, ImageType]) -> Tuple[int, ...]: + """Get the size of an image. + + Parameters: + data: the input image + + Returns: + The size of the image in the form (height, width) + """ if isinstance(data, (str, Path)): data = Image.open(data) - if isinstance(data, Image.Image): - return data.size[::-1] - else: - return data.shape[:2] + return data.size[::-1] if isinstance(data, Image.Image) else data.shape[:2] def convert_to_b64(data: Union[str, Path, np.ndarray, ImageType]) -> str: + """Convert an image to a base64 string. + + Parameters: + data: the input image + + Returns: + The base64 encoded image + """ if data is None: raise ValueError(f"Invalid input image: {data}. Input image can't be None.") if isinstance(data, (str, Path)): diff --git a/vision_agent/tools/tools.py b/vision_agent/tools/tools.py index 197d260a..60c1cc00 100644 --- a/vision_agent/tools/tools.py +++ b/vision_agent/tools/tools.py @@ -30,7 +30,7 @@ def normalize_bbox( def rle_decode(mask_rle: str, shape: Tuple[int, int]) -> np.ndarray: r"""Decode a run-length encoded mask. Returns numpy array, 1 - mask, 0 - background. - Args: + Parameters: mask_rle: Run-length as string formated (start length) shape: The (height, width) of array to return """ @@ -54,7 +54,8 @@ class CLIP(Tool): r"""CLIP is a tool that can classify or tag any image given a set if input classes or tags. 
- Examples:: + Example + ------- >>> import vision_agent as va >>> clip = va.tools.CLIP() >>> clip(["red line", "yellow dot"], "ct_scan1.jpg")) @@ -89,7 +90,17 @@ class CLIP(Tool): ], } + # TODO: Add support for input multiple images, which aligns with the output type. def __call__(self, prompt: List[str], image: Union[str, ImageType]) -> List[Dict]: + """Invoke the CLIP model. + + Parameters: + prompt: a list of classes or tags to classify the image. + image: the input image to classify. + + Returns: + A list of dictionaries containing the labels and scores. Each dictionary contains the classification result for an image. E.g. [{"labels": ["red line", "yellow dot"], "scores": [0.98, 0.02]}] + """ image_b64 = convert_to_b64(image) data = { "classes": prompt, @@ -117,7 +128,8 @@ class GroundingDINO(Tool): r"""Grounding DINO is a tool that can detect arbitrary objects with inputs such as category names or referring expressions. - Examples:: + Example + ------- >>> import vision_agent as va >>> t = va.tools.GroundingDINO() >>> t("red line. yellow dot", "ct_scan1.jpg") @@ -154,7 +166,17 @@ class GroundingDINO(Tool): ], } + # TODO: Add support for input multiple images, which aligns with the output type. def __call__(self, prompt: str, image: Union[str, Path, ImageType]) -> List[Dict]: + """Invoke the Grounding DINO model. + + Parameters: + prompt: one or multiple class names to detect. The classes should be separated by a period if there are multiple classes. E.g. "big dog . small cat" + image: the input image to run against. + + Returns: + A list of dictionaries containing the labels, scores, and bboxes. Each dictionary contains the detection result for an image. + """ image_size = get_image_size(image) image_b64 = convert_to_b64(image) data = { @@ -188,7 +210,8 @@ class GroundingSAM(Tool): r"""Grounding SAM is a tool that can detect and segment arbitrary objects with inputs such as category names or referring expressions. - Examples:: + Example + ------- >>> import vision_agent as va >>> t = va.tools.GroundingSAM() >>> t(["red line", "yellow dot"], ct_scan1.jpg"]) @@ -234,7 +257,17 @@ class GroundingSAM(Tool): ], } + # TODO: Add support for input multiple images, which aligns with the output type. def __call__(self, prompt: List[str], image: Union[str, ImageType]) -> List[Dict]: + """Invoke the Grounding SAM model. + + Parameters: + prompt: a list of classes to segment. + image: the input image to segment. + + Returns: + A list of dictionaries containing the labels, scores, bboxes and masks. Each dictionary contains the segmentation result for an image. + """ image_size = get_image_size(image) image_b64 = convert_to_b64(image) data = { @@ -260,8 +293,7 @@ def __call__(self, prompt: List[str], image: Union[str, ImageType]) -> List[Dict ret_pred["labels"].append(pred["label_name"]) ret_pred["bboxes"].append(normalize_bbox(pred["bbox"], image_size)) ret_pred["masks"].append(mask) - ret_preds = [ret_pred] - return ret_preds + return [ret_pred] class AgentGroundingSAM(GroundingSAM): @@ -282,6 +314,8 @@ def __call__(self, prompt: List[str], image: Union[str, ImageType]) -> List[Dict class Counter(Tool): + r"""Counter detects and counts the number of objects in an image given an input such as a category name or referring expression.""" + name = "counter_" description = "'counter_' detects and counts the number of objects in an image given an input such as a category name or referring expression." 
usage = { @@ -307,6 +341,7 @@ def __call__(self, prompt: str, image: Union[str, ImageType]) -> Dict: class Crop(Tool): + r"""Crop crops an image given a bounding box and returns a file name of the cropped image.""" name = "crop_" description = "'crop_' crops an image given a bounding box and returns a file name of the cropped image." usage = { @@ -343,6 +378,8 @@ def __call__(self, bbox: List[float], image: Union[str, Path]) -> str: class BboxArea(Tool): + r"""BboxArea returns the area of the bounding box in pixels normalized to 2 decimal places.""" + name = "bbox_area_" description = "'bbox_area_' returns the area of the bounding box in pixels normalized to 2 decimal places." usage = { @@ -371,6 +408,8 @@ def __call__(self, bboxes: List[Dict]) -> List[Dict]: class SegArea(Tool): + r"""SegArea returns the area of the segmentation mask in pixels normalized to 2 decimal places.""" + name = "seg_area_" description = "'seg_area_' returns the area of the segmentation mask in pixels normalized to 2 decimal places." usage = { @@ -390,6 +429,8 @@ def __call__(self, masks: Union[str, Path]) -> float: class Add(Tool): + r"""Add returns the sum of all the arguments passed to it, normalized to 2 decimal places.""" + name = "add_" description = "'add_' returns the sum of all the arguments passed to it, normalized to 2 decimal places." usage = { @@ -407,6 +448,8 @@ def __call__(self, input: List[int]) -> float: class Subtract(Tool): + r"""Subtract returns the difference of all the arguments passed to it, normalized to 2 decimal places.""" + name = "subtract_" description = "'subtract_' returns the difference of all the arguments passed to it, normalized to 2 decimal places." usage = { @@ -424,6 +467,8 @@ def __call__(self, input: List[int]) -> float: class Multiply(Tool): + r"""Multiply returns the product of all the arguments passed to it, normalized to 2 decimal places.""" + name = "multiply_" description = "'multiply_' returns the product of all the arguments passed to it, normalized to 2 decimal places." usage = { @@ -441,6 +486,8 @@ def __call__(self, input: List[int]) -> float: class Divide(Tool): + r"""Divide returns the division of all the arguments passed to it, normalized to 2 decimal places.""" + name = "divide_" description = "'divide_' returns the division of all the arguments passed to it, normalized to 2 decimal places." usage = { From 8bdd8a96d687b721731a8d9afaf701d840f59701 Mon Sep 17 00:00:00 2001 From: Yazhou Cao Date: Tue, 26 Mar 2024 15:34:47 -0700 Subject: [PATCH 2/2] format code --- vision_agent/image_utils.py | 2 +- vision_agent/tools/tools.py | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/vision_agent/image_utils.py b/vision_agent/image_utils.py index 96699f96..05a129ce 100644 --- a/vision_agent/image_utils.py +++ b/vision_agent/image_utils.py @@ -29,7 +29,7 @@ def get_image_size(data: Union[str, Path, np.ndarray, ImageType]) -> Tuple[int, """Get the size of an image. Parameters: - data: the input image + data: the input image Returns: The size of the image in the form (height, width) diff --git a/vision_agent/tools/tools.py b/vision_agent/tools/tools.py index 60c1cc00..c5f50e98 100644 --- a/vision_agent/tools/tools.py +++ b/vision_agent/tools/tools.py @@ -99,7 +99,7 @@ def __call__(self, prompt: List[str], image: Union[str, ImageType]) -> List[Dict image: the input image to classify. Returns: - A list of dictionaries containing the labels and scores. Each dictionary contains the classification result for an image. E.g. 
[{"labels": ["red line", "yellow dot"], "scores": [0.98, 0.02]}] + A list of dictionaries containing the labels and scores. Each dictionary contains the classification result for an image. E.g. [{"labels": ["red line", "yellow dot"], "scores": [0.98, 0.02]}] """ image_b64 = convert_to_b64(image) data = { @@ -342,6 +342,7 @@ def __call__(self, prompt: str, image: Union[str, ImageType]) -> Dict: class Crop(Tool): r"""Crop crops an image given a bounding box and returns a file name of the cropped image.""" + name = "crop_" description = "'crop_' crops an image given a bounding box and returns a file name of the cropped image." usage = {
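---

The docstrings added in this patch describe how each hosted tool is called on its own. As a quick illustration of those call signatures, here is a minimal sketch that chains `GroundingDINO` detection with `Crop`. It is not part of the patch: the prompt `"apple"` and the image path `"apples.jpg"` are placeholders, it assumes `Crop` is exported under `va.tools` alongside the other tools, and it assumes `Crop` accepts the normalized bounding boxes that `GroundingDINO` returns.

```python
import vision_agent as va

# Detect objects with the hosted GroundingDINO tool.
# The prompt and image path below are placeholders.
detector = va.tools.GroundingDINO()
detections = detector("apple", "apples.jpg")
# Per the docstring above, detections looks like:
# [{"labels": [...], "scores": [...], "bboxes": [[x1, y1, x2, y2], ...]}]

bboxes = detections[0]["bboxes"]
if bboxes:
    # Crop the first detection; Crop returns the file name of the cropped image.
    # Assumption: Crop takes the normalized box as returned by GroundingDINO.
    crop = va.tools.Crop()
    cropped_path = crop(bboxes[0], "apples.jpg")
    print(f"Cropped image saved to {cropped_path}")
else:
    print("No objects detected.")
```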
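The `image_utils` helpers documented in this patch can also be exercised directly. Below is a small sketch of the round trip their new docstrings describe (PIL image to base64 and back) together with `get_image_size`. It assumes `convert_to_b64` accepts an in-memory PIL image, as its signature suggests; the test image is generated in memory rather than read from disk.

```python
import numpy as np
from PIL import Image

from vision_agent.image_utils import b64_to_pil, convert_to_b64, get_image_size

# Build a small in-memory test image instead of reading one from disk.
img = Image.new("RGB", (640, 480), color="white")

# get_image_size returns (height, width) for both PIL images and numpy arrays.
print(get_image_size(img))              # (480, 640)
print(get_image_size(np.asarray(img)))  # (480, 640)

# Round-trip through base64, as the new docstrings describe.
b64 = convert_to_b64(img)               # assumption: in-memory PIL input is supported
restored = b64_to_pil(b64)
print(restored.size)                    # (640, 480) in PIL's (width, height) order
```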