diff --git a/tests/integ/test_tools.py b/tests/integ/test_tools.py
index 4954738c..9bd195eb 100644
--- a/tests/integ/test_tools.py
+++ b/tests/integ/test_tools.py
@@ -11,7 +11,8 @@
dpt_hybrid_midas,
florence2_image_caption,
florence2_ocr,
- florence2_phrase_grounding,
+ florence2_phrase_grounding_image,
+ florence2_phrase_grounding_video,
florence2_roberta_vqa,
florence2_sam2_image,
florence2_sam2_video_tracking,
@@ -92,9 +93,9 @@ def test_owl_v2_video():
assert 24 <= len([res["label"] for res in result[0]]) <= 26
-def test_florence2_phrase_grounding():
+def test_florence2_phrase_grounding_image():
img = ski.data.coins()
- result = florence2_phrase_grounding(
+ result = florence2_phrase_grounding_image(
image=img,
prompt="coin",
)
@@ -102,9 +103,9 @@ def test_florence2_phrase_grounding():
assert [res["label"] for res in result] == ["coin"] * 25
-def test_florence2_phrase_grounding_fine_tune_id():
+def test_florence2_phrase_grounding_image_fine_tune_id():
img = ski.data.coins()
- result = florence2_phrase_grounding(
+ result = florence2_phrase_grounding_image(
prompt="coin",
image=img,
fine_tune_id=FINE_TUNE_ID,
@@ -114,6 +115,32 @@ def test_florence2_phrase_grounding_fine_tune_id():
assert [res["label"] for res in result] == ["coin"] * len(result)
+def test_florence2_phrase_grounding_video():
+ frames = [
+ np.array(Image.fromarray(ski.data.coins()).convert("RGB")) for _ in range(10)
+ ]
+ result = florence2_phrase_grounding_video(
+ prompt="coin",
+ frames=frames,
+ )
+ assert len(result) == 10
+ assert 24 <= len([res["label"] for res in result[0]]) <= 26
+
+
+def test_florence2_phrase_grounding_video_fine_tune_id():
+ frames = [
+ np.array(Image.fromarray(ski.data.coins()).convert("RGB")) for _ in range(10)
+ ]
+ # this calls a fine-tuned florence2 model which is going to be worse at this task
+ result = florence2_phrase_grounding_video(
+ prompt="coin",
+ frames=frames,
+ fine_tune_id=FINE_TUNE_ID,
+ )
+ assert len(result) == 10
+ assert 24 <= len([res["label"] for res in result[0]]) <= 26
+
+
def test_template_match():
img = ski.data.coins()
result = template_match(
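
The renamed tools keep distinct shapes: the image variant takes one array and returns a flat list of detections, while the video variant takes a frame list and returns one inner list per frame. A minimal usage sketch, assuming the same skimage/PIL fixtures the tests above use; the keyword names (prompt, image, frames) follow the signatures exercised in the tests:

    import numpy as np
    import skimage as ski
    from PIL import Image
    from vision_agent.tools import (
        florence2_phrase_grounding_image,
        florence2_phrase_grounding_video,
    )

    img = ski.data.coins()
    # single image -> flat list of {"score", "label", "bbox"} dicts
    image_dets = florence2_phrase_grounding_image(prompt="coin", image=img)

    frames = [np.array(Image.fromarray(img).convert("RGB")) for _ in range(3)]
    # video -> one inner list of detections per frame
    video_dets = florence2_phrase_grounding_video(prompt="coin", frames=frames)
    assert len(video_dets) == len(frames)
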
diff --git a/tests/unit/test_meta_tools.py b/tests/unit/test_meta_tools.py
index fced644b..ef07bb9e 100644
--- a/tests/unit/test_meta_tools.py
+++ b/tests/unit/test_meta_tools.py
@@ -33,16 +33,16 @@ def test_use_object_detection_fine_tuning_none():
def test_use_object_detection_fine_tuning():
artifacts = Artifacts("test")
- code = """florence2_phrase_grounding('one', image1)
+ code = """florence2_phrase_grounding_image('one', image1)
owl_v2_image('two', image2)
florence2_sam2_image('three', image3)"""
- expected_code = """florence2_phrase_grounding("one", image1, "123")
+ expected_code = """florence2_phrase_grounding_image("one", image1, "123")
owl_v2_image("two", image2, "123")
florence2_sam2_image("three", image3, "123")"""
artifacts["code"] = code
output = use_object_detection_fine_tuning(artifacts, "code", "123")
- assert 'florence2_phrase_grounding("one", image1, "123")' in output
+ assert 'florence2_phrase_grounding_image("one", image1, "123")' in output
assert 'owl_v2_image("two", image2, "123")' in output
assert 'florence2_sam2_image("three", image3, "123")' in output
assert artifacts["code"] == expected_code
@@ -50,24 +50,24 @@ def test_use_object_detection_fine_tuning():
def test_use_object_detection_fine_tuning_twice():
artifacts = Artifacts("test")
- code = """florence2_phrase_grounding('one', image1)
+ code = """florence2_phrase_grounding_image('one', image1)
owl_v2_image('two', image2)
florence2_sam2_image('three', image3)"""
- expected_code1 = """florence2_phrase_grounding("one", image1, "123")
+ expected_code1 = """florence2_phrase_grounding_image("one", image1, "123")
owl_v2_image("two", image2, "123")
florence2_sam2_image("three", image3, "123")"""
- expected_code2 = """florence2_phrase_grounding("one", image1, "456")
+ expected_code2 = """florence2_phrase_grounding_image("one", image1, "456")
owl_v2_image("two", image2, "456")
florence2_sam2_image("three", image3, "456")"""
artifacts["code"] = code
output = use_object_detection_fine_tuning(artifacts, "code", "123")
- assert 'florence2_phrase_grounding("one", image1, "123")' in output
+ assert 'florence2_phrase_grounding_image("one", image1, "123")' in output
assert 'owl_v2_image("two", image2, "123")' in output
assert 'florence2_sam2_image("three", image3, "123")' in output
assert artifacts["code"] == expected_code1
output = use_object_detection_fine_tuning(artifacts, "code", "456")
- assert 'florence2_phrase_grounding("one", image1, "456")' in output
+ assert 'florence2_phrase_grounding_image("one", image1, "456")' in output
assert 'owl_v2_image("two", image2, "456")' in output
assert 'florence2_sam2_image("three", image3, "456")' in output
assert artifacts["code"] == expected_code2
diff --git a/vision_agent/agent/vision_agent_coder_prompts.py b/vision_agent/agent/vision_agent_coder_prompts.py
index 07f2c6e2..45fc02ed 100644
--- a/vision_agent/agent/vision_agent_coder_prompts.py
+++ b/vision_agent/agent/vision_agent_coder_prompts.py
@@ -101,7 +101,7 @@
- Use the 'owl_v2_video' tool with the prompt 'person' to detect where the people are in the video.
plan2:
- Extract frames from 'video.mp4' at 10 FPS using the 'extract_frames_and_timestamps' tool.
-- Use the 'florence2_phrase_grounding' tool with the prompt 'person' to detect where the people are in the video.
+- Use the 'florence2_phrase_grounding_image' tool with the prompt 'person' to detect where the people are in the video.
plan3:
- Extract frames from 'video.mp4' at 10 FPS using the 'extract_frames_and_timestamps' tool.
- Use the 'florence2_sam2_video_tracking' tool with the prompt 'person' to detect where the people are in the video.
@@ -109,7 +109,7 @@
```python
import numpy as np
-from vision_agent.tools import extract_frames_and_timestamps, owl_v2_video, florence2_phrase_grounding, florence2_sam2_video_tracking
+from vision_agent.tools import extract_frames_and_timestamps, owl_v2_video, florence2_phrase_grounding_image, florence2_sam2_video_tracking
# sample at 1 FPS and use the first 10 frames to reduce processing time
frames = extract_frames_and_timestamps("video.mp4", 1)
@@ -143,7 +143,7 @@ def get_counts(preds):
owl_v2_counts = get_counts(owl_v2_out)
# plan2
-florence2_out = [florence2_phrase_grounding("person", f) for f in frames]
+florence2_out = [florence2_phrase_grounding_image("person", f) for f in frames]
florence2_counts = get_counts(florence2_out)
# plan3
@@ -153,13 +153,13 @@ def get_counts(preds):
final_out = {{
"owl_v2_video": owl_v2_out,
- "florence2_phrase_grounding": florence2_out,
+ "florence2_phrase_grounding_image": florence2_out,
"florence2_sam2_video_tracking": f2s2_out,
}}
counts = {{
"owl_v2_video": owl_v2_counts,
- "florence2_phrase_grounding": florence2_counts,
+ "florence2_phrase_grounding_image": florence2_counts,
"florence2_sam2_video_tracking": f2s2_counts,
}}
diff --git a/vision_agent/agent/vision_agent_prompts.py b/vision_agent/agent/vision_agent_prompts.py
index bc3295ef..4a668bda 100644
--- a/vision_agent/agent/vision_agent_prompts.py
+++ b/vision_agent/agent/vision_agent_prompts.py
@@ -131,10 +131,10 @@
OBSERVATION:
[Artifact code.py]
-0|from vision_agent.tools import load_image, florence2_phrase_grounding, closest_box_distance, overlay_bounding_boxes, save_image
+0|from vision_agent.tools import load_image, florence2_phrase_grounding_image, closest_box_distance, overlay_bounding_boxes, save_image
1|def count_workers_with_helmets(image_path: str, output_path: str):
2| image = load_image(image_path)
-3| detections = florence2_phrase_grounding("worker, helmet", image)
+3| detections = florence2_phrase_grounding_image("worker, helmet", image)
4| workers = [d for d in detections if d['label'] == 'worker']
5| helmets = [d for d in detections if d['label'] == 'helmet']
6| count = 0
@@ -166,18 +166,18 @@
OBSERVATION:
[Fine tuning id: 23b3b022-5ebf-4798-9373-20ef36429abf]
-AGENT: {"thoughts": "The model has finished fine tuning, I will now replace the original florence2_phrase_grounding call with the fine tuning id.", "response": "I will not update the code to use the fine tuned model. use_object_detection_fine_tuning(artifacts, "code.py", "23b3b022-5ebf-4798-9373-20ef36429abf")", "let_user_respond": false}
+AGENT: {"thoughts": "The model has finished fine tuning, I will now replace the original florence2_phrase_grounding_image call with the fine tuning id.", "response": "I will now update the code to use the fine tuned model. use_object_detection_fine_tuning(artifacts, \"code.py\", \"23b3b022-5ebf-4798-9373-20ef36429abf\")", "let_user_respond": false}
OBSERVATION:
[Artifact code.py edits]
---
+++
@@ -1,7 +1,7 @@
- from vision_agent.tools import load_image, florence2_phrase_grounding, closest_box_distance, overlay_bounding_boxes, save_image
+ from vision_agent.tools import load_image, florence2_phrase_grounding_image, closest_box_distance, overlay_bounding_boxes, save_image
def count_workers_with_helmets(image_path: str, output_path: str):
image = load_image(image_path)
-- detections = florence2_phrase_grounding("worker, helmet", image)
-+ detections = florence2_phrase_grounding("worker, helmet", image, "23b3b022-5ebf-4798-9373-20ef36429abf")
+- detections = florence2_phrase_grounding_image("worker, helmet", image)
++ detections = florence2_phrase_grounding_image("worker, helmet", image, "23b3b022-5ebf-4798-9373-20ef36429abf")
workers = [d for d in detections if d['label'] == 'worker']
helmets = [d for d in detections if d['label'] == 'helmet']
count = 0
@@ -189,5 +189,5 @@ def count_workers_with_helmets(image_path: str, output_path: str):
----- stdout -----
3
-AGENT: {"thoughts": "Three workers with helmets are detected, I will show this to the user and ask them if the result looks good.", "response": "I have updated the code using the fine tuned florence2_phrase_grounding model to count the workers wearing helmets in code.py and saved the visualization under 'workers_viz.png'.", "let_user_respond": true}
+AGENT: {"thoughts": "Three workers with helmets are detected, I will show this to the user and ask them if the result looks good.", "response": "I have updated the code using the fine tuned florence2_phrase_grounding_image model to count the workers wearing helmets in code.py and saved the visualization under 'workers_viz.png'.", "let_user_respond": true}
"""
diff --git a/vision_agent/tools/__init__.py b/vision_agent/tools/__init__.py
index 22453224..ebf98c08 100644
--- a/vision_agent/tools/__init__.py
+++ b/vision_agent/tools/__init__.py
@@ -24,7 +24,8 @@
extract_frames_and_timestamps,
florence2_image_caption,
florence2_ocr,
- florence2_phrase_grounding,
+ florence2_phrase_grounding_image,
+ florence2_phrase_grounding_video,
florence2_roberta_vqa,
florence2_sam2_image,
florence2_sam2_video_tracking,
diff --git a/vision_agent/tools/meta_tools.py b/vision_agent/tools/meta_tools.py
index 7d70e031..597bf5cc 100644
--- a/vision_agent/tools/meta_tools.py
+++ b/vision_agent/tools/meta_tools.py
@@ -665,8 +665,12 @@ def use_object_detection_fine_tuning(
patterns_with_fine_tune_id = [
(
- r'florence2_phrase_grounding\(\s*["\']([^"\']+)["\']\s*,\s*([^,]+)(?:,\s*["\'][^"\']+["\'])?\s*\)',
- lambda match: f'florence2_phrase_grounding("{match.group(1)}", {match.group(2)}, "{fine_tune_id}")',
+ r'florence2_phrase_grounding_image\(\s*["\']([^"\']+)["\']\s*,\s*([^,]+)(?:,\s*["\'][^"\']+["\'])?\s*\)',
+ lambda match: f'florence2_phrase_grounding_image("{match.group(1)}", {match.group(2)}, "{fine_tune_id}")',
+ ),
+ (
+ r'florence2_phrase_grounding_video\(\s*["\']([^"\']+)["\']\s*,\s*([^,]+)(?:,\s*["\'][^"\']+["\'])?\s*\)',
+ lambda match: f'florence2_phrase_grounding_video("{match.group(1)}", {match.group(2)}, "{fine_tune_id}")',
),
(
r'owl_v2_image\(\s*["\']([^"\']+)["\']\s*,\s*([^,]+)(?:,\s*["\'][^"\']+["\'])?\s*\)',
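
Each pattern's optional trailing group, (?:,\s*["\'][^"\']+["\'])?, also consumes a previously injected id, so re-running the rewrite swaps the fine-tune id rather than appending a second argument; this is the behavior test_use_object_detection_fine_tuning_twice checks. A standalone sketch of the substitution using only the image pattern above and the standard library:

    import re

    PATTERN = (
        r'florence2_phrase_grounding_image\(\s*["\']([^"\']+)["\']\s*,'
        r'\s*([^,]+)(?:,\s*["\'][^"\']+["\'])?\s*\)'
    )

    def rewrite(code: str, fine_tune_id: str) -> str:
        # rewrite every matching call to carry the given fine-tune id
        return re.sub(
            PATTERN,
            lambda m: f'florence2_phrase_grounding_image("{m.group(1)}", {m.group(2)}, "{fine_tune_id}")',
            code,
        )

    code = "florence2_phrase_grounding_image('one', image1)"
    once = rewrite(code, "123")    # -> ...("one", image1, "123")
    twice = rewrite(once, "456")   # id replaced -> ...("one", image1, "456")
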
diff --git a/vision_agent/tools/tools.py b/vision_agent/tools/tools.py
index 344726db..ff360d87 100644
--- a/vision_agent/tools/tools.py
+++ b/vision_agent/tools/tools.py
@@ -1141,16 +1141,13 @@ def florence2_image_caption(image: np.ndarray, detail_caption: bool = True) -> s
return answer[task] # type: ignore
-# TODO: add video
-
-
-def florence2_phrase_grounding(
+def florence2_phrase_grounding_image(
prompt: str, image: np.ndarray, fine_tune_id: Optional[str] = None
) -> List[Dict[str, Any]]:
- """'florence2_phrase_grounding' is a tool that can detect multiple
- objects given a text prompt which can be object names or caption. You
- can optionally separate the object names in the text with commas. It returns a list
- of bounding boxes with normalized coordinates, label names and associated
+ """'florence2_phrase_grounding_image' will run florence2 on an image. It can
+ detect multiple objects given a text prompt, which can be object names or a
+ caption. You can optionally separate the object names with commas. It returns
+ a list of bounding boxes with normalized coordinates, label names and associated
probability scores of 1.0.
Parameters:
@@ -1168,7 +1165,7 @@ def florence2_phrase_grounding(
Example
-------
- >>> florence2_phrase_grounding('person looking at a coyote', image)
+ >>> florence2_phrase_grounding_image('person looking at a coyote', image)
[
{'score': 1.0, 'label': 'person', 'bbox': [0.1, 0.11, 0.35, 0.4]},
{'score': 1.0, 'label': 'coyote', 'bbox': [0.34, 0.21, 0.85, 0.5]},
@@ -1196,7 +1193,7 @@ def florence2_phrase_grounding(
data,
"florence2-ft",
v2=True,
- metadata_payload={"function_name": "florence2_phrase_grounding"},
+ metadata_payload={"function_name": "florence2_phrase_grounding_image"},
)
# get the first frame
detection = detections[0]
@@ -1205,7 +1202,7 @@ def florence2_phrase_grounding(
"image": image_b64,
"task": "",
"prompt": prompt,
- "function_name": "florence2_phrase_grounding",
+ "function_name": "florence2_phrase_grounding_image",
}
detections = send_inference_request(data, "florence2", v2=True)
detection = detections[""]
@@ -1222,6 +1219,90 @@ def florence2_phrase_grounding(
return [bbox.model_dump() for bbox in return_data]
+def florence2_phrase_grounding_video(
+ prompt: str, frames: List[np.ndarray], fine_tune_id: Optional[str] = None
+) -> List[Dict[str, Any]]:
+ """'florence2_phrase_grounding_video' will run florence2 on each frame of a video.
+ It can detect multiple objects given a text prompt, which can be object names
+ or a caption. You can optionally separate the object names in the text with commas.
+ It returns a list of lists where each inner list contains bounding boxes with
+ normalized coordinates, label names and associated probability scores of 1.0.
+
+ Parameters:
+ prompt (str): The prompt to ground to the video.
+ frames (List[np.ndarray]): The list of frames to detect objects.
+ fine_tune_id (Optional[str]): If you have a fine-tuned model, you can pass the
+ fine-tuned model ID here to use it.
+
+ Returns:
+ List[List[Dict[str, Any]]]: A list of lists of dictionaries containing the score,
+ label, and bounding box of the detected objects with normalized coordinates
+ between 0 and 1 (xmin, ymin, xmax, ymax). xmin and ymin are the coordinates
+ of the top-left and xmax and ymax are the coordinates of the bottom-right of
+ the bounding box. The scores are always 1.0 and cannot be thresholded.
+
+ Example
+ -------
+ >>> florence2_phrase_grounding_video('person looking at a coyote', frames)
+ [
+ [
+ {'score': 1.0, 'label': 'person', 'bbox': [0.1, 0.11, 0.35, 0.4]},
+ {'score': 1.0, 'label': 'coyote', 'bbox': [0.34, 0.21, 0.85, 0.5]},
+ ],
+ ...
+ ]
+ """
+ if len(frames) == 0:
+ raise ValueError("No frames provided")
+
+ image_size = frames[0].shape[:2]
+ buffer_bytes = frames_to_bytes(frames)
+ files = [("video", buffer_bytes)]
+
+ if fine_tune_id is not None:
+ landing_api = LandingPublicAPI()
+ status = landing_api.check_fine_tuning_job(UUID(fine_tune_id))
+ if status is not JobStatus.SUCCEEDED:
+ raise FineTuneModelIsNotReady(
+ f"Fine-tuned model {fine_tune_id} is not ready yet"
+ )
+
+ # with exclude_none=True, job_id is dropped from the payload when it is None
+ data_obj = Florence2FtRequest(
+ video=buffer_bytes,
+ task=PromptTask.PHRASE_GROUNDING,
+ prompt=prompt,
+ job_id=UUID(fine_tune_id) if fine_tune_id is not None else None,
+ )
+ data = data_obj.model_dump(by_alias=True, exclude_none=True)
+
+ detections = send_inference_request(
+ data,
+ "florence2-ft",
+ v2=True,
+ files=files,
+ metadata_payload={"function_name": "florence2_phrase_grounding_video"},
+ )
+
+ bboxes_formatted = []
+ for frame_data in detections:
+ bboxes_formatted_per_frame = []
+ for idx in range(len(frame_data["bboxes"])):
+ bboxes_formatted_per_frame.append(
+ ODResponseData(
+ label=frame_data["labels"][idx],
+ bbox=normalize_bbox(frame_data["bboxes"][idx], image_size),
+ score=1.0,
+ )
+ )
+ bboxes_formatted.append(bboxes_formatted_per_frame)
+ return [[bbox.model_dump() for bbox in frame] for frame in bboxes_formatted]
+
+
def florence2_ocr(image: np.ndarray) -> List[Dict[str, Any]]:
"""'florence2_ocr' is a tool that can detect text and text regions in an image.
Each text region contains one line of text. It returns a list of detected text,
@@ -1233,7 +1314,7 @@ def florence2_ocr(image: np.ndarray) -> List[Dict[str, Any]]:
Returns:
List[Dict[str, Any]]: A list of dictionaries containing the detected text, bbox
- with nornmalized coordinates, and confidence score.
+ with normalized coordinates, and confidence score.
Example
-------
@@ -2077,7 +2158,8 @@ def overlay_counting_results(
florence2_ocr,
florence2_sam2_image,
florence2_sam2_video_tracking,
- florence2_phrase_grounding,
+ florence2_phrase_grounding_image,
+ florence2_phrase_grounding_video,
ixc25_image_vqa,
ixc25_video_vqa,
detr_segmentation,
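
Since the video tool returns one detection list per frame, per-frame aggregation mirrors the get_counts pattern in the coder prompt above. A small sketch; the count_label_per_frame helper and its defaults are illustrative, not part of the tools API:

    from typing import List

    import numpy as np

    from vision_agent.tools import florence2_phrase_grounding_video

    def count_label_per_frame(
        frames: List[np.ndarray], prompt: str = "person", label: str = "person"
    ) -> List[int]:
        # one inner list of detections per input frame
        detections = florence2_phrase_grounding_video(prompt, frames)
        # keep only boxes whose label matches, then count per frame
        return [
            sum(1 for det in frame_dets if det["label"] == label)
            for frame_dets in detections
        ]
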