From 1ae734050b2ce4ca5b006328b393cc0864f865f8 Mon Sep 17 00:00:00 2001
From: Emily Gavrilenko
Date: Thu, 17 Oct 2024 07:54:11 -0700
Subject: [PATCH 1/5] implement keypoint visualization block

---
 .../visualizations/keypoint/__init__.py       |   0
 .../core_steps/visualizations/keypoint/v1.py  | 279 ++++++++++++++++++
 2 files changed, 279 insertions(+)
 create mode 100644 inference/core/workflows/core_steps/visualizations/keypoint/__init__.py
 create mode 100644 inference/core/workflows/core_steps/visualizations/keypoint/v1.py

diff --git a/inference/core/workflows/core_steps/visualizations/keypoint/__init__.py b/inference/core/workflows/core_steps/visualizations/keypoint/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/inference/core/workflows/core_steps/visualizations/keypoint/v1.py b/inference/core/workflows/core_steps/visualizations/keypoint/v1.py
new file mode 100644
index 000000000..77af57a68
--- /dev/null
+++ b/inference/core/workflows/core_steps/visualizations/keypoint/v1.py
@@ -0,0 +1,279 @@
+from typing import List, Literal, Optional, Type, Union
+
+import supervision as sv
+from pydantic import ConfigDict, Field
+from pprint import pprint
+
+from inference.core.workflows.execution_engine.constants import (
+    KEYPOINTS_XY_KEY_IN_SV_DETECTIONS,
+)
+from inference.core.workflows.core_steps.visualizations.common.base import (
+    OUTPUT_IMAGE_KEY,
+    VisualizationManifest,
+    VisualizationBlock,
+)
+from inference.core.workflows.core_steps.visualizations.common.base_colorable import (
+    ColorableVisualizationBlock,
+    ColorableVisualizationManifest,
+)
+
+from inference.core.workflows.core_steps.visualizations.common.utils import str_to_color
+from inference.core.workflows.execution_engine.entities.base import WorkflowImageData
+from inference.core.workflows.execution_engine.entities.types import (
+    KEYPOINT_DETECTION_PREDICTION_KIND,
+    INTEGER_KIND,
+    STRING_KIND,
+    FLOAT_KIND,
+    StepOutputSelector,
+    WorkflowParameterSelector,
+)
+from inference.core.workflows.prototypes.block import (
+    BlockResult,
+    WorkflowBlockManifest,
+    WorkflowBlock,
+)
+import numpy as np
+
+TYPE: str = "roboflow_core/keypoint_visualization@v1"
+SHORT_DESCRIPTION = "Draws keypoints on detected objects in an image."
+LONG_DESCRIPTION = """
+The `KeypointVisualization` block uses detections from a
+keypoint detection model to draw keypoints on objects using
+`sv.EdgeAnnotator`, `sv.VertexAnnotator`, or `sv.VertexLabelAnnotator`.
+""" + + +class KeypointManifest(VisualizationManifest): + type: Literal[f"{TYPE}", "KeypointVisualization"] + model_config = ConfigDict( + json_schema_extra={ + "name": "Keypoint Visualization", + "version": "v1", + "short_description": SHORT_DESCRIPTION, + "long_description": LONG_DESCRIPTION, + "license": "Apache-2.0", + "block_type": "visualization", + "ui_manifest": { + "section": "visualization", + "icon": "far fa-arrow-down-up-across-line", + "blockPriority": 20, + }, + } + ) + + predictions: StepOutputSelector( + kind=[ + KEYPOINT_DETECTION_PREDICTION_KIND, + ] + ) = Field( # type: ignore + description="Predictions", + examples=["$steps.keypoint_detection_model.predictions"], + ) + + annotator_type: Literal["edge", "vertex", "vertex_label"] = Field( + description="Type of annotator to be used for keypoint visualization.", + default="edge", + json_schema_extra={"always_visible": True}, + ) + + color: Union[str, WorkflowParameterSelector(kind=[STRING_KIND])] = Field( + description="Color of the keypoint.", + default="#A351FB", + examples=["#A351FB", "green", "$inputs.color"], + ) + + text_color: Union[str, WorkflowParameterSelector(kind=[STRING_KIND])] = Field( + description="Text color of the keypoint.", + default="black", + examples=["black", "$inputs.text_color"], + json_schema_extra={ + "relevant_for": { + "annotator_type": { + "values": ["vertex_label"], + }, + }, + }, + ) + text_scale: Union[float, WorkflowParameterSelector(kind=[FLOAT_KIND])] = Field( # type: ignore + description="Scale of the text.", + default=1.0, + examples=[1.0, "$inputs.text_scale"], + json_schema_extra={ + "relevant_for": { + "annotator_type": { + "values": ["vertex_label"], + }, + }, + }, + ) + + text_thickness: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + description="Thickness of the text characters.", + default=1, + examples=[1, "$inputs.text_thickness"], + json_schema_extra={ + "relevant_for": { + "annotator_type": { + "values": ["vertex_label"], + }, + }, + }, + ) + + text_padding: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + description="Padding around the text in pixels.", + default=10, + examples=[10, "$inputs.text_padding"], + json_schema_extra={ + "relevant_for": { + "annotator_type": { + "values": ["vertex_label"], + }, + }, + }, + ) + + thickness: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + description="Thickness of the outline in pixels.", + default=2, + examples=[2, "$inputs.thickness"], + json_schema_extra={ + "relevant_for": { + "annotator_type": { + "values": ["edge"], + }, + }, + }, + ) + + radius: Union[int, WorkflowParameterSelector(kind=[INTEGER_KIND])] = Field( # type: ignore + description="Radius of the keypoint in pixels.", + default=10, + examples=[10, "$inputs.radius"], + json_schema_extra={ + "relevant_for": { + "annotator_type": { + "values": ["vertex", "vertex_label"], + }, + }, + }, + ) + + @classmethod + def get_execution_engine_compatibility(cls) -> Optional[str]: + return ">=1.2.0,<2.0.0" + + +class KeypointVisualizationBlockV1(VisualizationBlock): + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + self.annotatorCache = {} + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return KeypointManifest + + def getAnnotator( + self, + color: str, + text_color: str, + text_scale: float, + text_thickness: int, + text_padding: int, + thickness: int, + radius: int, + annotator_type: str, + ) -> 
sv.annotators.base.BaseAnnotator: + key = "_".join( + map( + str, + [ + color, + text_color, + text_scale, + text_thickness, + text_padding, + thickness, + radius, + annotator_type, + ], + ) + ) + + if key not in self.annotatorCache: + color = str_to_color(color) + text_color = str_to_color(text_color) + + if annotator_type == "edge": + self.annotatorCache[key] = sv.EdgeAnnotator( + color=color, + thickness=thickness, + ) + elif annotator_type == "vertex": + self.annotatorCache[key] = sv.VertexAnnotator( + color=color, + radius=radius, + ) + elif annotator_type == "vertex_label": + self.annotatorCache[key] = sv.VertexLabelAnnotator( + color=color, + text_color=text_color, + text_scale=text_scale, + text_thickness=text_thickness, + text_padding=text_padding, + border_radius=radius, + ) + + return self.annotatorCache[key] + + # Function to convert detections to keypoints + def convert_detections_to_keypoints(self, detections): + keypoints_xy = detections.data["keypoints_xy"] + keypoints_confidence = detections.data["keypoints_confidence"] + keypoints_class_name = detections.data["keypoints_class_name"] + class_id = detections.class_id + + keypoints = sv.KeyPoints( + xy=np.array(keypoints_xy, dtype=np.float32), + confidence=np.array(keypoints_confidence, dtype=np.float32), + class_id=np.array(class_id, dtype=int), + data={"class_name": np.array(keypoints_class_name, dtype=object)}, + ) + return keypoints + + def run( + self, + image: WorkflowImageData, + predictions: sv.Detections, + copy_image: bool, + annotator_type: Optional[str], + color: Optional[str], + text_color: Optional[str], + text_scale: Optional[float], + text_thickness: Optional[int], + text_padding: Optional[int], + thickness: Optional[int], + radius: Optional[int], + ) -> BlockResult: + annotator = self.getAnnotator( + color, + text_color, + text_scale, + text_thickness, + text_padding, + thickness, + radius, + annotator_type, + ) + + keypoints = self.convert_detections_to_keypoints(predictions) + + annotated_image = annotator.annotate( + scene=image.numpy_image.copy() if copy_image else image.numpy_image, + key_points=keypoints, + ) + return { + OUTPUT_IMAGE_KEY: WorkflowImageData.copy_and_replace( + origin_image_data=image, numpy_image=annotated_image + ) + } From 4d6f77a314c03748e747520d34d91eb941310519 Mon Sep 17 00:00:00 2001 From: Emily Gavrilenko Date: Thu, 31 Oct 2024 19:22:31 -0700 Subject: [PATCH 2/5] add unit tests and import --- inference/core/workflows/core_steps/loader.py | 4 + .../core_steps/visualizations/keypoint/v1.py | 25 +- .../visualizations/test_keypoints.py | 360 ++++++++++++++++++ 3 files changed, 377 insertions(+), 12 deletions(-) create mode 100644 tests/workflows/unit_tests/core_steps/visualizations/test_keypoints.py diff --git a/inference/core/workflows/core_steps/loader.py b/inference/core/workflows/core_steps/loader.py index ee54a7f15..803030315 100644 --- a/inference/core/workflows/core_steps/loader.py +++ b/inference/core/workflows/core_steps/loader.py @@ -239,6 +239,9 @@ from inference.core.workflows.core_steps.visualizations.halo.v1 import ( HaloVisualizationBlockV1, ) +from inference.core.workflows.core_steps.visualizations.keypoint.v1 import ( + KeypointVisualizationBlockV1, +) from inference.core.workflows.core_steps.visualizations.label.v1 import ( LabelVisualizationBlockV1, ) @@ -399,6 +402,7 @@ def load_blocks() -> List[Type[WorkflowBlock]]: VLMAsClassifierBlockV1, VLMAsDetectorBlockV1, YoloWorldModelBlockV1, + KeypointVisualizationBlockV1, ] diff --git 
a/inference/core/workflows/core_steps/visualizations/keypoint/v1.py b/inference/core/workflows/core_steps/visualizations/keypoint/v1.py index 77af57a68..e4c518517 100644 --- a/inference/core/workflows/core_steps/visualizations/keypoint/v1.py +++ b/inference/core/workflows/core_steps/visualizations/keypoint/v1.py @@ -1,38 +1,37 @@ +from pprint import pprint from typing import List, Literal, Optional, Type, Union +import numpy as np import supervision as sv from pydantic import ConfigDict, Field -from pprint import pprint -from inference.core.workflows.execution_engine.constants import ( - KEYPOINTS_XY_KEY_IN_SV_DETECTIONS, -) from inference.core.workflows.core_steps.visualizations.common.base import ( OUTPUT_IMAGE_KEY, - VisualizationManifest, VisualizationBlock, + VisualizationManifest, ) from inference.core.workflows.core_steps.visualizations.common.base_colorable import ( ColorableVisualizationBlock, ColorableVisualizationManifest, ) - from inference.core.workflows.core_steps.visualizations.common.utils import str_to_color +from inference.core.workflows.execution_engine.constants import ( + KEYPOINTS_XY_KEY_IN_SV_DETECTIONS, +) from inference.core.workflows.execution_engine.entities.base import WorkflowImageData from inference.core.workflows.execution_engine.entities.types import ( - KEYPOINT_DETECTION_PREDICTION_KIND, + FLOAT_KIND, INTEGER_KIND, + KEYPOINT_DETECTION_PREDICTION_KIND, STRING_KIND, - FLOAT_KIND, StepOutputSelector, WorkflowParameterSelector, ) from inference.core.workflows.prototypes.block import ( BlockResult, - WorkflowBlockManifest, WorkflowBlock, + WorkflowBlockManifest, ) -import numpy as np TYPE: str = "roboflow_core/keypoint_visualization@v1" SHORT_DESCRIPTION = "Draws keypoints on detected objects in an image." @@ -96,8 +95,8 @@ class KeypointManifest(VisualizationManifest): ) text_scale: Union[float, WorkflowParameterSelector(kind=[FLOAT_KIND])] = Field( # type: ignore description="Scale of the text.", - default=1.0, - examples=[1.0, "$inputs.text_scale"], + default=0.5, + examples=[0.5, "$inputs.text_scale"], json_schema_extra={ "relevant_for": { "annotator_type": { @@ -266,7 +265,9 @@ def run( annotator_type, ) + print("predictions", predictions) keypoints = self.convert_detections_to_keypoints(predictions) + print("keypoints", keypoints) annotated_image = annotator.annotate( scene=image.numpy_image.copy() if copy_image else image.numpy_image, diff --git a/tests/workflows/unit_tests/core_steps/visualizations/test_keypoints.py b/tests/workflows/unit_tests/core_steps/visualizations/test_keypoints.py new file mode 100644 index 000000000..d73ea56c9 --- /dev/null +++ b/tests/workflows/unit_tests/core_steps/visualizations/test_keypoints.py @@ -0,0 +1,360 @@ +import numpy as np +import pytest +import supervision as sv +from pydantic import ValidationError + +from inference.core.workflows.core_steps.visualizations.keypoint.v1 import ( + KeypointManifest, + KeypointVisualizationBlockV1, +) +from inference.core.workflows.execution_engine.entities.base import ( + ImageParentMetadata, + WorkflowImageData, +) + +TEST_PREDICTIONS = sv.Detections( + xyxy=np.array( + [[615, 16, 1438, 960], [145, 203, 760, 744]], + dtype=np.float64, + ), + class_id=np.array([0, 0]), + data={ + "keypoints_xy": [ + [ + [1135.0, 301.0], + [1217.0, 246.0], + [1072.0, 246.0], + [1326.0, 291.0], + [991.0, 290.0], + [1440.0, 623.0], + [836.0, 642.0], + [1440.0, 848.0], + [714.0, 924.0], + [1273.0, 624.0], + [785.0, 863.0], + [1303.0, 960.0], + [957.0, 960.0], + [1098.0, 940.0], + [798.0, 900.0], + 
[1145.0, 926.0], + [1131.0, 877.0], + ], + [ + [547.0, 356.0], + [589.0, 334.0], + [504.0, 329.0], + [636.0, 372.0], + [444.0, 362.0], + [690.0, 574.0], + [358.0, 537.0], + [722.0, 776.0], + [196.0, 677.0], + [689.0, 759.0], + [325.0, 532.0], + [623.0, 871.0], + [419.0, 859.0], + [575.0, 813.0], + [365.0, 809.0], + [484.0, 835.0], + [406.0, 826.0], + ], + ], + "keypoints_confidence": [ + [ + 0.9955374002456665, + 0.9850325584411621, + 0.9924459457397461, + 0.6771311163902283, + 0.8257092237472534, + 0.6847628355026245, + 0.988980770111084, + 0.020470470190048218, + 0.4994047284126282, + 0.10626623034477234, + 0.4919512867927551, + 0.002728283405303955, + 0.013417482376098633, + 0.00026804208755493164, + 0.0010205507278442383, + 0.00011846423149108887, + 0.0002935826778411865, + ], + [ + 0.9964120388031006, + 0.9867579340934753, + 0.9912893772125244, + 0.7188618779182434, + 0.8569645881652832, + 0.9544534683227539, + 0.9891356229782104, + 0.503325343132019, + 0.8857028484344482, + 0.6204462647438049, + 0.9193166494369507, + 0.13720226287841797, + 0.26782700419425964, + 0.0065190792083740234, + 0.014030814170837402, + 0.001272827386856079, + 0.0018590092658996582, + ], + ], + "keypoints_class_name": [ + [ + "nose", + "left_eye", + "right_eye", + "left_ear", + "right_ear", + "left_shoulder", + "right_shoulder", + "left_elbow", + "right_elbow", + "left_wrist", + "right_wrist", + "left_hip", + "right_hip", + "left_knee", + "right_knee", + "left_ankle", + "right_ankle", + ], + [ + "nose", + "left_eye", + "right_eye", + "left_ear", + "right_ear", + "left_shoulder", + "right_shoulder", + "left_elbow", + "right_elbow", + "left_wrist", + "right_wrist", + "left_hip", + "right_hip", + "left_knee", + "right_knee", + "left_ankle", + "right_ankle", + ], + ], + }, +) + + +@pytest.mark.parametrize( + "type_alias", + ["roboflow_core/keypoint_visualization@v1", "KeypointVisualization"], +) +@pytest.mark.parametrize("images_field_alias", ["images", "image"]) +def test_keypoint_validation_when_valid_manifest_is_given( + type_alias: str, + images_field_alias: str, +) -> None: + # given + data = { + "type": type_alias, + "name": "keypoints1", + "predictions": "$steps.kp_model.predictions", + images_field_alias: "$inputs.image", + "annotator_type": "edge", + "color": "#A351FB", + "thickness": 2, + "radius": 10, + } + + # when + result = KeypointManifest.model_validate(data) + + # then + assert result == KeypointManifest( + type=type_alias, + name="keypoints1", + images="$inputs.image", + predictions="$steps.kp_model.predictions", + annotator_type="edge", + color="#A351FB", + text_color="black", + text_scale=0.5, + text_thickness=1, + text_padding=10, + thickness=2, + radius=10, + ) + + +def test_keypoint_validation_when_invalid_image_is_given() -> None: + # given + data = { + "type": "KeypointVisualization", + "name": "keypoints1", + "images": "invalid", + "predictions": "$steps.kp_model.predictions", + "annotator_type": "edge", + "color": "#A351FB", + } + + # when + with pytest.raises(ValidationError): + _ = KeypointManifest.model_validate(data) + + +def test_keypoint_visualization_block_edge() -> None: + # given + block = KeypointVisualizationBlockV1() + + start_image = np.zeros((1000, 1000, 3), dtype=np.uint8) + output = block.run( + image=WorkflowImageData( + parent_metadata=ImageParentMetadata(parent_id="some"), + numpy_image=start_image, + ), + predictions=TEST_PREDICTIONS, + copy_image=True, + annotator_type="edge", + color="#A351FB", + text_color="black", + text_scale=0.5, + text_thickness=1, + 
text_padding=10,
+        thickness=2,
+        radius=10,
+    )
+
+    assert output is not None
+    assert "image" in output
+    assert hasattr(output.get("image"), "numpy_image")
+
+    # dimensions of output match input
+    assert output.get("image").numpy_image.shape == (1000, 1000, 3)
+    # check if the image is modified
+    assert not np.array_equal(
+        output.get("image").numpy_image, np.zeros((1000, 1000, 3), dtype=np.uint8)
+    )
+
+    # check that the image is copied
+    assert (
+        output.get("image").numpy_image.__array_interface__["data"][0]
+        != start_image.__array_interface__["data"][0]
+    )
+
+
+def test_keypoint_visualization_block_vertex() -> None:
+    # given
+    block = KeypointVisualizationBlockV1()
+
+    start_image = np.zeros((1000, 1000, 3), dtype=np.uint8)
+    output = block.run(
+        image=WorkflowImageData(
+            parent_metadata=ImageParentMetadata(parent_id="some"),
+            numpy_image=start_image,
+        ),
+        predictions=TEST_PREDICTIONS,
+        copy_image=True,
+        annotator_type="vertex",
+        color="#A351FB",
+        text_color="black",
+        text_scale=0.5,
+        text_thickness=1,
+        text_padding=10,
+        thickness=2,
+        radius=10,
+    )
+
+    assert output is not None
+    assert "image" in output
+    assert hasattr(output.get("image"), "numpy_image")
+
+    # dimensions of output match input
+    assert output.get("image").numpy_image.shape == (1000, 1000, 3)
+    # check if the image is modified
+    assert not np.array_equal(
+        output.get("image").numpy_image, np.zeros((1000, 1000, 3), dtype=np.uint8)
+    )
+
+    # check that the image is copied
+    assert (
+        output.get("image").numpy_image.__array_interface__["data"][0]
+        != start_image.__array_interface__["data"][0]
+    )
+
+
+def test_keypoint_visualization_block_vertex_label() -> None:
+    # given
+    block = KeypointVisualizationBlockV1()
+
+    start_image = np.zeros((1000, 1000, 3), dtype=np.uint8)
+    output = block.run(
+        image=WorkflowImageData(
+            parent_metadata=ImageParentMetadata(parent_id="some"),
+            numpy_image=start_image,
+        ),
+        predictions=TEST_PREDICTIONS,
+        copy_image=True,
+        annotator_type="vertex_label",
+        color="#A351FB",
+        text_color="black",
+        text_scale=0.5,
+        text_thickness=1,
+        text_padding=10,
+        thickness=2,
+        radius=10,
+    )
+
+    assert output is not None
+    assert "image" in output
+    assert hasattr(output.get("image"), "numpy_image")
+
+    # dimensions of output match input
+    assert output.get("image").numpy_image.shape == (1000, 1000, 3)
+    # check if the image is modified
+    assert not np.array_equal(
+        output.get("image").numpy_image, np.zeros((1000, 1000, 3), dtype=np.uint8)
+    )
+
+    # check that the image is copied
+    assert (
+        output.get("image").numpy_image.__array_interface__["data"][0]
+        != start_image.__array_interface__["data"][0]
+    )
+
+
+def test_keypoint_visualization_block_nocopy() -> None:
+    # given
+    block = KeypointVisualizationBlockV1()
+
+    start_image = np.zeros((1000, 1000, 3), dtype=np.uint8)
+    output = block.run(
+        image=WorkflowImageData(
+            parent_metadata=ImageParentMetadata(parent_id="some"),
+            numpy_image=start_image,
+        ),
+        predictions=TEST_PREDICTIONS,
+        copy_image=False,
+        annotator_type="edge",
+        color="#A351FB",
+        text_color="black",
+        text_scale=0.5,
+        text_thickness=1,
+        text_padding=10,
+        thickness=2,
+        radius=10,
+    )
+
+    assert output is not None
+    assert "image" in output
+    assert hasattr(output.get("image"), "numpy_image")
+
+    # dimensions of output match input
+    assert output.get("image").numpy_image.shape == (1000, 1000, 3)
+    # check if the image is modified
+    assert not np.array_equal(
+        output.get("image").numpy_image, np.zeros((1000, 1000, 3), dtype=np.uint8)
+    )
+
+    # check 
if the image reference references the same memory space as the start_image + assert ( + output.get("image").numpy_image.__array_interface__["data"][0] + == start_image.__array_interface__["data"][0] + ) From 5d068448b47bb307a3c190a40d3bd65b69c6a336 Mon Sep 17 00:00:00 2001 From: Emily Gavrilenko Date: Thu, 31 Oct 2024 19:27:49 -0700 Subject: [PATCH 3/5] update keypoint icon --- .../core/workflows/core_steps/visualizations/keypoint/v1.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/inference/core/workflows/core_steps/visualizations/keypoint/v1.py b/inference/core/workflows/core_steps/visualizations/keypoint/v1.py index e4c518517..bdedc5b33 100644 --- a/inference/core/workflows/core_steps/visualizations/keypoint/v1.py +++ b/inference/core/workflows/core_steps/visualizations/keypoint/v1.py @@ -54,7 +54,7 @@ class KeypointManifest(VisualizationManifest): "block_type": "visualization", "ui_manifest": { "section": "visualization", - "icon": "far fa-arrow-down-up-across-line", + "icon": "far fa-braille", "blockPriority": 20, }, } From 0780bee5a7b994a55552df1e0f835462c37328fb Mon Sep 17 00:00:00 2001 From: Grzegorz Klimaszewski <166530809+grzegorz-roboflow@users.noreply.github.com> Date: Fri, 1 Nov 2024 09:55:12 +0100 Subject: [PATCH 4/5] Add integration test --- .../core_steps/visualizations/keypoint/v1.py | 17 +--- ...st_workflow_with_keypoint_visualization.py | 77 +++++++++++++++++++ 2 files changed, 80 insertions(+), 14 deletions(-) create mode 100644 tests/workflows/integration_tests/execution/test_workflow_with_keypoint_visualization.py diff --git a/inference/core/workflows/core_steps/visualizations/keypoint/v1.py b/inference/core/workflows/core_steps/visualizations/keypoint/v1.py index bdedc5b33..06d128a92 100644 --- a/inference/core/workflows/core_steps/visualizations/keypoint/v1.py +++ b/inference/core/workflows/core_steps/visualizations/keypoint/v1.py @@ -1,5 +1,4 @@ -from pprint import pprint -from typing import List, Literal, Optional, Type, Union +from typing import Literal, Optional, Type, Union import numpy as np import supervision as sv @@ -10,14 +9,7 @@ VisualizationBlock, VisualizationManifest, ) -from inference.core.workflows.core_steps.visualizations.common.base_colorable import ( - ColorableVisualizationBlock, - ColorableVisualizationManifest, -) from inference.core.workflows.core_steps.visualizations.common.utils import str_to_color -from inference.core.workflows.execution_engine.constants import ( - KEYPOINTS_XY_KEY_IN_SV_DETECTIONS, -) from inference.core.workflows.execution_engine.entities.base import WorkflowImageData from inference.core.workflows.execution_engine.entities.types import ( FLOAT_KIND, @@ -29,7 +21,6 @@ ) from inference.core.workflows.prototypes.block import ( BlockResult, - WorkflowBlock, WorkflowBlockManifest, ) @@ -75,13 +66,13 @@ class KeypointManifest(VisualizationManifest): json_schema_extra={"always_visible": True}, ) - color: Union[str, WorkflowParameterSelector(kind=[STRING_KIND])] = Field( + color: Union[str, WorkflowParameterSelector(kind=[STRING_KIND])] = Field( # type: ignore description="Color of the keypoint.", default="#A351FB", examples=["#A351FB", "green", "$inputs.color"], ) - text_color: Union[str, WorkflowParameterSelector(kind=[STRING_KIND])] = Field( + text_color: Union[str, WorkflowParameterSelector(kind=[STRING_KIND])] = Field( # type: ignore description="Text color of the keypoint.", default="black", examples=["black", "$inputs.text_color"], @@ -265,9 +256,7 @@ def run( annotator_type, ) - print("predictions", 
predictions) keypoints = self.convert_detections_to_keypoints(predictions) - print("keypoints", keypoints) annotated_image = annotator.annotate( scene=image.numpy_image.copy() if copy_image else image.numpy_image, diff --git a/tests/workflows/integration_tests/execution/test_workflow_with_keypoint_visualization.py b/tests/workflows/integration_tests/execution/test_workflow_with_keypoint_visualization.py new file mode 100644 index 000000000..ca906ae32 --- /dev/null +++ b/tests/workflows/integration_tests/execution/test_workflow_with_keypoint_visualization.py @@ -0,0 +1,77 @@ +from datetime import datetime +import os +import time + +import numpy as np +from unittest import mock +from unittest.mock import MagicMock + +from inference.core.env import WORKFLOWS_MAX_CONCURRENT_STEPS +from inference.core.managers.base import ModelManager +from inference.core.workflows.core_steps.common.entities import StepExecutionMode +from inference.core.workflows.execution_engine.core import ExecutionEngine +from inference.core.workflows.execution_engine.entities.base import VideoMetadata +from inference.core.workflows.execution_engine.introspection import blocks_loader + + +WORKFLOW_KEYPOINT_VISUALIZATION = { + "version": "1.1", + "inputs": [ + {"type": "WorkflowImage", "name": "image"}, + { + "type": "WorkflowParameter", + "name": "model_id", + "default_value": "yolov8n-pose-640", + }, + ], + "steps": [ + { + "type": "KeypointsDetectionModel", + "name": "model", + "image": "$inputs.image", + "model_id": "$inputs.model_id", + }, + { + "type": "roboflow_core/keypoint_visualization@v1", + "name": "visualization", + "image": "$inputs.image", + "predictions": "$steps.model.predictions" + } + ], + "outputs": [ + { + "type": "JsonField", + "name": "visualization", + "selector": "$steps.visualization.image", + }, + ], +} + + +def test_workflow_keypoint_visualization( + model_manager: ModelManager, + crowd_image: np.ndarray, +) -> None: + # given + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_KEYPOINT_VISUALIZATION, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + result = execution_engine.run( + runtime_parameters={ + "image": crowd_image, + } + ) + + assert isinstance(result, list) + assert len(result) == 1 + assert "visualization" in result[0] + assert result[0]["visualization"].numpy_image.shape == crowd_image.shape From a6dbbf58771e4538b864e663fcaa8bfa6e7d4ef0 Mon Sep 17 00:00:00 2001 From: Grzegorz Klimaszewski <166530809+grzegorz-roboflow@users.noreply.github.com> Date: Fri, 1 Nov 2024 10:01:01 +0100 Subject: [PATCH 5/5] formatting --- .../core_steps/visualizations/keypoint/v1.py | 5 +---- .../test_workflow_with_keypoint_visualization.py | 13 +++---------- 2 files changed, 4 insertions(+), 14 deletions(-) diff --git a/inference/core/workflows/core_steps/visualizations/keypoint/v1.py b/inference/core/workflows/core_steps/visualizations/keypoint/v1.py index 06d128a92..624a2e30b 100644 --- a/inference/core/workflows/core_steps/visualizations/keypoint/v1.py +++ b/inference/core/workflows/core_steps/visualizations/keypoint/v1.py @@ -19,10 +19,7 @@ StepOutputSelector, WorkflowParameterSelector, ) -from inference.core.workflows.prototypes.block import ( - BlockResult, - WorkflowBlockManifest, -) +from inference.core.workflows.prototypes.block 
import BlockResult, WorkflowBlockManifest TYPE: str = "roboflow_core/keypoint_visualization@v1" SHORT_DESCRIPTION = "Draws keypoints on detected objects in an image." diff --git a/tests/workflows/integration_tests/execution/test_workflow_with_keypoint_visualization.py b/tests/workflows/integration_tests/execution/test_workflow_with_keypoint_visualization.py index ca906ae32..c3ce3ff94 100644 --- a/tests/workflows/integration_tests/execution/test_workflow_with_keypoint_visualization.py +++ b/tests/workflows/integration_tests/execution/test_workflow_with_keypoint_visualization.py @@ -1,17 +1,10 @@ -from datetime import datetime -import os -import time - import numpy as np -from unittest import mock -from unittest.mock import MagicMock +import cv2 from inference.core.env import WORKFLOWS_MAX_CONCURRENT_STEPS from inference.core.managers.base import ModelManager from inference.core.workflows.core_steps.common.entities import StepExecutionMode from inference.core.workflows.execution_engine.core import ExecutionEngine -from inference.core.workflows.execution_engine.entities.base import VideoMetadata -from inference.core.workflows.execution_engine.introspection import blocks_loader WORKFLOW_KEYPOINT_VISUALIZATION = { @@ -35,8 +28,8 @@ "type": "roboflow_core/keypoint_visualization@v1", "name": "visualization", "image": "$inputs.image", - "predictions": "$steps.model.predictions" - } + "predictions": "$steps.model.predictions", + }, ], "outputs": [ {
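
For reference, below is a minimal, self-contained sketch (not part of the patch
series) of the conversion at the core of this block: repackaging the keypoint
arrays that keypoint models attach to sv.Detections into an sv.KeyPoints object
that supervision's keypoint annotators can draw. It assumes a supervision
release that ships sv.KeyPoints and sv.VertexAnnotator (0.21+), and it
abbreviates the 17-keypoint COCO fixture from the unit tests above to two
keypoints per detection purely for brevity.

import numpy as np
import supervision as sv

# Keypoint predictions arrive as sv.Detections, with per-detection keypoint
# arrays tucked into the `data` dictionary (abbreviated here; COCO pose models
# emit 17 keypoints per person, as in the test fixture above).
detections = sv.Detections(
    xyxy=np.array([[615, 16, 1438, 960]], dtype=np.float64),
    class_id=np.array([0]),
    data={
        "keypoints_xy": [[[1135.0, 301.0], [1217.0, 246.0]]],
        "keypoints_confidence": [[0.99, 0.98]],
        "keypoints_class_name": [["nose", "left_eye"]],
    },
)

# Mirror of convert_detections_to_keypoints: rebuild the nested arrays as an
# sv.KeyPoints object, the structure the supervision annotators expect.
keypoints = sv.KeyPoints(
    xy=np.array(detections.data["keypoints_xy"], dtype=np.float32),
    confidence=np.array(detections.data["keypoints_confidence"], dtype=np.float32),
    class_id=np.array(detections.class_id, dtype=int),
    data={"class_name": np.array(detections.data["keypoints_class_name"], dtype=object)},
)

# annotator_type="vertex" maps to sv.VertexAnnotator in getAnnotator, which
# draws a filled circle at every keypoint location.
annotator = sv.VertexAnnotator(color=sv.Color.from_hex("#A351FB"), radius=10)
annotated = annotator.annotate(
    scene=np.zeros((1000, 1000, 3), dtype=np.uint8),
    key_points=keypoints,
)
assert annotated.shape == (1000, 1000, 3)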