diff --git a/docs/workflows/create_workflow_block.md b/docs/workflows/create_workflow_block.md index 1fc1d2b6c3..c09b72c5a5 100644 --- a/docs/workflows/create_workflow_block.md +++ b/docs/workflows/create_workflow_block.md @@ -1528,7 +1528,7 @@ the method signatures. In this example, the block visualises crops predictions and creates tiles presenting all crops predictions in single output image. - ```{ .py linenums="1" hl_lines="29-31 48-49 59-60"} + ```{ .py linenums="1" hl_lines="30-32 34-36 53-55 65-66"} from typing import List, Literal, Type, Union import supervision as sv @@ -1556,10 +1556,15 @@ the method signatures. crops_predictions: Selector( kind=[OBJECT_DETECTION_PREDICTION_KIND] ) + scalar_parameter: Union[float, Selector()] @classmethod def get_output_dimensionality_offset(cls) -> int: return -1 + + @classmethod + def get_parameters_enforcing_auto_batch_casting(cls) -> List[str]: + return ["crops", "crops_predictions"] @classmethod def describe_outputs(cls) -> List[OutputDefinition]: @@ -1578,6 +1583,7 @@ the method signatures. self, crops: Batch[WorkflowImageData], crops_predictions: Batch[sv.Detections], + scalar_parameter: float, ) -> BlockResult: annotator = sv.BoxAnnotator() visualisations = [] @@ -1591,18 +1597,22 @@ the method signatures. return {"visualisations": tile} ``` - * in lines `29-31` manifest class declares output dimensionality + * in lines `30-32` manifest class declares output dimensionality offset - value `-1` should be understood as decreasing dimensionality level by `1` - * in lines `48-49` you can see the impact of output dimensionality decrease - on the method signature. Both inputs are artificially wrapped in `Batch[]` container. - This is done by Execution Engine automatically on output dimensionality decrease when - all inputs have the same dimensionality to enable access to all elements occupying - the last dimensionality level. 
Obviously, only elements related to the same element + * in lines `34-36` manifest class declares `run(...)` method inputs that will be subject to auto-batch casting + ensuring that the signature is always stable. Auto-batch casting was introduced in Execution Engine `v0.1.6.0` + - refer to [changelog](./execution_engine_changelog.md) for more details. + + * in lines `53-55` you can see the impact of output dimensionality decrease + on the method signature. First two inputs (declared in line `36`) are artificially wrapped in `Batch[]` + container, whereas `scalar_parameter` remains primitive type. This is done by Execution Engine automatically + on output dimensionality decrease when all inputs have the same dimensionality to enable access to + all elements occupying the last dimensionality level. Obviously, only elements related to the same element from top-level batch will be grouped. For instance, if you had two input images that you cropped - crops from those two different images will be grouped separately. - * lines `59-60` illustrate how output is constructed - single value is returned and that value + * lines `65-66` illustrate how output is constructed - single value is returned and that value will be indexed by Execution Engine in output batch with reduced dimensionality === "different input dimensionalities" diff --git a/docs/workflows/execution_engine_changelog.md b/docs/workflows/execution_engine_changelog.md index 7fc53c7951..b9c0f1ebac 100644 --- a/docs/workflows/execution_engine_changelog.md +++ b/docs/workflows/execution_engine_changelog.md @@ -2,42 +2,255 @@ Below you can find the changelog for Execution Engine. -## Execution Engine `v1.2.0` | inference `v0.23.0` +## Execution Engine `v1.6.0` | inference `v0.53.0` -* The [`video_metadata` kind](/workflows/kinds/video_metadata.md) has been deprecated, and we **strongly recommend discontinuing its use for building -blocks moving forward**. 
As an alternative, the [`image` kind](/workflows/kinds/image.md) has been extended to support the same metadata as -[`video_metadata` kind](/workflows/kinds/video_metadata.md), which can now be provided optionally. This update is -**non-breaking** for existing blocks, but **some older blocks** that produce images **may become incompatible** with -**future** video processing blocks. +!!! Note "Change may require attention" -??? warning "Potential blocks incompatibility" + This release introduces upgrades and new features with **no changes required** to existing workflows. + Some blocks may need to be upgraded to take advantage of the latest Execution Engine capabilities. - As previously mentioned, adding `video_metadata` as an optional field to the internal representation of - [`image` kind](/workflows/kinds/image.md) (`WorkflowImageData` class) - may introduce some friction between existing blocks that output the [`image` kind](/workflows/kinds/image.md) and - future video processing blocks that rely on `video_metadata` being part of `image` representation. +Prior versions of the Execution Engine had significant limitations when interacting with certain types of +blocks - specifically those operating in Single Instruction, Multiple Data (SIMD) mode. These blocks are designed to +process batches of inputs at once, apply the same operation to each element, and return results for the entire batch. + +For example, the `run(...)` method of such a block might look like: + +```python +def run(self, image: Batch[WorkflowImageData], confidence: float): + pass +``` + +In the manifest, the `image` field is declared as accepting batches. + +The issue arose when the input image came from a block that did not operate on batches. 
In such cases, the +Execution Engine was unable to construct a batch from individual images, which often resulted in frustrating +compilation errors such as: + +``` +Detected invalid reference plugged into property `images` of step `$steps.model` - the step property +strictly requires batch-oriented inputs, yet the input selector holds non-batch oriented input - this indicates +the problem with construction of your Workflow - usually the problem occurs when non-batch oriented step inputs are +filled with outputs of non batch-oriented steps or non batch-oriented inputs. +``` + +In Execution Engine `v1.6.0`, this limitation has been removed, introducing the following behaviour: + +* When it is detected that a given input must be batch-oriented, a procedure called **Auto Batch Casting** is applied. +This automatically converts the input into a `Batch[T]`. Since all batch-mode inputs were already explicitly denoted in +manifests, most blocks (with exceptions noted below) benefit from this upgrade without requiring any internal changes. + +* The dimensionality (level of nesting) of an auto-batch cast parameter is determined at compilation time, based on the +context of the specific block in the workflow as well as its manifest. If other batch-oriented inputs are present +(referred to as *lineage supports*), the Execution Engine uses them as references when constructing auto-casted +batches. This ensures that the number of elements in each batch dimension matches the other data fed into the step +(simulating what would have been asserted if an actual batch input had been provided). If there are no +*lineage supports*, or if the block manifest requires it (e.g. input dimensionality offset is set), the missing +dimensions are generated similarly to the +[`torch.unsqueeze(...)` operation](https://docs.pytorch.org/docs/stable/generated/torch.unsqueeze.html). + +* Step outputs are then evaluated against the presence of an Auto Batch Casting context. 
Based on the evaluation, +outputs are saved either as batches or as scalars, ensuring that the effect of casting remains local, with the only +exception being output dimensionality changes introduced by the block itself. As a side effect, it is now possible to: + + * **create output batches from scalars** (when the step increases dimensionality), and + + * **collapse batches into scalars** (when the block decreases dimensionality). + +* The two potential friction point arises - first **when a block that does not accept batches** (and thus does not denote +batch-accepting inputs) **decreases output dimensionality**. In previous versions, the Execution Engine handled this by +applying dimensionality wrapping: all batch-oriented inputs were wrapped with an additional `Batch[T]` dimension, +allowing the block’s `run(...)` method to perform reduce operations across the list dimension. With Auto Batch Casting, +however, such blocks no longer provide the Execution Engine with a clear signal about whether certain inputs are +scalars or batches, making casting nondeterministic. To address this, a new manifest method was introduced: +`get_parameters_enforcing_auto_batch_casting(...)`. This method must return the list of parameters for which batch +casting should be enforced when dimensionality is decreased. It is not expected to be used in any other context. + +!!! warning "Impact of new method on existing blocks" + + The requirement of defining `get_parameters_enforcing_auto_batch_casting(...)` method to fully use + Auto Batch Casting feature in the case described above is non-strict. If the block will not be changed, + the only effect will be that workflows wchich were **previously failing** with compilation error may + work or fail with **runtime error**, dependent on the details of block `run(...)` method implementation. 
+ +* The second friction point arises when there is a block declaring input fields supporting batches and scalars using +`get_parameters_accepting_batches_and_scalars(...)` - by default, Execution Engine will skip auto-casting for such +parameters, as the method was historically **always a way to declare that block itself has ability to broadcast scalars +into batches** - see +[implementation of `roboflow_core/detections_transformation@v1`](/inference/core/workflows/core_steps/transformations/detections_transformation/v1.py) +block. In a way, Auto Batch Casting is *redundant* for those blocks - so we propose leaving them as is and +upgrade to use `get_parameters_enforcing_auto_batch_casting(...)` instead of +`get_parameters_accepting_batches_and_scalars(...)` in new versions of such blocks. + +* In earlier versions, a hard constraint existed: dimensionality collapse could only occur at levels ≥ 2 (i.e. only +on nested batches). This limitation is now removed. Dimensionality collapse blocks may also operate on scalars, with +the output dimensionality “bouncing off” the zero ground. + + +There is one **key change in how outputs are built.** In earlier versions of Execution Error, a block was not allowed +to produce a `Batch[X]` directly at the first dimension level — that space was reserved for mapping onto input batches. +Starting with version `v1.6.0`, this restriction has been removed. + +Previously, outputs were always returned as a list of elements: + +* aligned with the input batches, or + +* a single-element list if only scalars were given as inputs. + +This raised a question: what should happen if a block now produces a batch at the first dimension level? +We cannot simply `zip(...)` it with input-based outputs, since the size of these newly generated batches might not +match the number of input elements — making the operation ambiguous. 
+ +To resolve this, we adopted the following rule: + +* Treat the situation as if there were a **"dummy" input batch of size 1**. + +* Consider all batches produced from scalar inputs as being one level deeper than they appear. + +* This follows the principle of broadcasting, allowing such outputs to expand consistently across all elements. + +* Input batch may vanish as a result of execution, but when this happens and new first-level dimension emerges, it +is still going to be virtually nested to ensure outputs consistency. + +**Example:** + +``` +(NO INPUTS) IMAGE FETCHER BLOCK --> image --> OD MODEL --> predictons --> CROPS --> output will be: ["crops": [, , ...]] +``` + +It is important to note that **results generated from previously created workflows valid will be the same** and the +change will only affect new workflows created to utilise new functionalities. + +### Migration guide + +??? Hint "Adding `get_parameters_enforcing_auto_batch_casting(...)` method" + + Blocks which decrease output dimensionality and do not define batch-oriented inputs needs to + declare all inputs which implementation expects to have wrapped in `Batch[T]` with the new class + method of block manifest called `get_parameters_enforcing_auto_batch_casting(...)` + + ```{ .py linenums="1" hl_lines="34-36 53-54"} + from typing import List, Literal, Type, Union + + import supervision as sv - The issue arises because, while we can provide **default** values for `video_metadata` in `image` without - explicitly copying them from the input, any non-default metadata that was added upstream may be lost. - This can lead to downstream blocks that depend on the `video_metadata` not functioning as expected. 
+ from inference.core.workflows.execution_engine.entities.base import ( + Batch, + OutputDefinition, + WorkflowImageData, + ) + from inference.core.workflows.execution_engine.entities.types import ( + IMAGE_KIND, + OBJECT_DETECTION_PREDICTION_KIND, + Selector, + ) + from inference.core.workflows.prototypes.block import ( + BlockResult, + WorkflowBlock, + WorkflowBlockManifest, + ) + + + class BlockManifest(WorkflowBlockManifest): + type: Literal["my_plugin/tile_detections@v1"] + crops: Selector(kind=[IMAGE_KIND]) + crops_predictions: Selector( + kind=[OBJECT_DETECTION_PREDICTION_KIND] + ) + scalar_parameter: Union[float, Selector()] + + @classmethod + def get_output_dimensionality_offset(cls) -> int: + return -1 + + @classmethod + def get_parameters_enforcing_auto_batch_casting(cls) -> List[str]: + return ["crops", "crops_predictions"] + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [ + OutputDefinition(name="visualisations", kind=[IMAGE_KIND]), + ] + + + class TileDetectionsBlock(WorkflowBlock): + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return BlockManifest + + def run( + self, + crops: Batch[WorkflowImageData], + crops_predictions: Batch[sv.Detections], + scalar_parameter: float, + ) -> BlockResult: + print("This is parameter which will not be auto-batch cast!", scalar_parameter) + annotator = sv.BoxAnnotator() + visualisations = [] + for image, prediction in zip(crops, crops_predictions): + annotated_image = annotator.annotate( + image.numpy_image.copy(), + prediction, + ) + visualisations.append(annotated_image) + tile = sv.create_tiles(visualisations) + return {"visualisations": tile} + ``` - We've updated all existing `roboflow_core` blocks to account for this, but blocks created before this change in - external repositories may cause issues in workflows where their output images are used by video processing blocks. 
+ * in lines `34-36` one needs to add declaration of fields that will be subject to enforced auto-batch casting + * as a result of the above, input parameters of run method (lines `53-54`) will be wrapped into `Batch[T]` by + Execution Engine. -* While the deprecated [`video_metadata` kind](/workflows/kinds/video_metadata.md) is still available for use, it will be fully removed in -Execution Engine version `v2.0.0`. +## Execution Engine `v1.5.0` | inference `v0.38.0` -!!! warning "Breaking change planned - Execution Engine `v2.0.0`" +!!! Note "Change does not require any action" + + This change does not require any change from Workflows users. This is just performance optimisation. - [`video_metadata` kind](/workflows/kinds/video_metadata.md) got deprecated and will be removed in `v2.0.0` +* Exposed new parameter in the init method of `BaseExecutionEngine` class - `executor` which can accept instance of +Python `ThreadPoolExecutor` to be used by execution engine. Thanks to this change, processing should be faster, as +each `BaseExecutionEngine.run(...)` will not require dedicated instance of `ThreadPoolExecutor` as it was so far. +Additionally, we are significantly limiting threads spawning which may also be a benefit in some installations. +* Despite the change, Execution Engine maintains the limit of concurrently executed steps - by limiting the number of +steps that run through the executor at a time (since Execution Engine is no longer in control of `ThreadPoolExecutor` +creation, and it is possible for the pool to have more workers available). -* As a result of the changes mentioned above, the internal representation of the [`image` kind](/workflows/kinds/image.md) has been updated to -include a new `video_metadata` property. This property can be optionally set in the constructor; if not provided, -a default value with reasonable defaults will be used. 
To simplify metadata manipulation within blocks, we have -introduced two new class methods: `WorkflowImageData.copy_and_replace(...)` and `WorkflowImageData.create_crop(...)`. -For more details, refer to the updated [`WoorkflowImageData` usage guide](/workflows/internal_data_types.md#workflowimagedata). +??? Hint "How to inject `ThreadPoolExecutor` to Execution Engine?" + + ```python + from concurrent.futures import ThreadPoolExecutor + workflow_init_parameters = { ... } + with ThreadPoolExecutor(max_workers=...) as thread_pool_executor: + execution_engine = ExecutionEngine.init( + init_parameters=workflow_init_parameters, + max_concurrent_steps=4, + workflow_id="your-workflow-id", + executor=thread_pool_executor, + ) + runtime_parameters = { + "image": cv2.imread("your-image-path") + } + results = execution_engine.run(runtime_parameters=runtime_parameters) + ``` + +## Execution Engine `v1.4.0` | inference `v0.29.0` + +* Added new kind - [`secret`](/workflows/kinds/secret.md) to represent credentials. **No action needed** for existing +blocks, yet it is expected that over time blocks developers should use this kind, whenever block is to accept secret +value as parameter. + +* Fixed issue with results serialization introduced in `v1.3.0` - by mistake, Execution Engine was not serializing +non-batch oriented outputs. + +* Fixed Execution Engine bug with preparing inputs for steps. For non-SIMD steps before, while collecting inputs +in runtime, `WorkflowBlockManifest.accepts_empty_input()` method result was being ignored - causing the bug when +one non-SIMD step was feeding empty values to downstream blocks. Additionally, in the light of changes made in `v1.3.0`, +thanks to which non-SIMD blocks can easily feed inputs for downstream SIMD steps - it is needed to check if +upstream non-SIMD block yielded non-empty results (as SIMD block may not accept empty results). This check was added. 
+**No action needed** for existing blocks, but this fix may fix previously broken Workflows. ## Execution Engine `v1.3.0` | inference `v0.27.0` @@ -303,52 +516,41 @@ subsets of steps**, enabling building such tools as debuggers. serializer/deserializer defined as the last one will be in use. -## Execution Engine `v1.4.0` | inference `v0.29.0` +## Execution Engine `v1.2.0` | inference `v0.23.0` -* Added new kind - [`secret`](/workflows/kinds/secret.md) to represent credentials. **No action needed** for existing -blocks, yet it is expected that over time blocks developers should use this kind, whenever block is to accept secret -value as parameter. +* The [`video_metadata` kind](/workflows/kinds/video_metadata.md) has been deprecated, and we **strongly recommend discontinuing its use for building +blocks moving forward**. As an alternative, the [`image` kind](/workflows/kinds/image.md) has been extended to support the same metadata as +[`video_metadata` kind](/workflows/kinds/video_metadata.md), which can now be provided optionally. This update is +**non-breaking** for existing blocks, but **some older blocks** that produce images **may become incompatible** with +**future** video processing blocks. -* Fixed issue with results serialization introduced in `v1.3.0` - by mistake, Execution Engine was not serializing -non-batch oriented outputs. +??? warning "Potential blocks incompatibility" -* Fixed Execution Engine bug with preparing inputs for steps. For non-SIMD steps before, while collecting inputs -in runtime, `WorkflowBlockManifest.accepts_empty_input()` method result was being ignored - causing the bug when -one non-SIMD step was feeding empty values to downstream blocks. Additionally, in the light of changes made in `v1.3.0`, -thanks to which non-SIMD blocks can easily feed inputs for downstream SIMD steps - it is needed to check if -upstream non-SIMD block yielded non-empty results (as SIMD block may not accept empty results). This check was added. 
-**No action needed** for existing blocks, but this fix may fix previously broken Workflows. + As previously mentioned, adding `video_metadata` as an optional field to the internal representation of + [`image` kind](/workflows/kinds/image.md) (`WorkflowImageData` class) + may introduce some friction between existing blocks that output the [`image` kind](/workflows/kinds/image.md) and + future video processing blocks that rely on `video_metadata` being part of `image` representation. + + The issue arises because, while we can provide **default** values for `video_metadata` in `image` without + explicitly copying them from the input, any non-default metadata that was added upstream may be lost. + This can lead to downstream blocks that depend on the `video_metadata` not functioning as expected. + We've updated all existing `roboflow_core` blocks to account for this, but blocks created before this change in + external repositories may cause issues in workflows where their output images are used by video processing blocks. -## Execution Engine `v1.5.0` | inference `v0.38.0` -!!! Note "Change does not require any action" - - This change does not require any change from Workflows users. This is just performance optimisation. +* While the deprecated [`video_metadata` kind](/workflows/kinds/video_metadata.md) is still available for use, it will be fully removed in +Execution Engine version `v2.0.0`. -* Exposed new parameter in the init method of `BaseExecutionEngine` class - `executor` which can accept instance of -Python `ThreadPoolExecutor` to be used by execution engine. Thanks to this change, processing should be faster, as -each `BaseExecutionEngine.run(...)` will not require dedicated instance of `ThreadPoolExecutor` as it was so far. -Additionally, we are significantly limiting threads spawning which may also be a benefit in some installations. +!!! 
warning "Breaking change planned - Execution Engine `v2.0.0`" + + [`video_metadata` kind](/workflows/kinds/video_metadata.md) got deprecated and will be removed in `v2.0.0` + + +* As a result of the changes mentioned above, the internal representation of the [`image` kind](/workflows/kinds/image.md) has been updated to +include a new `video_metadata` property. This property can be optionally set in the constructor; if not provided, +a default value with reasonable defaults will be used. To simplify metadata manipulation within blocks, we have +introduced two new class methods: `WorkflowImageData.copy_and_replace(...)` and `WorkflowImageData.create_crop(...)`. +For more details, refer to the updated [`WoorkflowImageData` usage guide](/workflows/internal_data_types.md#workflowimagedata). -* Despite the change, Execution Engine maintains the limit of concurrently executed steps - by limiting the number of -steps that run through the executor at a time (since Execution Engine is no longer in control of `ThreadPoolExecutor` -creation, and it is possible for the pool to have more workers available). -??? Hint "How to inject `ThreadPoolExecutor` to Execution Engine?" - - ```python - from concurrent.futures import ThreadPoolExecutor - workflow_init_parameters = { ... } - with ThreadPoolExecutor(max_workers=...) 
as thread_pool_executor: - execution_engine = ExecutionEngine.init( - init_parameters=workflow_init_parameters, - max_concurrent_steps=4, - workflow_id="your-workflow-id", - executor=thread_pool_executor, - ) - runtime_parameters = { - "image": cv2.imread("your-image-path") - } - results = execution_engine.run(runtime_parameters=runtime_parameters) - ``` diff --git a/docs/workflows/workflow_execution.md b/docs/workflows/workflow_execution.md index 927c8ec39b..daf962807d 100644 --- a/docs/workflows/workflow_execution.md +++ b/docs/workflows/workflow_execution.md @@ -124,6 +124,14 @@ influencing the processing for all elements in the batch and this type of data w the reference images remain unchanged as you process each input. Thus, the reference images are considered *scalar* data, while the list of input images is *batch-oriented*. + **Great news!** + + Since Execution Engine `v1.6.0`, the practical aspects of dealing with *scalars* and *batches* are offloaded to + the Execution Engine (refer to [changelog](./execution_engine_changelog.md) for more details). As a block + developer, it is still important to understand the difference, but when building blocks you are not forced to + think about the nuances that much. + + To illustrate the distinction, Workflow definitions hold inputs of the two categories: - **Scalar inputs** - like `WorkflowParameter` @@ -356,6 +364,16 @@ execution excludes steps at higher `dimensionality levels` from producing output output field selecting that values will be presented as nested list of empty lists, with depth matching `dimensionality level - 1` of referred output. +Since Execution Engine `v1.6.0`, blocks within a workflow may collapse batches into scalars, as well as create new +batches from scalar inputs. The first scenario is pretty easy to understand - each dictionary in the output list will +simply be populated with the same scalar value. The case of *emergent* batch is slightly more complicated. 
+In such case we can find batch at dimensionality level 1, which has shape or elements order not compliant +with input batches. To prevent semantic ambiguity, we treat such batch as if it's dimensionality is one level higher +(as if **there is additional batch-oriented input of size one attached to the input of the block creating batch +dynamically**). Such virtually nested outputs are broadcast, such that each dictionary in the output list will be given +new key with the same nested output. This nesting property is preserved even if there is no input-derived outputs +for given workflow - in such case, output is a list of size 1 which contains dictionary with nested output. + Some outputs would require serialisation when Workflows Execution Engine runs behind HTTP API. We use the following serialisation strategies: diff --git a/docs/workflows/workflows_execution_engine.md b/docs/workflows/workflows_execution_engine.md index d0fe5f343c..cc6b9931d9 100644 --- a/docs/workflows/workflows_execution_engine.md +++ b/docs/workflows/workflows_execution_engine.md @@ -86,6 +86,19 @@ batch-oriented input, it will be treated as a SIMD step. Non-SIMD steps, by contrast, are expected to deliver a single result for the input data. In the case of non-SIMD flow-control steps, they affect all downstream steps as a whole, rather than individually for each element in a batch. +Historically, Execution Engine could not handle well all scenarios when non-SIMD steps' outputs were fed into SIMD steps +inputs - causing compilation error due to lack of ability to automatically cast such outputs into batches when feeding +into SIMD seps. Starting with Execution Engine `v1.6.0`, the handling of SIMD and non-SIMD blocks has been improved +through the introduction of **Auto Batch Casting**: + +* When a SIMD input is detected but receives scalar data, the Execution Engine automatically casts it into a batch. 
+ +* The dimensionality of the batch is determined at compile time, using *lineage* information from other +batch-oriented inputs when available. Missing dimensions are generated in a manner similar to `torch.unsqueeze(...)`. + +* Outputs are evaluated against the casting context - leaving them as scalars when block keeps or decreases output +dimensionality or **creating new batches** when increase of dimensionality is expected. + ### Preparing step inputs diff --git a/inference/core/version.py b/inference/core/version.py index 6d64f80e1c..ab9744ad8f 100644 --- a/inference/core/version.py +++ b/inference/core/version.py @@ -1,4 +1,4 @@ -__version__ = "0.52.2" +__version__ = "0.53.0" if __name__ == "__main__": diff --git a/inference/core/workflows/core_steps/fusion/dimension_collapse/v1.py b/inference/core/workflows/core_steps/fusion/dimension_collapse/v1.py index 7f6e60d669..3f4c8512bc 100644 --- a/inference/core/workflows/core_steps/fusion/dimension_collapse/v1.py +++ b/inference/core/workflows/core_steps/fusion/dimension_collapse/v1.py @@ -59,6 +59,10 @@ def get_output_dimensionality_offset( ) -> int: return -1 + @classmethod + def get_parameters_enforcing_auto_batch_casting(cls) -> List[str]: + return ["data"] + @classmethod def describe_outputs(cls) -> List[OutputDefinition]: return [ diff --git a/inference/core/workflows/errors.py b/inference/core/workflows/errors.py index ff5c9d3da5..f420312849 100644 --- a/inference/core/workflows/errors.py +++ b/inference/core/workflows/errors.py @@ -4,7 +4,7 @@ class WorkflowBlockError(BaseModel): - block_id: str + block_id: Optional[str] = None block_type: Optional[str] = None block_details: Optional[str] = None property_name: Optional[str] = None diff --git a/inference/core/workflows/execution_engine/constants.py b/inference/core/workflows/execution_engine/constants.py index aba670d20d..5b14289969 100644 --- a/inference/core/workflows/execution_engine/constants.py +++ 
b/inference/core/workflows/execution_engine/constants.py @@ -2,6 +2,7 @@ PARSED_NODE_INPUT_SELECTORS_PROPERTY = "parsed_node_input_selectors" STEP_DEFINITION_PROPERTY = "definition" WORKFLOW_INPUT_BATCH_LINEAGE_ID = "" +TOP_LEVEL_LINEAGES_KEY = "top_level_lineages" IMAGE_TYPE_KEY = "type" IMAGE_VALUE_KEY = "value" ROOT_PARENT_ID_KEY = "root_parent_id" diff --git a/inference/core/workflows/execution_engine/introspection/schema_parser.py b/inference/core/workflows/execution_engine/introspection/schema_parser.py index 022b8312bf..2dc7b40b8c 100644 --- a/inference/core/workflows/execution_engine/introspection/schema_parser.py +++ b/inference/core/workflows/execution_engine/introspection/schema_parser.py @@ -64,12 +64,16 @@ def parse_block_manifest( inputs_accepting_batches_and_scalars = set( manifest_type.get_parameters_accepting_batches_and_scalars() ) + inputs_enforcing_auto_batch_casting = set( + manifest_type.get_parameters_enforcing_auto_batch_casting() + ) return parse_block_manifest_schema( schema=schema, inputs_dimensionality_offsets=inputs_dimensionality_offsets, dimensionality_reference_property=dimensionality_reference_property, inputs_accepting_batches=inputs_accepting_batches, inputs_accepting_batches_and_scalars=inputs_accepting_batches_and_scalars, + inputs_enforcing_auto_batch_casting=inputs_enforcing_auto_batch_casting, ) @@ -79,6 +83,7 @@ def parse_block_manifest_schema( dimensionality_reference_property: Optional[str], inputs_accepting_batches: Set[str], inputs_accepting_batches_and_scalars: Set[str], + inputs_enforcing_auto_batch_casting: Set[str], ) -> BlockManifestMetadata: primitive_types = retrieve_primitives_from_schema( schema=schema, @@ -89,6 +94,7 @@ def parse_block_manifest_schema( dimensionality_reference_property=dimensionality_reference_property, inputs_accepting_batches=inputs_accepting_batches, inputs_accepting_batches_and_scalars=inputs_accepting_batches_and_scalars, + 
inputs_enforcing_auto_batch_casting=inputs_enforcing_auto_batch_casting, ) return BlockManifestMetadata( primitive_types=primitive_types, @@ -255,6 +261,7 @@ def retrieve_selectors_from_schema( dimensionality_reference_property: Optional[str], inputs_accepting_batches: Set[str], inputs_accepting_batches_and_scalars: Set[str], + inputs_enforcing_auto_batch_casting: Set[str], ) -> Dict[str, SelectorDefinition]: result = [] for property_name, property_definition in schema[PROPERTIES_KEY].items(): @@ -277,6 +284,7 @@ def retrieve_selectors_from_schema( is_list_element=True, inputs_accepting_batches=inputs_accepting_batches, inputs_accepting_batches_and_scalars=inputs_accepting_batches_and_scalars, + inputs_enforcing_auto_batch_casting=inputs_enforcing_auto_batch_casting, ) elif property_definition.get(TYPE_KEY) == OBJECT_TYPE and isinstance( property_definition.get(ADDITIONAL_PROPERTIES_KEY), dict @@ -290,6 +298,7 @@ def retrieve_selectors_from_schema( is_dict_element=True, inputs_accepting_batches=inputs_accepting_batches, inputs_accepting_batches_and_scalars=inputs_accepting_batches_and_scalars, + inputs_enforcing_auto_batch_casting=inputs_enforcing_auto_batch_casting, ) else: selector = retrieve_selectors_from_simple_property( @@ -300,6 +309,7 @@ def retrieve_selectors_from_schema( is_dimensionality_reference_property=is_dimensionality_reference_property, inputs_accepting_batches=inputs_accepting_batches, inputs_accepting_batches_and_scalars=inputs_accepting_batches_and_scalars, + inputs_enforcing_auto_batch_casting=inputs_enforcing_auto_batch_casting, ) if selector is not None: result.append(selector) @@ -314,6 +324,7 @@ def retrieve_selectors_from_simple_property( is_dimensionality_reference_property: bool, inputs_accepting_batches: Set[str], inputs_accepting_batches_and_scalars: Set[str], + inputs_enforcing_auto_batch_casting: Set[str], is_list_element: bool = False, is_dict_element: bool = False, ) -> Optional[SelectorDefinition]: @@ -323,9 +334,15 @@ def 
retrieve_selectors_from_simple_property( ) if declared_points_to_batch == "dynamic": if property_name in inputs_accepting_batches_and_scalars: - points_to_batch = {True, False} + if property_name in inputs_enforcing_auto_batch_casting: + points_to_batch = {True} + else: + points_to_batch = {True, False} else: - points_to_batch = {property_name in inputs_accepting_batches} + points_to_batch = { + property_name in inputs_accepting_batches + or property_name in inputs_enforcing_auto_batch_casting + } else: points_to_batch = {declared_points_to_batch} allowed_references = [ @@ -359,6 +376,7 @@ def retrieve_selectors_from_simple_property( is_dimensionality_reference_property=is_dimensionality_reference_property, inputs_accepting_batches=inputs_accepting_batches, inputs_accepting_batches_and_scalars=inputs_accepting_batches_and_scalars, + inputs_enforcing_auto_batch_casting=inputs_enforcing_auto_batch_casting, is_list_element=True, ) if property_defines_union(property_definition=property_definition): @@ -372,6 +390,7 @@ def retrieve_selectors_from_simple_property( is_dimensionality_reference_property=is_dimensionality_reference_property, inputs_accepting_batches=inputs_accepting_batches, inputs_accepting_batches_and_scalars=inputs_accepting_batches_and_scalars, + inputs_enforcing_auto_batch_casting=inputs_enforcing_auto_batch_casting, ) return None @@ -394,6 +413,7 @@ def retrieve_selectors_from_union_definition( is_dimensionality_reference_property: bool, inputs_accepting_batches: Set[str], inputs_accepting_batches_and_scalars: Set[str], + inputs_enforcing_auto_batch_casting: Set[str], ) -> Optional[SelectorDefinition]: union_types = ( union_definition.get(ANY_OF_KEY, []) @@ -410,6 +430,7 @@ def retrieve_selectors_from_union_definition( is_dimensionality_reference_property=is_dimensionality_reference_property, inputs_accepting_batches=inputs_accepting_batches, inputs_accepting_batches_and_scalars=inputs_accepting_batches_and_scalars, + 
inputs_enforcing_auto_batch_casting=inputs_enforcing_auto_batch_casting, is_list_element=is_list_element, ) if result is None: diff --git a/inference/core/workflows/execution_engine/v1/compiler/entities.py b/inference/core/workflows/execution_engine/v1/compiler/entities.py index 6c9b945c6e..b95fa9e9d3 100644 --- a/inference/core/workflows/execution_engine/v1/compiler/entities.py +++ b/inference/core/workflows/execution_engine/v1/compiler/entities.py @@ -216,6 +216,12 @@ def iterate_through_definitions(self) -> Generator[StepInputDefinition, None, No StepInputData = Dict[str, Union[StepInputDefinition, CompoundStepInputDefinition]] +@dataclass +class AutoBatchCastingConfig: + casted_dimensionality: int + lineage_support: Optional[List[str]] + + @dataclass class StepNode(ExecutionGraphNode): step_manifest: WorkflowBlockManifest @@ -224,6 +230,9 @@ class StepNode(ExecutionGraphNode): child_execution_branches: Dict[str, str] = field(default_factory=dict) execution_branches_impacting_inputs: Set[str] = field(default_factory=set) batch_oriented_parameters: Set[str] = field(default_factory=set) + auto_batch_casting_lineage_supports: Dict[str, AutoBatchCastingConfig] = field( + default_factory=dict + ) step_execution_dimensionality: int = 0 def controls_flow(self) -> bool: diff --git a/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py b/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py index 176e0ab2d2..c5822fee54 100644 --- a/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py +++ b/inference/core/workflows/execution_engine/v1/compiler/graph_constructor.py @@ -1,7 +1,7 @@ import itertools from collections import defaultdict from copy import copy, deepcopy -from typing import Any, Dict, List, Optional, Set, Tuple, Union +from typing import Any, Callable, Dict, List, Optional, Set, Tuple, Union from uuid import uuid4 import networkx as nx @@ -22,6 +22,7 @@ from 
inference.core.workflows.execution_engine.constants import ( NODE_COMPILATION_OUTPUT_PROPERTY, PARSED_NODE_INPUT_SELECTORS_PROPERTY, + TOP_LEVEL_LINEAGES_KEY, WORKFLOW_INPUT_BATCH_LINEAGE_ID, ) from inference.core.workflows.execution_engine.entities.base import ( @@ -44,6 +45,7 @@ execution_phase, ) from inference.core.workflows.execution_engine.v1.compiler.entities import ( + AutoBatchCastingConfig, CompoundStepInputDefinition, DictOfStepInputDefinitions, DynamicStepInputDefinition, @@ -159,6 +161,11 @@ def add_input_nodes_for_graph( ) data_lineage = [WORKFLOW_INPUT_BATCH_LINEAGE_ID] for _ in range(input_spec.dimensionality - 1): + # TODO: this may end up being a bug - with ability for multi-step debugging, if we will + # ever have a situation that there will be multiple step outputs with nested + # dimensionality with the same lineage, this re-construction method will + # assign a different lineage identifier, causing the inputs being non-composable in + # a single execution branch data_lineage.append(f"{uuid4()}") else: data_lineage = [] @@ -595,6 +602,7 @@ def denote_data_flow_in_workflow( input_manifest=None, # this is expected never to be reached ) ) + top_level_data_lineage = set() for node in traverse_graph_ensuring_parents_are_reached_first( graph=execution_graph, start_node=super_input_node, @@ -603,8 +611,12 @@ def denote_data_flow_in_workflow( execution_graph=execution_graph, node=node, block_manifest_by_step_name=block_manifest_by_step_name, + on_top_level_lineage_denoted=lambda element: top_level_data_lineage.add( + element + ), ) execution_graph.remove_node(super_input_node) + execution_graph.graph[TOP_LEVEL_LINEAGES_KEY] = top_level_data_lineage return execution_graph @@ -612,6 +624,7 @@ def denote_data_flow_for_node( execution_graph: DiGraph, node: str, block_manifest_by_step_name: Dict[str, WorkflowBlockManifest], + on_top_level_lineage_denoted: Callable[[str], None], ) -> DiGraph: if is_input_node(execution_graph=execution_graph, node=node): # 
everything already set there, in the previous stage of compilation @@ -632,6 +645,7 @@ def denote_data_flow_for_node( execution_graph=execution_graph, node=node, manifest=manifest, + on_top_level_lineage_denoted=on_top_level_lineage_denoted, ) if is_output_node(execution_graph=execution_graph, node=node): # output is allowed to have exactly one predecessor @@ -672,6 +686,7 @@ def denote_data_flow_for_step( execution_graph: DiGraph, node: str, manifest: WorkflowBlockManifest, + on_top_level_lineage_denoted: Callable[[str], None], ) -> DiGraph: all_control_flow_predecessors, all_non_control_flow_predecessors = ( separate_flow_control_predecessors_from_data_providers( @@ -690,10 +705,31 @@ def denote_data_flow_for_step( node=node, expected_type=StepNode, ) + parsed_step_input_selectors: List[ParsedSelector] = execution_graph.nodes[node][ + PARSED_NODE_INPUT_SELECTORS_PROPERTY + ] + batch_compatibility_of_properties = retrieve_batch_compatibility_of_input_selectors( + input_selectors=parsed_step_input_selectors + ) + scalar_parameters_to_be_batched = ( + verify_declared_batch_compatibility_against_actual_inputs( + node=node, + step_node_data=step_node_data, + input_data=input_data, + batch_compatibility_of_properties=batch_compatibility_of_properties, + ) + ) + input_dimensionality_offsets = manifest.get_input_dimensionality_offsets() + verify_step_input_dimensionality_offsets( + step_name=step_name, + input_dimensionality_offsets=input_dimensionality_offsets, + ) inputs_dimensionalities = get_inputs_dimensionalities( step_name=step_name, step_type=manifest.type, input_data=input_data, + scalar_parameters_to_be_batched=scalar_parameters_to_be_batched, + input_dimensionality_offsets=input_dimensionality_offsets, ) logger.debug( f"For step: {node}, detected the following input dimensionalities: {inputs_dimensionalities}" @@ -702,7 +738,6 @@ def denote_data_flow_for_step( inputs_dimensionalities=inputs_dimensionalities, ) dimensionality_reference_property = 
manifest.get_dimensionality_reference_property() - input_dimensionality_offsets = manifest.get_input_dimensionality_offsets() output_dimensionality_offset = manifest.get_output_dimensionality_offset() verify_step_input_dimensionality_offsets( step_name=step_name, @@ -722,7 +757,11 @@ def denote_data_flow_for_step( inputs_dimensionalities=inputs_dimensionalities, dimensionality_offstes=input_dimensionality_offsets, ) - all_lineages = get_input_data_lineage(step_name=step_name, input_data=input_data) + all_lineages = get_input_data_lineage_excluding_auto_batch_casting( + step_name=step_name, + input_data=input_data, + scalar_parameters_to_be_batched=scalar_parameters_to_be_batched, + ) verify_compatibility_of_input_data_lineage_with_control_flow_lineage( step_name=step_name, inputs_lineage=all_lineages, @@ -738,59 +777,12 @@ def denote_data_flow_for_step( output_dimensionality_offset=output_dimensionality_offset, ) ) - parsed_step_input_selectors: List[ParsedSelector] = execution_graph.nodes[node][ - PARSED_NODE_INPUT_SELECTORS_PROPERTY - ] - input_property2batch_expected = defaultdict(set) - for parsed_selector in parsed_step_input_selectors: - for reference in parsed_selector.definition.allowed_references: - input_property2batch_expected[ - parsed_selector.definition.property_name - ].update(reference.points_to_batch) - for property_name, input_definition in input_data.items(): - if property_name not in input_property2batch_expected: - # only values plugged vi selectors are to be validated - continue - if input_definition.is_compound_input(): - actual_input_is_batch = { - element.is_batch_oriented() - for element in input_definition.iterate_through_definitions() - } + if not all_lineages: + if manifest.get_output_dimensionality_offset() > 0: + # brave decision to open a Pandora box + data_lineage = [node] else: - actual_input_is_batch = {input_definition.is_batch_oriented()} - batch_input_expected = input_property2batch_expected[property_name] - 
step_accepts_batch_input = step_node_data.step_manifest.accepts_batch_input() - if ( - step_accepts_batch_input - and batch_input_expected == {False} - and True in actual_input_is_batch - ): - raise ExecutionGraphStructureError( - public_message=f"Detected invalid reference plugged " - f"into property `{property_name}` of step `{node}` - the step " - f"property do not accept batch-oriented inputs, yet the input selector " - f"holds one - this indicates the problem with " - f"construction of your Workflow - usually the problem occurs when non-batch oriented " - f"step inputs are filled with outputs of batch-oriented steps or batch-oriented inputs.", - context="workflow_compilation | execution_graph_construction", - ) - if ( - step_accepts_batch_input - and batch_input_expected == {True} - and False in actual_input_is_batch - ): - raise ExecutionGraphStructureError( - public_message=f"Detected invalid reference plugged " - f"into property `{property_name}` of step `{node}` - the step " - f"property strictly requires batch-oriented inputs, yet the input selector " - f"holds non-batch oriented input - this indicates the " - f"problem with construction of your Workflow - usually the problem occurs when " - f"non-batch oriented step inputs are filled with outputs of non batch-oriented " - f"steps or non batch-oriented inputs.", - context="workflow_compilation | execution_graph_construction", - ) - if not parameters_with_batch_inputs: - data_lineage = [] + data_lineage = [] else: data_lineage = establish_batch_oriented_step_lineage( step_selector=node, @@ -799,6 +791,14 @@ def denote_data_flow_for_step( dimensionality_reference_property=dimensionality_reference_property, output_dimensionality_offset=output_dimensionality_offset, ) + lineage_supports = get_lineage_support_for_auto_batch_casted_parameters( + input_dimensionalities=inputs_dimensionalities, + all_lineages_of_batch_parameters=all_lineages, + scalar_parameters_to_be_batched=scalar_parameters_to_be_batched, + ) 
+ step_node_data.auto_batch_casting_lineage_supports = lineage_supports + if data_lineage: + on_top_level_lineage_denoted(data_lineage[0]) step_node_data.data_lineage = data_lineage return execution_graph @@ -1180,10 +1180,10 @@ def verify_output_offset( dimensionality_reference_property: Optional[str], output_dimensionality_offset: int, ) -> None: - if not parameters_with_batch_inputs and output_dimensionality_offset != 0: - raise BlockInterfaceError( - public_message=f"Block defining step {step_name} defines dimensionality offset different " - f"than zero while taking only non-batch parameters, which is not allowed.", + if not parameters_with_batch_inputs and output_dimensionality_offset < 0: + raise StepInputDimensionalityError( + public_message=f"Block defining step {step_name} defines negative dimensionality offset while only " + f"scalar inputs being provided - the block cannot run as there is no dimension to collapse.", context="workflow_compilation | execution_graph_construction | verification_of_output_offset", ) if ( @@ -1403,10 +1403,51 @@ def get_batch_lineage_prefixes(lineage: List[str]) -> List[List[str]]: def get_inputs_dimensionalities( - step_name: str, step_type: str, input_data: StepInputData + step_name: str, + step_type: str, + input_data: StepInputData, + scalar_parameters_to_be_batched: Set[str], + input_dimensionality_offsets: Dict[str, int], ) -> Dict[str, Set[int]]: result = defaultdict(set) dimensionalities_spotted = set() + offset_parameters = { + parameter: value + for parameter, value in input_dimensionality_offsets.items() + if value > 0 + } + non_offset_parameters_dimensionality_values = set() + for property_name, input_definition in input_data.items(): + if property_name in offset_parameters: + continue + if input_definition.is_compound_input(): + for value in input_definition.iterate_through_definitions(): + if value.is_batch_oriented(): + non_offset_parameters_dimensionality_values.add( + value.get_dimensionality() + ) + elif 
input_definition.is_batch_oriented(): + non_offset_parameters_dimensionality_values.add( + input_definition.get_dimensionality() + ) + if len(non_offset_parameters_dimensionality_values) > 1: + raise StepInputDimensionalityError( + public_message=f"For step {step_name} attempted to plug input data that are in different dimensions, " + f"whereas block defines the inputs to be equal in those terms.", + context="workflow_compilation | execution_graph_construction | collecting_step_input_data", + blocks_errors=[ + WorkflowBlockError( + block_id=step_name, + block_type=step_type, + block_details=f"Input parameters declare more than one distinct dimensionality. Detected dimensions: {non_offset_parameters_dimensionality_values}", + ) + ], + ) + non_offset_parameters_dimensionality_value = ( + non_offset_parameters_dimensionality_values.pop() + if len(non_offset_parameters_dimensionality_values) > 0 + else 1 + ) for property_name, input_definition in input_data.items(): if input_definition.is_compound_input(): result[property_name] = get_compound_input_dimensionality( @@ -1414,9 +1455,21 @@ def get_inputs_dimensionalities( step_type=step_type, property_name=property_name, input_definition=input_definition, + scalar_parameters_to_be_batched=scalar_parameters_to_be_batched, + non_offset_parameters_dimensionality_value=non_offset_parameters_dimensionality_value, + offset_parameters=offset_parameters, ) else: - result[property_name] = {input_definition.get_dimensionality()} + if property_name in scalar_parameters_to_be_batched: + if property_name not in offset_parameters: + result[property_name] = {non_offset_parameters_dimensionality_value} + else: + result[property_name] = { + non_offset_parameters_dimensionality_value + + offset_parameters[property_name] + } + else: + result[property_name] = {input_definition.get_dimensionality()} dimensionalities_spotted.update(result[property_name]) non_zero_dimensionalities_spotted = {d for d in dimensionalities_spotted if d != 0} if len(non_zero_dimensionalities_spotted) > 
0: @@ -1443,10 +1496,24 @@ def get_compound_input_dimensionality( property_name: str, step_type: str, input_definition: CompoundStepInputDefinition, + scalar_parameters_to_be_batched: Set[str], + offset_parameters: Dict[str, int], + non_offset_parameters_dimensionality_value: int, ) -> Set[int]: dimensionalities_spotted = set() for definition in input_definition.iterate_through_definitions(): - dimensionalities_spotted.add(definition.get_dimensionality()) + if ( + property_name not in scalar_parameters_to_be_batched + or definition.is_batch_oriented() + ): + dimensionalities_spotted.add(definition.get_dimensionality()) + elif property_name not in offset_parameters: + dimensionalities_spotted.add(non_offset_parameters_dimensionality_value) + else: + dimensionalities_spotted.add( + non_offset_parameters_dimensionality_value + + offset_parameters[property_name] + ) non_zero_dimensionalities = {e for e in dimensionalities_spotted if e != 0} if len(non_zero_dimensionalities) > 1: raise StepInputDimensionalityError( @@ -1476,9 +1543,74 @@ def grab_parameters_defining_batch_inputs( return result -def get_input_data_lineage( +def retrieve_batch_compatibility_of_input_selectors( + input_selectors: List[ParsedSelector], +) -> Dict[str, Set[bool]]: + batch_compatibility_of_properties = defaultdict(set) + for parsed_selector in input_selectors: + for reference in parsed_selector.definition.allowed_references: + batch_compatibility_of_properties[ + parsed_selector.definition.property_name + ].update(reference.points_to_batch) + return batch_compatibility_of_properties + + +def verify_declared_batch_compatibility_against_actual_inputs( + node: str, + step_node_data: StepNode, + input_data: StepInputData, + batch_compatibility_of_properties: Dict[str, Set[bool]], +) -> Set[str]: + scalar_parameters_to_be_batched = set() + parameters_accepting_batches_and_scalars = set( + step_node_data.step_manifest.get_parameters_accepting_batches_and_scalars() + ) + 
hardcoded_inputs_to_be_batch_compatible = set( + step_node_data.step_manifest.get_parameters_enforcing_auto_batch_casting() + + step_node_data.step_manifest.get_parameters_accepting_batches() + ) + for property_name, input_definition in input_data.items(): + if property_name not in batch_compatibility_of_properties: + actual_input_is_batch = {False} + if property_name in parameters_accepting_batches_and_scalars: + batch_compatibility = {True, False} + elif property_name in hardcoded_inputs_to_be_batch_compatible: + batch_compatibility = {True} + else: + continue + elif input_definition.is_compound_input(): + actual_input_is_batch = { + element.is_batch_oriented() + for element in input_definition.iterate_through_definitions() + } + batch_compatibility = batch_compatibility_of_properties[property_name] + else: + actual_input_is_batch = {input_definition.is_batch_oriented()} + batch_compatibility = batch_compatibility_of_properties[property_name] + step_accepts_batch_input = step_node_data.step_manifest.accepts_batch_input() + if ( + step_accepts_batch_input + and batch_compatibility == {False} + and True in actual_input_is_batch + ): + raise ExecutionGraphStructureError( + public_message=f"Detected invalid reference plugged " + f"into property `{property_name}` of step `{node}` - the step " + f"property do not accept batch-oriented inputs, yet the input selector " + f"holds one - this indicates the problem with " + f"construction of your Workflow - usually the problem occurs when non-batch oriented " + f"step inputs are filled with outputs of batch-oriented steps or batch-oriented inputs.", + context="workflow_compilation | execution_graph_construction", + ) + if batch_compatibility == {True} and False in actual_input_is_batch: + scalar_parameters_to_be_batched.add(property_name) + return scalar_parameters_to_be_batched + + +def get_input_data_lineage_excluding_auto_batch_casting( step_name: str, input_data: StepInputData, + scalar_parameters_to_be_batched: 
Set[str], ) -> List[List[str]]: lineage_deduplication_set = set() lineages = [] @@ -1489,6 +1621,11 @@ def get_input_data_lineage( input_definition=input_definition, lineage_deduplication_set=lineage_deduplication_set, ) + if ( + property_name in scalar_parameters_to_be_batched + and len(new_lineages_detected_within_property_data) == 0 + ): + continue lineages.extend(new_lineages_detected_within_property_data) if not lineages: return lineages @@ -1496,6 +1633,40 @@ def get_input_data_lineage( return lineages +def get_lineage_support_for_auto_batch_casted_parameters( + input_dimensionalities: Dict[str, Set[int]], + scalar_parameters_to_be_batched: Set[str], + all_lineages_of_batch_parameters: List[List[str]], +) -> Dict[str, AutoBatchCastingConfig]: + longest_lineage_support = find_longest_lineage_support( + all_lineages_of_batch_parameters=all_lineages_of_batch_parameters, + ) + result = {} + for parameter_name in scalar_parameters_to_be_batched: + parameter_dimensionality = max(input_dimensionalities[parameter_name]) + if longest_lineage_support is None: + lineage_support = None + else: + lineage_support = longest_lineage_support[:parameter_dimensionality] + result[parameter_name] = AutoBatchCastingConfig( + casted_dimensionality=parameter_dimensionality, + lineage_support=lineage_support, + ) + return result + + +def find_longest_lineage_support( + all_lineages_of_batch_parameters: List[List[str]], +) -> Optional[List[str]]: + longest_longest_lineage_support = [] + for lineage in all_lineages_of_batch_parameters: + if len(lineage) > len(longest_longest_lineage_support): + longest_longest_lineage_support = lineage + if len(longest_longest_lineage_support) == 0: + return None + return longest_longest_lineage_support + + def get_lineage_for_input_property( step_name: str, property_name: str, @@ -1612,13 +1783,6 @@ def establish_batch_oriented_step_lineage( ) if output_dimensionality_offset < 0: result_dimensionality = reference_lineage[:output_dimensionality_offset] 
- if len(result_dimensionality) == 0: - raise StepOutputLineageError( - public_message=f"Step {step_selector} is to decrease dimensionality, but it is not possible if " - f"input dimensionality is not greater or equal 2, otherwise output would not " - f"be batch-oriented.", - context="workflow_compilation | execution_graph_construction | establishing_step_output_lineage", - ) return result_dimensionality if output_dimensionality_offset == 0: return reference_lineage @@ -1641,7 +1805,7 @@ def get_reference_lineage( f"This is most likely the bug. Contact Roboflow team through github issues " f"(https://github.com/roboflow/inference/issues) providing full " f"context of the problem - including workflow definition you use.", - context="workflow_compilation | execution_graph_construction | collecting_step_inputs", + context="workflow_compilation | execution_graph_construction | collecting_step_inputs_lineage", ) property_data = input_data[dimensionality_reference_property] if property_data.is_compound_input(): @@ -1657,7 +1821,7 @@ def get_reference_lineage( f"Contact Roboflow team through github issues " f"(https://github.com/roboflow/inference/issues) providing full " f"context of the problem - including workflow definition you use.", - context="workflow_compilation | execution_graph_construction | collecting_step_inputs", + context="workflow_compilation | execution_graph_construction | collecting_step_inputs_lineage", ) if not property_data.is_batch_oriented(): raise AssumptionError( @@ -1666,7 +1830,7 @@ def get_reference_lineage( f"Contact Roboflow team through github issues " f"(https://github.com/roboflow/inference/issues) providing full " f"context of the problem - including workflow definition you use.", - context="workflow_compilation | execution_graph_construction | collecting_step_inputs", + context="workflow_compilation | execution_graph_construction | collecting_step_inputs_lineage", ) return copy(property_data.data_lineage) diff --git 
a/inference/core/workflows/execution_engine/v1/compiler/syntactic_parser.py b/inference/core/workflows/execution_engine/v1/compiler/syntactic_parser.py index 9cbac90cf5..15986b2392 100644 --- a/inference/core/workflows/execution_engine/v1/compiler/syntactic_parser.py +++ b/inference/core/workflows/execution_engine/v1/compiler/syntactic_parser.py @@ -77,7 +77,6 @@ def parse_workflow_definition( property_name = None if len(loc) > 3 and loc[2] == element_type: property_name = str(loc[3]) - block_error = WorkflowBlockError( block_id=element_name, block_type=element_type, diff --git a/inference/core/workflows/execution_engine/v1/core.py b/inference/core/workflows/execution_engine/v1/core.py index 3f0134b506..9df7fb33d4 100644 --- a/inference/core/workflows/execution_engine/v1/core.py +++ b/inference/core/workflows/execution_engine/v1/core.py @@ -23,7 +23,7 @@ validate_runtime_input, ) -EXECUTION_ENGINE_V1_VERSION = Version("1.5.0") +EXECUTION_ENGINE_V1_VERSION = Version("1.6.0") class ExecutionEngineV1(BaseExecutionEngine): diff --git a/inference/core/workflows/execution_engine/v1/dynamic_blocks/block_assembler.py b/inference/core/workflows/execution_engine/v1/dynamic_blocks/block_assembler.py index 46e06046ab..e8702a71cf 100644 --- a/inference/core/workflows/execution_engine/v1/dynamic_blocks/block_assembler.py +++ b/inference/core/workflows/execution_engine/v1/dynamic_blocks/block_assembler.py @@ -381,6 +381,14 @@ def assembly_manifest_class_methods( "get_parameters_accepting_batches_and_scalars", classmethod(get_parameters_accepting_batches_and_scalars), ) + get_parameters_enforcing_auto_batch_casting = ( + lambda cls: manifest_description.get_parameters_enforcing_auto_batch_casting + ) + setattr( + manifest_class, + "get_parameters_enforcing_auto_batch_casting", + classmethod(get_parameters_enforcing_auto_batch_casting), + ) input_dimensionality_offsets = collect_input_dimensionality_offsets( inputs=manifest_description.inputs ) diff --git 
a/inference/core/workflows/execution_engine/v1/dynamic_blocks/entities.py b/inference/core/workflows/execution_engine/v1/dynamic_blocks/entities.py index 6e6e6a72fe..7fbb4d2c4d 100644 --- a/inference/core/workflows/execution_engine/v1/dynamic_blocks/entities.py +++ b/inference/core/workflows/execution_engine/v1/dynamic_blocks/entities.py @@ -116,6 +116,13 @@ class ManifestDescription(BaseModel): "Value will override `accepts_batch_input` if non-empty " "list is provided, `accepts_batch_input` is kept not to break backward compatibility.", ) + get_parameters_enforcing_auto_batch_casting: List[str] = Field( + default_factory=list, + description="List of parameters for which auto-batch casting should be enforced, making sure that the block " + "run(...) method will always receive the parameters as batches, not scalars. This property is important for " + "blocks decreasing output dimensionality which define neither `batch_oriented_parameters` nor " + "`parameters_with_scalars_and_batches`.", + ) class PythonCode(BaseModel): diff --git a/inference/core/workflows/execution_engine/v1/executor/core.py b/inference/core/workflows/execution_engine/v1/executor/core.py index fa4e2d18f4..f41859f2a1 100644 --- a/inference/core/workflows/execution_engine/v1/executor/core.py +++ b/inference/core/workflows/execution_engine/v1/executor/core.py @@ -180,7 +180,13 @@ def run_simd_step( step_name = get_last_chunk_of_selector(selector=step_selector) step_instance = workflow.steps[step_name].step step_manifest = workflow.steps[step_name].manifest - if step_manifest.accepts_batch_input(): + collapse_of_batch_to_scalar_expected = ( + step_manifest.get_output_dimensionality_offset() < 0 + and not execution_data_manager.does_step_produce_batches( + step_selector=step_selector + ) + ) + if step_manifest.accepts_batch_input() or collapse_of_batch_to_scalar_expected: return run_simd_step_in_batch_mode( step_selector=step_selector, step_instance=step_instance, @@ -300,15 +306,6 @@ def 
run_non_simd_step( }, ): step_result = step_instance.run(**step_input) - if isinstance(step_result, list): - raise ExecutionEngineRuntimeError( - public_message=f"Error in execution engine. Non-SIMD step {step_name} " - f"produced list of results which is not expected. This is most likely bug. " - f"Contact Roboflow team through github issues " - f"(https://github.com/roboflow/inference/issues) providing full context of" - f"the problem - including workflow definition you use.", - context="workflow_execution | step_output_registration", - ) with profiler.profile_execution_phase( name="step_output_registration", categories=["execution_engine_operation"], diff --git a/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/execution_cache.py b/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/execution_cache.py index 9626493462..7bf5148bd8 100644 --- a/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/execution_cache.py +++ b/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/execution_cache.py @@ -41,7 +41,7 @@ def init( expected_type=StepNode, ) step_name = node_data.step_manifest.name - compatible_with_batches = node_data.is_batch_oriented() + compatible_with_batches = node_data.output_dimensionality > 0 outputs = node_data.step_manifest.get_actual_outputs() cache.declare_step( step_name=step_name, diff --git a/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/manager.py b/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/manager.py index e0c7178f11..a4d4ad7539 100644 --- a/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/manager.py +++ b/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/manager.py @@ -3,7 +3,7 @@ from networkx import DiGraph from inference.core import logger -from inference.core.workflows.errors import ExecutionEngineRuntimeError +from 
inference.core.workflows.errors import AssumptionError, ExecutionEngineRuntimeError from inference.core.workflows.execution_engine.constants import ( NODE_COMPILATION_OUTPUT_PROPERTY, ) @@ -121,7 +121,7 @@ def get_non_simd_step_input(self, step_selector: str) -> Optional[Dict[str, Any] ) def register_non_simd_step_output( - self, step_selector: str, output: Union[Dict[str, Any], FlowControl] + self, step_selector: str, output: Union[Dict[str, Any], FlowControl, list] ) -> None: if self.is_step_simd(step_selector=step_selector): raise ExecutionEngineRuntimeError( @@ -138,6 +138,44 @@ def register_non_simd_step_output( node=step_selector, expected_type=StepNode, ) + if step_node.output_dimensionality == 1: + # we only allow +1 dim increase for now, so it is fine to only handle this case + indices = [(i,) for i in range(len(output))] + self._dynamic_batches_manager.register_element_indices_for_lineage( + lineage=step_node.data_lineage, + indices=indices, + ) + if step_node.child_execution_branches: + if not all(isinstance(element, FlowControl) for element in output): + raise ExecutionEngineRuntimeError( + public_message=f"Error in execution engine. Flow control step {step_name} " + f"expected to only produce FlowControl objects. This is most likely bug. " + f"Contact Roboflow team through github issues " + f"(https://github.com/roboflow/inference/issues) providing full context of" + f"the problem - including workflow definition you use.", + context="workflow_execution | step_output_registration", + ) + self._register_flow_control_output_for_simd_step( + step_node=step_node, + indices=indices, + outputs=output, + ) + return None + self._execution_cache.register_batch_of_step_outputs( + step_name=step_name, + indices=indices, + outputs=output, + ) + return None + if isinstance(output, list): + raise ExecutionEngineRuntimeError( + public_message=f"Error in execution engine. Non-SIMD step {step_name} " + f"produced list of results which is not expected. 
This is most likely bug. " + f"Contact Roboflow team through github issues " + f"(https://github.com/roboflow/inference/issues) providing full context of" + f"the problem - including workflow definition you use.", + context="workflow_execution | step_output_registration", + ) if isinstance(output, FlowControl): self._register_flow_control_output_for_non_simd_step( step_node=step_node, @@ -220,8 +258,91 @@ def register_simd_step_output( node=step_selector, expected_type=StepNode, ) + step_name = get_last_chunk_of_selector(selector=step_selector) + if step_node.output_dimensionality == 0: + # SIMD step collapsing into scalar (can happen for auto-batch casting of parameters) + if isinstance(outputs, list): + if len(outputs) == 0: + # termination of the computation as in NON-SIMD case + return None + if len(outputs) != 1: + raise ExecutionEngineRuntimeError( + public_message=f"Error in execution engine. In context of SIMD step: {step_selector} attempts to " + f"register output which should collapse into a scalar, but detected batched output " + f"with more than a single element (or incompatible output), " + f"making the operation not possible. This is most likely bug (either a block or " + f"Execution Engine is faulty). 
Contact Roboflow team through github issues " + f"(https://github.com/roboflow/inference/issues) providing full context of" + f"the problem - including workflow definition you use.", + context="workflow_execution | step_output_registration", + ) + output = outputs[0] + else: + output = outputs + if isinstance(output, FlowControl): + self._register_flow_control_output_for_non_simd_step( + step_node=step_node, + output=output, + ) + return None + self._execution_cache.register_non_batch_step_outputs( + step_name=step_name, + outputs=output, + ) + return None if ( step_node.output_dimensionality - step_node.step_execution_dimensionality + == 0 + and step_node.step_manifest.get_output_dimensionality_offset() > 0 + ): + # artificial increase in output dimensionality due to ABC which should be unwrapped + if isinstance(outputs, list) and len(outputs) == 0: + self._dynamic_batches_manager.register_element_indices_for_lineage( + lineage=step_node.data_lineage, + indices=indices, + ) + if step_node.child_execution_branches: + if not all(isinstance(element, FlowControl) for element in outputs): + raise ExecutionEngineRuntimeError( + public_message=f"Error in execution engine. Flow control step {step_name} " + f"expected to only produce FlowControl objects. This is most likely bug. " + f"Contact Roboflow team through github issues " + f"(https://github.com/roboflow/inference/issues) providing full context of" + f"the problem - including workflow definition you use.", + context="workflow_execution | step_output_registration", + ) + self._register_flow_control_output_for_simd_step( + step_node=step_node, + indices=indices, + outputs=outputs, + ) + return None + self._execution_cache.register_batch_of_step_outputs( + step_name=step_name, + indices=indices, + outputs=outputs, + ) + return None + if not isinstance(outputs, list) or len(outputs) != 1: + raise AssumptionError( + public_message=f"Error in execution engine. 
In context of SIMD step: {step_selector} attempts to " + f"register output which should be nested, 1-element batch, but detected batched " + f"output with more than a single element (or incompatible output), " + f"making the operation not possible. This is most likely bug (either a block or " + f"Execution Engine is faulty). Contact Roboflow team through github issues " + f"(https://github.com/roboflow/inference/issues) providing full context of" + f"the problem - including workflow definition you use.", + context="workflow_execution | step_output_registration", + ) + index_base = indices[0][:-1] + outputs = outputs[0] + indices = [index_base + (i,) for i in range(len(outputs))] + self._dynamic_batches_manager.register_element_indices_for_lineage( + lineage=step_node.data_lineage, + indices=indices, + ) + elif ( + step_node.output_dimensionality - step_node.step_execution_dimensionality ) > 0: # increase in dimensionality indices, outputs = flatten_nested_output( @@ -231,7 +352,6 @@ def register_simd_step_output( lineage=step_node.data_lineage, indices=indices, ) - step_name = get_last_chunk_of_selector(selector=step_selector) if step_node.child_execution_branches: if not all(isinstance(element, FlowControl) for element in outputs): raise ExecutionEngineRuntimeError( @@ -275,7 +395,7 @@ def get_selector_indices(self, selector: str) -> Optional[List[DynamicBatchIndex ] selector_lineage = input_node.data_lineage elif is_step_selector(selector_or_value=potential_step_selector): - if self.is_step_simd(step_selector=potential_step_selector): + if self.does_step_produce_batches(step_selector=potential_step_selector): step_node_data: StepNode = self._execution_graph.nodes[ potential_step_selector ][NODE_COMPILATION_OUTPUT_PROPERTY] @@ -318,9 +438,23 @@ def get_non_batch_data(self, selector: str) -> Any: ) and not self.does_input_represent_batch(input_selector=selector): input_name = get_last_chunk_of_selector(selector=selector) return self._runtime_parameters[input_name] 
- elif is_step_selector( - selector_or_value=potential_step_selector - ) and not self.is_step_simd(step_selector=potential_step_selector): + elif is_step_selector(selector_or_value=potential_step_selector): + step_node_data = node_as( + execution_graph=self._execution_graph, + node=potential_step_selector, + expected_type=StepNode, + ) + if step_node_data.output_dimensionality != 0: + raise ExecutionEngineRuntimeError( + public_message=f"Error in execution engine. Attempted to get value of: {selector}, " + f"which was supposed to be registered as scalar output, but in fact Execution " + f"Engine denoted the output as batched one (with dimensionality: " + f"{step_node_data.output_dimensionality}). " + f"This is most likely bug. Contact Roboflow team through github issues " + f"(https://github.com/roboflow/inference/issues) providing full context of" + f"the problem - including workflow definition you use.", + context="workflow_execution | getting_workflow_data", + ) step_name = get_last_chunk_of_selector(selector=potential_step_selector) if selector.endswith(".*"): return self._execution_cache.get_all_non_batch_step_outputs( @@ -365,7 +499,7 @@ def get_batch_data( ] elif is_step_selector( selector_or_value=potential_step_selector - ) and self.is_step_simd(step_selector=potential_step_selector): + ) and self.does_step_produce_batches(step_selector=potential_step_selector): step_name = get_last_chunk_of_selector(selector=potential_step_selector) if selector.endswith(".*"): return self._execution_cache.get_all_batch_step_outputs( @@ -394,6 +528,14 @@ def is_step_simd(self, step_selector: str) -> bool: ) return step_node_data.is_batch_oriented() + def does_step_produce_batches(self, step_selector: str) -> bool: + step_node_data = node_as( + execution_graph=self._execution_graph, + node=step_selector, + expected_type=StepNode, + ) + return step_node_data.output_dimensionality > 0 + def does_input_represent_batch(self, input_selector: str) -> bool: input_node = node_as( 
execution_graph=self._execution_graph, diff --git a/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/step_input_assembler.py b/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/step_input_assembler.py index b1b4f0082f..10a601a4b3 100644 --- a/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/step_input_assembler.py +++ b/inference/core/workflows/execution_engine/v1/executor/execution_data_manager/step_input_assembler.py @@ -4,6 +4,7 @@ from inference.core.workflows.errors import AssumptionError, ExecutionEngineRuntimeError from inference.core.workflows.execution_engine.entities.base import Batch from inference.core.workflows.execution_engine.v1.compiler.entities import ( + AutoBatchCastingConfig, CompoundStepInputDefinition, DynamicStepInputDefinition, StaticStepInputDefinition, @@ -245,7 +246,7 @@ def construct_simd_step_input( dynamic_batches_manager: DynamicBatchesManager, branching_manager: BranchingManager, ) -> BatchModeSIMDStepInput: - masks = construct_mask_for_all_inputs_dimensionalities( + masks, scalars_discarded = construct_mask_for_all_inputs_dimensionalities( step_node=step_node, branching_manager=branching_manager, ) @@ -253,6 +254,7 @@ def construct_simd_step_input( step_node=step_node, dynamic_batches_manager=dynamic_batches_manager, masks=masks, + scalars_discarded=scalars_discarded, runtime_parameters=runtime_parameters, execution_cache=execution_cache, ) @@ -261,7 +263,7 @@ def construct_simd_step_input( def construct_mask_for_all_inputs_dimensionalities( step_node: StepNode, branching_manager: BranchingManager, -) -> Any: +) -> Tuple[Any, bool]: inputs_dimensionalities = collect_inputs_dimensionalities(step_node=step_node) all_dimensionalities = {dim for dim in inputs_dimensionalities.values() if dim > 0} batch_masks, non_batch_masks = [], set() @@ -279,15 +281,18 @@ def construct_mask_for_all_inputs_dimensionalities( else: mask = 
branching_manager.get_mask(execution_branch=execution_branch) non_batch_masks.add(mask) - if False in non_batch_masks: - return {dimension: set() for dimension in all_dimensionalities} + scalar_mask_contains_false = False in non_batch_masks + if scalar_mask_contains_false: + return { + dimension: set() for dimension in all_dimensionalities + }, scalar_mask_contains_false return { dimension: get_masks_intersection_up_to_dimension( batch_masks=batch_masks, dimension=dimension, ) for dimension in all_dimensionalities - } + }, scalar_mask_contains_false def collect_inputs_dimensionalities( @@ -355,9 +360,11 @@ def prepare_parameters( step_node: StepNode, dynamic_batches_manager: DynamicBatchesManager, masks: Dict[int, Optional[Set[DynamicBatchIndex]]], + scalars_discarded: bool, runtime_parameters: Dict[str, Any], execution_cache: ExecutionCache, ) -> BatchModeSIMDStepInput: + step_requests_batch_input = step_node.step_manifest.accepts_batch_input() result = {} indices_for_parameter = {} guard_of_indices_wrapping = GuardForIndicesWrapping() @@ -373,10 +380,13 @@ def prepare_parameters( parameter=parameter_specs, step_execution_dimensionality=step_node.step_execution_dimensionality, masks=masks, + scalars_discarded=scalars_discarded, dynamic_batches_manager=dynamic_batches_manager, runtime_parameters=runtime_parameters, execution_cache=execution_cache, guard_of_indices_wrapping=guard_of_indices_wrapping, + auto_batch_casting_lineage_supports=step_node.auto_batch_casting_lineage_supports, + step_requests_batch_input=step_requests_batch_input, ) compound_inputs.add(parameter_name) else: @@ -388,10 +398,13 @@ def prepare_parameters( parameter=parameter_specs, step_execution_dimensionality=step_node.step_execution_dimensionality, masks=masks, + scalars_discarded=scalars_discarded, dynamic_batches_manager=dynamic_batches_manager, runtime_parameters=runtime_parameters, execution_cache=execution_cache, guard_of_indices_wrapping=guard_of_indices_wrapping, + 
auto_batch_casting_lineage_supports=step_node.auto_batch_casting_lineage_supports, + step_requests_batch_input=step_requests_batch_input, ) contains_empty_scalar_step_output_selector = ( contains_empty_scalar_step_output_selector @@ -418,8 +431,9 @@ def prepare_parameters( parameters={}, ) empty_indices = get_empty_batch_elements_indices(value=result) - indices = [e for e in indices if e not in empty_indices] - result = remove_indices(value=result, indices=empty_indices) + if empty_indices: + indices = [e for e in indices if e not in empty_indices] + result = remove_indices(value=result, indices=empty_indices) return BatchModeSIMDStepInput( indices=indices, parameters=result, @@ -430,10 +444,13 @@ def get_compound_parameter_value( parameter: CompoundStepInputDefinition, step_execution_dimensionality: int, masks: Dict[int, Optional[Set[DynamicBatchIndex]]], + scalars_discarded: bool, dynamic_batches_manager: DynamicBatchesManager, runtime_parameters: Dict[str, Any], execution_cache: ExecutionCache, guard_of_indices_wrapping: GuardForIndicesWrapping, + auto_batch_casting_lineage_supports: Dict[str, AutoBatchCastingConfig], + step_requests_batch_input: bool, ) -> Tuple[Union[list, Dict[str, Any]], Optional[List[DynamicBatchIndex]], bool]: contains_empty_scalar_step_output_selector = False batch_indices = [] @@ -448,10 +465,13 @@ def get_compound_parameter_value( parameter=nested_element, step_execution_dimensionality=step_execution_dimensionality, masks=masks, + scalars_discarded=scalars_discarded, dynamic_batches_manager=dynamic_batches_manager, runtime_parameters=runtime_parameters, execution_cache=execution_cache, guard_of_indices_wrapping=guard_of_indices_wrapping, + auto_batch_casting_lineage_supports=auto_batch_casting_lineage_supports, + step_requests_batch_input=step_requests_batch_input, ) result.append(non_compound_parameter_value) contains_empty_scalar_step_output_selector = ( @@ -471,10 +491,13 @@ def get_compound_parameter_value( parameter=nested_element, 
step_execution_dimensionality=step_execution_dimensionality, masks=masks, + scalars_discarded=scalars_discarded, dynamic_batches_manager=dynamic_batches_manager, runtime_parameters=runtime_parameters, execution_cache=execution_cache, guard_of_indices_wrapping=guard_of_indices_wrapping, + auto_batch_casting_lineage_supports=auto_batch_casting_lineage_supports, + step_requests_batch_input=step_requests_batch_input, ) result[nested_element.parameter_specification.nested_element_key] = ( non_compound_parameter_value @@ -496,27 +519,89 @@ def get_non_compound_parameter_value( parameter: StepInputDefinition, step_execution_dimensionality: int, masks: Dict[int, Optional[Set[DynamicBatchIndex]]], + scalars_discarded: bool, dynamic_batches_manager: DynamicBatchesManager, runtime_parameters: Dict[str, Any], execution_cache: ExecutionCache, guard_of_indices_wrapping: GuardForIndicesWrapping, -) -> Union[Any, Optional[List[DynamicBatchIndex]], bool]: + auto_batch_casting_lineage_supports: Dict[str, AutoBatchCastingConfig], + step_requests_batch_input: bool, +) -> Tuple[Any, Optional[List[DynamicBatchIndex]], bool]: if not parameter.is_batch_oriented(): + requested_as_batch = ( + parameter.parameter_specification.parameter_name + in auto_batch_casting_lineage_supports + ) if parameter.points_to_input(): input_parameter: DynamicStepInputDefinition = parameter # type: ignore parameter_name = get_last_chunk_of_selector( selector=input_parameter.selector ) - return runtime_parameters[parameter_name], None, False + if not requested_as_batch: + return runtime_parameters[parameter_name], None, False + else: + return apply_auto_batch_casting( + parameter_name=parameter_name, + value=runtime_parameters[parameter_name], + auto_batch_casting_config=auto_batch_casting_lineage_supports[ + parameter.parameter_specification.parameter_name + ], + contains_empty_scalar_step_output_selector=False, + dynamic_batches_manager=dynamic_batches_manager, + 
step_execution_dimensionality=step_execution_dimensionality, + guard_of_indices_wrapping=guard_of_indices_wrapping, + step_requests_batch_input=step_requests_batch_input, + masks=masks, + scalars_discarded=False, + ) elif parameter.points_to_step_output(): input_parameter: DynamicStepInputDefinition = parameter # type: ignore value = execution_cache.get_non_batch_output( selector=input_parameter.selector ) - return value, None, value is None + if not requested_as_batch: + return value, None, value is None + else: + return apply_auto_batch_casting( + parameter_name=parameter.parameter_specification.parameter_name, + value=value, + auto_batch_casting_config=auto_batch_casting_lineage_supports[ + parameter.parameter_specification.parameter_name + ], + contains_empty_scalar_step_output_selector=value is None, + dynamic_batches_manager=dynamic_batches_manager, + step_execution_dimensionality=step_execution_dimensionality, + guard_of_indices_wrapping=guard_of_indices_wrapping, + step_requests_batch_input=step_requests_batch_input, + masks=masks, + scalars_discarded=scalars_discarded, + ) else: static_input: StaticStepInputDefinition = parameter # type: ignore - return static_input.value, None, False + if not requested_as_batch or static_input.value is None: + # when we have Optional[Selector()] in manifest - we must retain + # ability to inject None into the run(...) 
parameters - as + # if we treat that as actual batch and broadcast Batch[None], + # we would behave exactly as condition execution does - + # and the logic executing after this, will filter-out empty + # elements - so on None, we behave "the old way" regardless of the fact that ABC + # was requested + return static_input.value, None, False + else: + return apply_auto_batch_casting( + parameter_name=parameter.parameter_specification.parameter_name, + value=static_input.value, + auto_batch_casting_config=auto_batch_casting_lineage_supports[ + parameter.parameter_specification.parameter_name + ], + contains_empty_scalar_step_output_selector=False, + dynamic_batches_manager=dynamic_batches_manager, + step_execution_dimensionality=step_execution_dimensionality, + guard_of_indices_wrapping=guard_of_indices_wrapping, + step_requests_batch_input=step_requests_batch_input, + masks=masks, + scalars_discarded=False, + ) dynamic_parameter: DynamicStepInputDefinition = parameter # type: ignore parameter_dimensionality = dynamic_parameter.get_dimensionality() lineage_indices = dynamic_batches_manager.get_indices_for_data_lineage( @@ -578,9 +663,25 @@ def get_non_compound_parameter_value( f"the problem - including workflow definition you use.", context="workflow_execution | step_input_assembling", ) - upper_level_indices = dynamic_batches_manager.get_indices_for_data_lineage( - lineage=dynamic_parameter.data_lineage[:-1], - ) + if step_execution_dimensionality == 0 and not step_requests_batch_input: + return Batch(batch_input, lineage_indices), lineage_indices, False + upper_lineage = dynamic_parameter.data_lineage[:-1] + if len(upper_lineage) == 0: + if not step_requests_batch_input: + raise AssumptionError( + public_message=f"Parameter: {parameter.parameter_specification.parameter_name} " + f"requires dimensionality wrapping, but registered lineage support is incompatible " + f"which should be detected by the compiler. This is most likely a bug. 
" + f"Contact Roboflow team through github issues " + f"(https://github.com/roboflow/inference/issues) providing full context of" + f"the problem - including workflow definition you use.", + context="workflow_execution | step_input_assembling", + ) + upper_level_indices = [()] + else: + upper_level_indices = dynamic_batches_manager.get_indices_for_data_lineage( + lineage=dynamic_parameter.data_lineage[:-1], + ) result = reduce_batch_dimensionality( indices=lineage_indices, upper_level_index=upper_level_indices, @@ -590,6 +691,131 @@ def get_non_compound_parameter_value( return result, result.indices, False +def apply_auto_batch_casting( + parameter_name: str, + value: Any, + auto_batch_casting_config: AutoBatchCastingConfig, + contains_empty_scalar_step_output_selector: bool, + dynamic_batches_manager: DynamicBatchesManager, + step_execution_dimensionality: int, + guard_of_indices_wrapping: GuardForIndicesWrapping, + step_requests_batch_input: bool, + masks: Dict[int, Optional[Set[DynamicBatchIndex]]], + scalars_discarded: bool, +) -> Tuple[Any, List[DynamicBatchIndex], bool]: + if auto_batch_casting_config.lineage_support is None: + indices = [(0,) * auto_batch_casting_config.casted_dimensionality] + else: + indices = dynamic_batches_manager.get_indices_for_data_lineage( + lineage=auto_batch_casting_config.lineage_support, + ) + missing_dimensions = auto_batch_casting_config.casted_dimensionality - len( + auto_batch_casting_config.lineage_support + ) + if missing_dimensions > 0: + padding = (0,) * missing_dimensions + indices = [i + padding for i in indices] + if scalars_discarded: + batch_content = [None] * len(indices) + elif auto_batch_casting_config.lineage_support is None: + batch_content = [value] * len(indices) + else: + support_dimensionality = len(auto_batch_casting_config.lineage_support) + mask_for_support_dimensionality = masks[support_dimensionality] + if mask_for_support_dimensionality is None: + batch_content = [value] * len(indices) + else: + 
batch_content = [] + for index in indices: + if index[:support_dimensionality] in mask_for_support_dimensionality: + batch_content.append(value) + else: + batch_content.append(None) + created_batch = Batch(content=batch_content, indices=indices) + if step_execution_dimensionality == auto_batch_casting_config.casted_dimensionality: + return ( + created_batch, + indices, + contains_empty_scalar_step_output_selector or scalars_discarded, + ) + if step_execution_dimensionality > auto_batch_casting_config.casted_dimensionality: + raise ExecutionEngineRuntimeError( + public_message=f"Detected a situation when parameter: " + f"{parameter_name}" + f"has auto-batch casted dimensionality {auto_batch_casting_config.casted_dimensionality} larger " + f"than step execution dimensionality: {step_execution_dimensionality}. " + f"This is most likely a bug. " + f"Contact Roboflow team through github issues " + f"(https://github.com/roboflow/inference/issues) providing full context of" + f"the problem - including workflow definition you use.", + context="workflow_execution | step_input_assembling", + ) + if ( + abs( + auto_batch_casting_config.casted_dimensionality + - step_execution_dimensionality + ) + > 1 + ): + raise ExecutionEngineRuntimeError( + public_message=f"Detected a situation when parameter: " + f"{parameter_name} has auto batch casted " + f"dimensionality {auto_batch_casting_config.casted_dimensionality} differing more than one level " + f"from step execution dimensionality: {step_execution_dimensionality}. " + f"This is most likely a bug. 
" + f"Contact Roboflow team through github issues " + f"(https://github.com/roboflow/inference/issues) providing full context of" + f"the problem - including workflow definition you use.", + context="workflow_execution | step_input_assembling", + ) + upper_level_lineage_dimensionality = ( + auto_batch_casting_config.casted_dimensionality - 1 + ) + if upper_level_lineage_dimensionality == 0 and not step_requests_batch_input: + # for batch collapse into scalar + return ( + created_batch, + indices, + contains_empty_scalar_step_output_selector or scalars_discarded, + ) + if auto_batch_casting_config.lineage_support is None: + upper_level_indices = [indices[0][:-1]] + else: + upper_level_lineage = auto_batch_casting_config.lineage_support[ + :upper_level_lineage_dimensionality + ] + if ( + upper_level_lineage_dimensionality < 1 + or len(upper_level_lineage) < upper_level_lineage_dimensionality + ): + if not step_requests_batch_input: + raise AssumptionError( + public_message=f"Detected a situation when parameter: {parameter_name} requires dimensionality " + f"wrapping, but registered lineage support is incompatible which should be detected " + f"by the compiler. This is most likely a bug. 
" + f"Contact Roboflow team through github issues " + f"(https://github.com/roboflow/inference/issues) providing full context of" + f"the problem - including workflow definition you use.", + context="workflow_execution | step_input_assembling", + ) + upper_level_indices = [()] + else: + upper_level_indices = dynamic_batches_manager.get_indices_for_data_lineage( + lineage=upper_level_lineage, + ) + result = reduce_batch_dimensionality( + indices=indices, + upper_level_index=upper_level_indices, + data=batch_content, + guard_of_indices_wrapping=guard_of_indices_wrapping, + ) + return ( + result, + result.indices, + contains_empty_scalar_step_output_selector or scalars_discarded, + ) + + def _flatten_batch_oriented_inputs( inputs: list, dimensionality: int, diff --git a/inference/core/workflows/execution_engine/v1/executor/output_constructor.py b/inference/core/workflows/execution_engine/v1/executor/output_constructor.py index 2836911fb3..e0bfff65bc 100644 --- a/inference/core/workflows/execution_engine/v1/executor/output_constructor.py +++ b/inference/core/workflows/execution_engine/v1/executor/output_constructor.py @@ -1,5 +1,6 @@ import traceback -from typing import Any, Callable, Dict, List, Optional, Union +from collections import defaultdict +from typing import Any, Callable, Dict, List, Optional, Set, Union import numpy as np import supervision as sv @@ -11,6 +12,7 @@ ) from inference.core.workflows.errors import AssumptionError, ExecutionEngineRuntimeError from inference.core.workflows.execution_engine.constants import ( + TOP_LEVEL_LINEAGES_KEY, WORKFLOW_INPUT_BATCH_LINEAGE_ID, ) from inference.core.workflows.execution_engine.entities.base import ( @@ -72,6 +74,21 @@ def construct_workflow_output( non_batch_outputs[output.name] = data_piece if not batch_oriented_outputs: return [non_batch_outputs] + outputs_for_generated_lineage = defaultdict(set) + outputs_for_input_dictated_lineage = set() + for output in workflow_outputs: + if output.name not in 
batch_oriented_outputs: + outputs_for_input_dictated_lineage.add(output.name) + continue + top_level_lineage = node_as( + execution_graph=execution_graph, + node=construct_output_selector(name=output.name), + expected_type=OutputNode, + ).data_lineage[0] + if top_level_lineage == WORKFLOW_INPUT_BATCH_LINEAGE_ID: + outputs_for_input_dictated_lineage.add(output.name) + else: + outputs_for_generated_lineage[top_level_lineage].add(output.name) dimensionality_for_output_nodes = { output.name: node_as( execution_graph=execution_graph, @@ -80,23 +97,71 @@ def construct_workflow_output( ).dimensionality for output in workflow_outputs } + results = create_outputs_for_input_induced_lineages( + output_name2indices=output_name2indices, + outputs_for_input_dictated_lineage=outputs_for_input_dictated_lineage, + workflow_outputs=workflow_outputs, + execution_data_manager=execution_data_manager, + serialize_results=serialize_results, + kinds_serializers=kinds_serializers, + kinds_of_output_nodes=kinds_of_output_nodes, + non_batch_outputs=non_batch_outputs, + dimensionality_for_output_nodes=dimensionality_for_output_nodes, + batch_oriented_outputs=batch_oriented_outputs, + ) + if len(results) == 0 and len(outputs_for_generated_lineage) > 0: + results.append({}) + for generated_lineage, outputs_names in outputs_for_generated_lineage.items(): + results_for_outputs_of_generated_lineage = ( + create_outputs_for_generated_lineage_outputs( + generated_lineage=generated_lineage, + output_name2indices=output_name2indices, + outputs_for_generated_lineage=outputs_names, + workflow_outputs=workflow_outputs, + execution_data_manager=execution_data_manager, + serialize_results=serialize_results, + kinds_serializers=kinds_serializers, + kinds_of_output_nodes=kinds_of_output_nodes, + dimensionality_for_output_nodes=dimensionality_for_output_nodes, + ) + ) + for output in results: + output.update(results_for_outputs_of_generated_lineage) + return results + + +def 
create_outputs_for_input_induced_lineages( + output_name2indices: Dict[str, Optional[List[tuple]]], + outputs_for_input_dictated_lineage: Set[str], + workflow_outputs: List[JsonField], + execution_data_manager: ExecutionDataManager, + serialize_results: bool, + kinds_serializers: Dict[str, Callable[[Any], Any]], + kinds_of_output_nodes: Dict[ + str, Union[List[Union[Kind, str]], Dict[str, List[Union[Kind, str]]]] + ], + non_batch_outputs: Dict[str, Any], + dimensionality_for_output_nodes: Dict[str, int], + batch_oriented_outputs: Set[str], +) -> List[Dict[str, Any]]: outputs_arrays: Dict[str, Optional[list]] = { name: create_array(indices=np.array(indices)) for name, indices in output_name2indices.items() - if name in batch_oriented_outputs + if name in outputs_for_input_dictated_lineage and name in batch_oriented_outputs + } + name2selector = { + output.name: output.selector + for output in workflow_outputs + if output.name in outputs_for_input_dictated_lineage } - name2selector = {output.name: output.selector for output in workflow_outputs} outputs_requested_in_parent_coordinates = { output.name for output in workflow_outputs if output.coordinates_system is CoordinatesSystem.PARENT } - major_batch_size = len( - execution_data_manager.get_lineage_indices( - lineage=[WORKFLOW_INPUT_BATCH_LINEAGE_ID] - ) - ) - for name in batch_oriented_outputs: + for name in outputs_for_input_dictated_lineage: + if name not in batch_oriented_outputs: + continue array = outputs_arrays[name] indices = output_name2indices[name] data = execution_data_manager.get_batch_data( @@ -133,6 +198,14 @@ def construct_workflow_output( context="workflow_execution | output_construction", ) results = [] + if not outputs_arrays: + major_batch_size = 1 if len(non_batch_outputs) > 0 else 0 + else: + major_batch_size = len( + execution_data_manager.get_lineage_indices( + lineage=[WORKFLOW_INPUT_BATCH_LINEAGE_ID] + ) + ) for i in range(major_batch_size): single_result = {} for name, value in 
non_batch_outputs.items(): @@ -154,6 +227,91 @@ def construct_workflow_output( return results +def create_outputs_for_generated_lineage_outputs( + generated_lineage: str, + output_name2indices: Dict[str, Optional[List[tuple]]], + outputs_for_generated_lineage: Set[str], + workflow_outputs: List[JsonField], + execution_data_manager: ExecutionDataManager, + serialize_results: bool, + kinds_serializers: Dict[str, Callable[[Any], Any]], + kinds_of_output_nodes: Dict[ + str, Union[List[Union[Kind, str]], Dict[str, List[Union[Kind, str]]]] + ], + dimensionality_for_output_nodes: Dict[str, int], +) -> Dict[str, List[Any]]: + outputs_arrays: Dict[str, Optional[list]] = { + name: create_array(indices=np.array(indices)) + for name, indices in output_name2indices.items() + if name in outputs_for_generated_lineage + } + name2selector = { + output.name: output.selector + for output in workflow_outputs + if output.name in outputs_for_generated_lineage + } + outputs_requested_in_parent_coordinates = { + output.name + for output in workflow_outputs + if output.coordinates_system is CoordinatesSystem.PARENT + } + for name in outputs_for_generated_lineage: + array = outputs_arrays[name] + indices = output_name2indices[name] + data = execution_data_manager.get_batch_data( + selector=name2selector[name], + indices=indices, + ) + for index, data_piece in zip(indices, data): + if ( + name in outputs_requested_in_parent_coordinates + and data_contains_sv_detections(data=data_piece) + ): + data_piece = convert_sv_detections_coordinates(data=data_piece) + if serialize_results: + output_kind = kinds_of_output_nodes[name] + data_piece = serialize_data_piece( + output_name=name, + data_piece=data_piece, + kind=output_kind, + kinds_serializers=kinds_serializers, + ) + try: + place_data_in_array( + array=array, + index=index, + data=data_piece, + ) + except (TypeError, IndexError): + raise ExecutionEngineRuntimeError( + public_message=f"Could not produce output {name} die to mismatch in " + 
f"declared output dimensions versus actual ones." + f"This is most likely a bug. Contact Roboflow team through github issues " + f"(https://github.com/roboflow/inference/issues) providing full context of" + f"the problem - including workflow definition you use.", + context="workflow_execution | output_construction", + ) + major_batch_size = len( + execution_data_manager.get_lineage_indices(lineage=[generated_lineage]) + ) + results = {name: [] for name in outputs_arrays} + for i in range(major_batch_size): + for name, array in outputs_arrays.items(): + if array is None or len(array) <= i: + level = dimensionality_for_output_nodes[name] - 1 + if level > 0: + element = create_empty_index_array( + level=level, + accumulator=[], + ) + else: + element = None + else: + element = array[i] + results[name].append(element) + return results + + def create_array(indices: np.ndarray) -> Optional[list]: if indices.size == 0: return None diff --git a/inference/core/workflows/prototypes/block.py b/inference/core/workflows/prototypes/block.py index cdb01e783f..e14a16bca6 100644 --- a/inference/core/workflows/prototypes/block.py +++ b/inference/core/workflows/prototypes/block.py @@ -56,8 +56,9 @@ def get_output_dimensionality_offset( @classmethod def accepts_batch_input(cls) -> bool: - return len(cls.get_parameters_accepting_batches()) > 0 or len( - cls.get_parameters_accepting_batches_and_scalars() + return ( + len(cls.get_parameters_accepting_batches()) > 0 + or len(cls.get_parameters_accepting_batches_and_scalars()) > 0 ) @classmethod @@ -68,6 +69,10 @@ def get_parameters_accepting_batches(cls) -> List[str]: def get_parameters_accepting_batches_and_scalars(cls) -> List[str]: return [] + @classmethod + def get_parameters_enforcing_auto_batch_casting(cls) -> List[str]: + return [] + @classmethod def accepts_empty_values(cls) -> bool: return False diff --git a/tests/benchmarks/core/test_speed_benchmark.py b/tests/benchmarks/core/test_speed_benchmark.py index 1fd1cd80e9..4cf0d8fa65 
100644 --- a/tests/benchmarks/core/test_speed_benchmark.py +++ b/tests/benchmarks/core/test_speed_benchmark.py @@ -14,7 +14,7 @@ def dataset_reference() -> tuple[list[np.ndarray], set[tuple[int, int]]]: # args of inference benchmark python-package-speed -m yolov8n-seg-640 -bi 10000 command args = { - "dataset_reference": "coco", + "dataset_reference": "coco", "warm_up_inferences": 10, "benchmark_inferences": 10000, "batch_size": 1, @@ -23,6 +23,7 @@ def dataset_reference() -> tuple[list[np.ndarray], set[tuple[int, int]]]: "output_location": None, } + def test_benchmark_equivalent_rfdetr(benchmark, dataset_reference): images, image_sizes = dataset_reference @@ -30,6 +31,7 @@ def test_benchmark_equivalent_rfdetr(benchmark, dataset_reference): benchmark(model.infer, images) + def test_benchmark_equivalent_yolov8n_seg(benchmark, dataset_reference): images, image_sizes = dataset_reference @@ -37,9 +39,10 @@ def test_benchmark_equivalent_yolov8n_seg(benchmark, dataset_reference): benchmark(model.infer, images) + def test_benchmark_equivalent_yolov8n(benchmark, dataset_reference): images, image_sizes = dataset_reference model = get_model(model_id="yolov8n-640", api_key=None) - benchmark(model.infer, images) \ No newline at end of file + benchmark(model.infer, images) diff --git a/tests/common.py b/tests/common.py index 157a4e6373..f66492b240 100644 --- a/tests/common.py +++ b/tests/common.py @@ -20,12 +20,14 @@ def assert_localized_predictions_match( # in that, if after sorting by confidence the predictions are not ordered the same, likely they wouldn't pass this assertion anyway # the rigid assumption there is that the smallest gap between confidences is higher than our similarity threshold - assert len(sv_result_prediction) == len(sv_reference_prediction), "Predictions must have the same number of detections" + assert len(sv_result_prediction) == len( + sv_reference_prediction + ), "Predictions must have the same number of detections" assert np.allclose( 
sv_result_prediction.xyxy, sv_reference_prediction.xyxy, - atol=box_pixel_tolerance + atol=box_pixel_tolerance, ), ( f"Bounding boxes must match with a tolerance of {box_pixel_tolerance} pixels, " f"got {sv_result_prediction.xyxy} and {sv_reference_prediction.xyxy}" @@ -35,7 +37,7 @@ def assert_localized_predictions_match( assert np.allclose( sv_result_prediction.confidence, sv_reference_prediction.confidence, - atol=box_confidence_tolerance + atol=box_confidence_tolerance, ), ( f"Confidence must match with a tolerance of {box_confidence_tolerance}, " f"got {sv_result_prediction.confidence} and {sv_reference_prediction.confidence}" @@ -43,18 +45,23 @@ def assert_localized_predictions_match( if sv_reference_prediction.class_id is not None: assert np.array_equal( - sv_result_prediction.class_id, - sv_reference_prediction.class_id - ), ( - f"Class IDs must match, got {sv_result_prediction.class_id} and {sv_reference_prediction.class_id}" - ) - + sv_result_prediction.class_id, sv_reference_prediction.class_id + ), f"Class IDs must match, got {sv_result_prediction.class_id} and {sv_reference_prediction.class_id}" + # now for keypoint and mask specific assertions if sv_reference_prediction.mask is not None: - assert sv_result_prediction.mask is not None, "Mask must be present for instance segmentation predictions" - iou = np.sum(sv_result_prediction.mask & sv_reference_prediction.mask, axis=(1, 2)) / np.sum(sv_result_prediction.mask | sv_reference_prediction.mask, axis=(1, 2)) - assert np.all(iou > mask_iou_threshold), f"Mask IOU must be greater than {mask_iou_threshold} for all predictions, got {iou}" + assert ( + sv_result_prediction.mask is not None + ), "Mask must be present for instance segmentation predictions" + iou = np.sum( + sv_result_prediction.mask & sv_reference_prediction.mask, axis=(1, 2) + ) / np.sum( + sv_result_prediction.mask | sv_reference_prediction.mask, axis=(1, 2) + ) + assert np.all( + iou > mask_iou_threshold + ), f"Mask IOU must be greater than 
{mask_iou_threshold} for all predictions, got {iou}" if all("keypoints" not in p for p in reference_prediction["predictions"]): return None @@ -62,12 +69,14 @@ def assert_localized_predictions_match( result_prediction_keypoints = sv.KeyPoints.from_inference(result_prediction) reference_prediction_keypoints = sv.KeyPoints.from_inference(reference_prediction) - assert len(result_prediction_keypoints) == len(reference_prediction_keypoints), "Keypoints must have the same number of keypoints" + assert len(result_prediction_keypoints) == len( + reference_prediction_keypoints + ), "Keypoints must have the same number of keypoints" assert np.allclose( result_prediction_keypoints.xy, reference_prediction_keypoints.xy, - atol=keypoint_pixel_tolerance + atol=keypoint_pixel_tolerance, ), ( f"Keypoints must match with a tolerance of {keypoint_pixel_tolerance} pixels, " f"got {result_prediction_keypoints.xy} and {reference_prediction_keypoints.xy}" @@ -78,7 +87,7 @@ def assert_localized_predictions_match( assert np.allclose( result_prediction_keypoints.confidence, reference_prediction_keypoints.confidence, - atol=keypoint_confidence_tolerance + atol=keypoint_confidence_tolerance, ), ( f"Keypoint confidence must match with a tolerance of {keypoint_confidence_tolerance}, " f"got {result_prediction_keypoints.confidence} and {reference_prediction_keypoints.confidence}" @@ -87,10 +96,8 @@ def assert_localized_predictions_match( if result_prediction_keypoints.class_id is not None: assert np.array_equal( result_prediction_keypoints.class_id, - reference_prediction_keypoints.class_id - ), ( - f"Keypoint class IDs must match, got {result_prediction_keypoints.class_id} and {reference_prediction_keypoints.class_id}" - ) + reference_prediction_keypoints.class_id, + ), f"Keypoint class IDs must match, got {result_prediction_keypoints.class_id} and {reference_prediction_keypoints.class_id}" def assert_classification_predictions_match( @@ -98,20 +105,24 @@ def 
assert_classification_predictions_match( reference_prediction: dict, confidence_tolerance: float = 1e-5, ) -> None: - assert type(result_prediction) == type(reference_prediction), "Predictions must be of the same type" - assert len(result_prediction["predictions"]) == len(reference_prediction["predictions"]), "Predictions must have the same number of predictions" + assert type(result_prediction) == type( + reference_prediction + ), "Predictions must be of the same type" + assert len(result_prediction["predictions"]) == len( + reference_prediction["predictions"] + ), "Predictions must have the same number of predictions" if isinstance(reference_prediction["predictions"], dict): - assert sorted(result_prediction["predicted_classes"]) == sorted(reference_prediction["predicted_classes"]), ( - f"Predicted classes must match, got {result_prediction['predicted_classes']} and {result_prediction['predicted_classes']}" - ) + assert sorted(result_prediction["predicted_classes"]) == sorted( + reference_prediction["predicted_classes"] + ), f"Predicted classes must match, got {result_prediction['predicted_classes']} and {result_prediction['predicted_classes']}" else: - assert result_prediction["top"] == reference_prediction["top"], ( - f"Top prediction must match, got {result_prediction['top']} and {reference_prediction['top']}" - ) + assert ( + result_prediction["top"] == reference_prediction["top"] + ), f"Top prediction must match, got {result_prediction['top']} and {reference_prediction['top']}" assert np.allclose( result_prediction["confidence"], reference_prediction["confidence"], - atol=confidence_tolerance + atol=confidence_tolerance, ), ( f"Confidences must match with a tolerance of {confidence_tolerance}, " f"got {result_prediction['confidence']} and {reference_prediction['confidence']}" diff --git a/tests/conftest.py b/tests/conftest.py index 8b2b10caac..8678455903 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -3,10 +3,21 @@ import pytest -ASSETS_DIR = 
os.path.abspath(os.path.join(os.path.dirname(__file__), "inference", "unit_tests", "core", "interfaces", "assets")) +ASSETS_DIR = os.path.abspath( + os.path.join( + os.path.dirname(__file__), + "inference", + "unit_tests", + "core", + "interfaces", + "assets", + ) +) os.environ["TELEMETRY_OPT_OUT"] = "True" -os.environ["ONNXRUNTIME_EXECUTION_PROVIDERS"] = "[CUDAExecutionProvider,CPUExecutionProvider]" +os.environ["ONNXRUNTIME_EXECUTION_PROVIDERS"] = ( + "[CUDAExecutionProvider,CPUExecutionProvider]" +) @pytest.fixture diff --git a/tests/google_colab/conftest.py b/tests/google_colab/conftest.py index 7b1a733872..30b581c5d6 100644 --- a/tests/google_colab/conftest.py +++ b/tests/google_colab/conftest.py @@ -11,9 +11,13 @@ os.environ["ONNXRUNTIME_EXECUTION_PROVIDERS"] = "[CUDAExecutionProvider]" -REFERENCE_IMAGE_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), "assets", "dog.jpeg")) +REFERENCE_IMAGE_PATH = os.path.abspath( + os.path.join(os.path.dirname(__file__), "assets", "dog.jpeg") +) REFERENCE_VIDEO_URL = "https://drive.google.com/uc?id=1vVwjW1dE1drIdd4ZSILfbCGPD4weoNiu" -REFERENCE_VIDEO_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), "assets", "video.mp4")) +REFERENCE_VIDEO_PATH = os.path.abspath( + os.path.join(os.path.dirname(__file__), "assets", "video.mp4") +) PLAYER_DETECTION_MODEL_ID = "football-players-detection-3zvbc/11" PLAYER_CLASS_ID = 2 FOOTBALL_FIELD_DETECTOR_MODEL_ID = "football-field-detection-f07vi/14" @@ -35,5 +39,3 @@ def reference_video() -> str: return REFERENCE_VIDEO_PATH gdown.download(REFERENCE_VIDEO_URL, REFERENCE_VIDEO_PATH) return REFERENCE_VIDEO_PATH - - diff --git a/tests/google_colab/test_footbal_ai_functionalities.py b/tests/google_colab/test_footbal_ai_functionalities.py index 1739c767f4..543ead07cd 100644 --- a/tests/google_colab/test_footbal_ai_functionalities.py +++ b/tests/google_colab/test_footbal_ai_functionalities.py @@ -1,16 +1,21 @@ import numpy as np import supervision as sv from inference 
import get_model -from tests.google_colab.conftest import PLAYER_DETECTION_MODEL_ID, PLAYER_CLASS_ID, FOOTBALL_FIELD_DETECTOR_MODEL_ID +from tests.google_colab.conftest import ( + PLAYER_DETECTION_MODEL_ID, + PLAYER_CLASS_ID, + FOOTBALL_FIELD_DETECTOR_MODEL_ID, +) -def test_cropping_players( - reference_video: str, - roboflow_api_key: str -) -> None: +def test_cropping_players(reference_video: str, roboflow_api_key: str) -> None: # given - player_detection_model = get_model(model_id=PLAYER_DETECTION_MODEL_ID, api_key=roboflow_api_key) - frame_generator = sv.get_video_frames_generator(source_path=reference_video, stride=30) + player_detection_model = get_model( + model_id=PLAYER_DETECTION_MODEL_ID, api_key=roboflow_api_key + ) + frame_generator = sv.get_video_frames_generator( + source_path=reference_video, stride=30 + ) # when crops = [] @@ -26,18 +31,12 @@ def test_cropping_players( assert len(crops) >= 470 -def test_detecting_football_field( - reference_video: str, - roboflow_api_key: str -) -> None: +def test_detecting_football_field(reference_video: str, roboflow_api_key: str) -> None: # given field_detector_model = get_model(FOOTBALL_FIELD_DETECTOR_MODEL_ID) frame_generator = sv.get_video_frames_generator(reference_video) frame = next(frame_generator) - vertex_annotator = sv.VertexAnnotator( - color=sv.Color.from_hex("#FF1493"), - radius=8 - ) + vertex_annotator = sv.VertexAnnotator(color=sv.Color.from_hex("#FF1493"), radius=8) result = field_detector_model.infer(frame, confidence=0.3)[0] key_points = sv.KeyPoints.from_inference(result) filtered_key_points = key_points.confidence[0] > 0.5 diff --git a/tests/google_colab/test_supervision_interoperability.py b/tests/google_colab/test_supervision_interoperability.py index 19f4aadf49..e8f04dd823 100644 --- a/tests/google_colab/test_supervision_interoperability.py +++ b/tests/google_colab/test_supervision_interoperability.py @@ -9,12 +9,12 @@ def test_basic_object_detection_visualization(reference_image: 
np.ndarray) -> No # given model = get_model("yolov8n-640") box_annotator = sv.BoxAnnotator( - color=sv.ColorPalette.from_hex(['#FF8C00', '#00BFFF', '#FF1493', '#FFD700']), - thickness=2 + color=sv.ColorPalette.from_hex(["#FF8C00", "#00BFFF", "#FF1493", "#FFD700"]), + thickness=2, ) label_annotator = sv.LabelAnnotator( - color=sv.ColorPalette.from_hex(['#FF8C00', '#00BFFF', '#FF1493', '#FFD700']), - text_color=sv.Color.from_hex('#000000') + color=sv.ColorPalette.from_hex(["#FF8C00", "#00BFFF", "#FF1493", "#FFD700"]), + text_color=sv.Color.from_hex("#000000"), ) # when @@ -23,17 +23,15 @@ def test_basic_object_detection_visualization(reference_image: np.ndarray) -> No annotated_frame = reference_image.copy() labels = [ f"{class_name} {confidence:.2f}" - for class_name, confidence - in zip(detections["class_name"], detections.confidence) + for class_name, confidence in zip( + detections["class_name"], detections.confidence + ) ] annotated_frame = box_annotator.annotate( - scene=annotated_frame, - detections=detections + scene=annotated_frame, detections=detections ) annotated_frame = label_annotator.annotate( - scene=annotated_frame, - detections=detections, - labels=labels + scene=annotated_frame, detections=detections, labels=labels ) # then @@ -45,11 +43,11 @@ def test_basic_instance_segmentation_visualization(reference_image: np.ndarray) # given model = get_model("yolov8n-seg-640") mask_annotator = sv.MaskAnnotator( - color=sv.ColorPalette.from_hex(['#FF8C00', '#00BFFF', '#FF1493', '#FFD700']), + color=sv.ColorPalette.from_hex(["#FF8C00", "#00BFFF", "#FF1493", "#FFD700"]), ) label_annotator = sv.LabelAnnotator( - color=sv.ColorPalette.from_hex(['#FF8C00', '#00BFFF', '#FF1493', '#FFD700']), - text_color=sv.Color.from_hex('#000000') + color=sv.ColorPalette.from_hex(["#FF8C00", "#00BFFF", "#FF1493", "#FFD700"]), + text_color=sv.Color.from_hex("#000000"), ) # when @@ -58,17 +56,15 @@ def test_basic_instance_segmentation_visualization(reference_image: np.ndarray) 
annotated_frame = reference_image.copy() labels = [ f"{class_name} {confidence:.2f}" - for class_name, confidence - in zip(detections["class_name"], detections.confidence) + for class_name, confidence in zip( + detections["class_name"], detections.confidence + ) ] annotated_frame = mask_annotator.annotate( - scene=annotated_frame, - detections=detections + scene=annotated_frame, detections=detections ) annotated_frame = label_annotator.annotate( - scene=annotated_frame, - detections=detections, - labels=labels + scene=annotated_frame, detections=detections, labels=labels ) # then @@ -87,8 +83,7 @@ def test_basic_pose_estimation_visualization(reference_image: np.ndarray) -> Non key_points = sv.KeyPoints.from_inference(result_raw) annotated_frame = reference_image.copy() annotated_frame = vertex_annotator.annotate( - scene=annotated_frame, - key_points=key_points + scene=annotated_frame, key_points=key_points ) annotated_frame = label_annotator.annotate( scene=annotated_frame, @@ -107,8 +102,8 @@ def test_basic_tracking(reference_image: np.ndarray) -> None: tracker.reset() tracker_annotator = sv.TraceAnnotator() box_annotator = sv.BoxAnnotator( - color=sv.ColorPalette.from_hex(['#FF8C00', '#00BFFF', '#FF1493', '#FFD700']), - thickness=2 + color=sv.ColorPalette.from_hex(["#FF8C00", "#00BFFF", "#FF1493", "#FFD700"]), + thickness=2, ) # when diff --git a/tests/google_colab/test_yolo_models.py b/tests/google_colab/test_yolo_models.py index e39a5a64d8..1def8ceee3 100644 --- a/tests/google_colab/test_yolo_models.py +++ b/tests/google_colab/test_yolo_models.py @@ -305,7 +305,6 @@ def test_yolov11n_instance_segmentation_inference(reference_image: np.ndarray) - assert len(result.mask) > 0, "At least one prediction is expected" - def test_yolov11s_instance_segmentation_inference(reference_image: np.ndarray) -> None: # given model = get_model("yolov11s-seg-640") diff --git a/tests/inference/hosted_platform_tests/test_workflows.py 
b/tests/inference/hosted_platform_tests/test_workflows.py index 33654bd321..8d34b6c3b2 100644 --- a/tests/inference/hosted_platform_tests/test_workflows.py +++ b/tests/inference/hosted_platform_tests/test_workflows.py @@ -129,7 +129,7 @@ def test_get_versions_of_execution_engine(object_detection_service_url: str) -> # then response.raise_for_status() response_data = response.json() - assert response_data["versions"] == ["1.5.0"] + assert response_data["versions"] == ["1.6.0"] FUNCTION = """ diff --git a/tests/inference/integration_tests/test_workflow_endpoints.py b/tests/inference/integration_tests/test_workflow_endpoints.py index e8d3c38135..da98e9ecc2 100644 --- a/tests/inference/integration_tests/test_workflow_endpoints.py +++ b/tests/inference/integration_tests/test_workflow_endpoints.py @@ -691,7 +691,7 @@ def test_get_versions_of_execution_engine(server_url: str) -> None: # then response.raise_for_status() response_data = response.json() - assert response_data["versions"] == ["1.5.0"] + assert response_data["versions"] == ["1.6.0"] def test_getting_block_schema_using_get_endpoint(server_url) -> None: diff --git a/tests/inference/unit_tests/core/utils/test_file_system.py b/tests/inference/unit_tests/core/utils/test_file_system.py index 68b2e83c9e..7792a300e3 100644 --- a/tests/inference/unit_tests/core/utils/test_file_system.py +++ b/tests/inference/unit_tests/core/utils/test_file_system.py @@ -449,7 +449,7 @@ def test_atomic_path_with_existing_file_override(empty_local_dir: str) -> None: target_path = os.path.join(empty_local_dir, "test.txt") original_content = "original" new_content = "new content" - + with open(target_path, "w") as f: f.write(original_content) @@ -511,7 +511,7 @@ def test_dump_json_atomic_when_file_exists_with_override(empty_local_dir: str) - file_path = os.path.join(empty_local_dir, "test.json") original_content = {"old": "data"} new_content = {"new": "data"} - + with open(file_path, "w") as f: json.dump(original_content, f) @@ -569,7 
+569,9 @@ def test_dump_text_lines_atomic_when_file_does_not_exist(empty_local_dir: str) - assert f.read() == "line1\nline2\nline3" -def test_dump_text_lines_atomic_when_file_exists_no_override(empty_local_dir: str) -> None: +def test_dump_text_lines_atomic_when_file_exists_no_override( + empty_local_dir: str, +) -> None: # given file_path = os.path.join(empty_local_dir, "test.txt") touch(file_path) @@ -579,12 +581,14 @@ def test_dump_text_lines_atomic_when_file_exists_no_override(empty_local_dir: st dump_text_lines_atomic(path=file_path, content=["line1"], allow_override=False) -def test_dump_text_lines_atomic_when_file_exists_with_override(empty_local_dir: str) -> None: +def test_dump_text_lines_atomic_when_file_exists_with_override( + empty_local_dir: str, +) -> None: # given file_path = os.path.join(empty_local_dir, "test.txt") with open(file_path, "w") as f: f.write("original content") - + new_content = ["new", "lines"] # when @@ -654,7 +658,7 @@ def test_dump_bytes_atomic_when_file_exists_with_override(empty_local_dir: str) file_path = os.path.join(empty_local_dir, "test.bin") with open(file_path, "wb") as f: f.write(b"original data") - + new_content = b"new binary data" # when @@ -687,14 +691,14 @@ def test_atomic_write_maintains_original_on_error(empty_local_dir: str) -> None: # given file_path = os.path.join(empty_local_dir, "test.txt") original_content = "original content that should be preserved" - + with open(file_path, "w") as f: f.write(original_content) - + # when - simulate a write error by mocking class WriteError(Exception): pass - + try: with AtomicPath(file_path, allow_override=True) as temp_path: with open(temp_path, "w") as f: @@ -703,7 +707,7 @@ class WriteError(Exception): raise WriteError("Simulated write failure") except WriteError: pass - + # then - original file should be unchanged assert os.path.exists(file_path) with open(file_path) as f: @@ -714,20 +718,20 @@ def test_atomic_operations_concurrent_safety(empty_local_dir: str) -> None: 
"""Test that temp files don't collide when multiple atomic writes happen""" # given target_path = os.path.join(empty_local_dir, "test.txt") - + # when - create multiple atomic writes to same target temp_paths = [] contexts = [] - + for i in range(3): ctx = AtomicPath(target_path, allow_override=True) temp_path = ctx.__enter__() temp_paths.append(temp_path) contexts.append(ctx) - + # then - all temp paths should be unique assert len(set(temp_paths)) == 3 - + # cleanup for ctx, temp_path in zip(contexts, temp_paths): try: diff --git a/tests/inference/unit_tests/models/test_owlv2_max_detections.py b/tests/inference/unit_tests/models/test_owlv2_max_detections.py index 7b69eaab10..86296d43ed 100644 --- a/tests/inference/unit_tests/models/test_owlv2_max_detections.py +++ b/tests/inference/unit_tests/models/test_owlv2_max_detections.py @@ -12,7 +12,9 @@ def test_infer_from_embed_respects_max_detections(monkeypatch): dtype=torch.float32, ) image_class_embeds = torch.zeros((4, 2)) - model.get_image_embeds = MagicMock(return_value=(None, image_boxes, image_class_embeds, None, None)) + model.get_image_embeds = MagicMock( + return_value=(None, image_boxes, image_class_embeds, None, None) + ) def fake_get_class_preds_from_embeds(*args, **kwargs): boxes = image_boxes @@ -20,8 +22,14 @@ def fake_get_class_preds_from_embeds(*args, **kwargs): scores = torch.tensor([0.9, 0.8, 0.7, 0.6]) return boxes, classes, scores - monkeypatch.setattr(owlv2, "get_class_preds_from_embeds", fake_get_class_preds_from_embeds) - monkeypatch.setattr(owlv2.torchvision.ops, "nms", lambda boxes, scores, iou: torch.arange(boxes.shape[0])) + monkeypatch.setattr( + owlv2, "get_class_preds_from_embeds", fake_get_class_preds_from_embeds + ) + monkeypatch.setattr( + owlv2.torchvision.ops, + "nms", + lambda boxes, scores, iou: torch.arange(boxes.shape[0]), + ) query_embeddings = {"a": {"positive": torch.zeros((1, 2)), "negative": None}} predictions = model.infer_from_embed( diff --git 
a/tests/inference/unit_tests/models/test_rfdetr.py b/tests/inference/unit_tests/models/test_rfdetr.py index 8082150448..4ac545672c 100644 --- a/tests/inference/unit_tests/models/test_rfdetr.py +++ b/tests/inference/unit_tests/models/test_rfdetr.py @@ -80,7 +80,17 @@ def test_sigmoid_stable_mixed_values(): result = model.sigmoid_stable(x) # then - expected = np.array([0.0, 0.0000453978687024, 0.2689414213699951, 0.5, 0.7310585786300049, 0.9999546021312976, 1]) + expected = np.array( + [ + 0.0, + 0.0000453978687024, + 0.2689414213699951, + 0.5, + 0.7310585786300049, + 0.9999546021312976, + 1, + ] + ) assert np.allclose(result, expected, atol=1e-15) diff --git a/tests/inference/unit_tests/usage_tracking/conftest.py b/tests/inference/unit_tests/usage_tracking/conftest.py index 6e55576f72..052db5d3ee 100644 --- a/tests/inference/unit_tests/usage_tracking/conftest.py +++ b/tests/inference/unit_tests/usage_tracking/conftest.py @@ -11,6 +11,7 @@ def usage_collector_with_mocked_threads(): This prevents the actual threads from starting during tests. 
""" import threading + original_thread = threading.Thread original_event = threading.Event @@ -19,6 +20,7 @@ def usage_collector_with_mocked_threads(): threading.Event = MagicMock() from inference.usage_tracking import collector as collector_module + importlib.reload(collector_module) usage_collector = collector_module.usage_collector diff --git a/tests/inference/unit_tests/usage_tracking/test_collector.py b/tests/inference/unit_tests/usage_tracking/test_collector.py index 401d6ce1a0..c1fca79917 100644 --- a/tests/inference/unit_tests/usage_tracking/test_collector.py +++ b/tests/inference/unit_tests/usage_tracking/test_collector.py @@ -895,7 +895,9 @@ def test_system_info_with_dedicated_deployment_id(usage_collector_with_mocked_th assert system_info[k] == v -def test_system_info_with_no_dedicated_deployment_id(usage_collector_with_mocked_threads): +def test_system_info_with_no_dedicated_deployment_id( + usage_collector_with_mocked_threads, +): # given system_info = usage_collector_with_mocked_threads.system_info( ip_address="w.x.y.z", hostname="hostname01" @@ -973,4 +975,9 @@ def test_func(api_key="test_key"): assert len(usage_collector._usage) == 1 assert "test_key" in usage_collector._usage assert "model:unknown" in usage_collector._usage["test_key"] - assert json.loads(usage_collector._usage["test_key"]["model:unknown"]["resource_details"]).get("error") == "test exception" + assert ( + json.loads( + usage_collector._usage["test_key"]["model:unknown"]["resource_details"] + ).get("error") + == "test exception" + ) diff --git a/tests/inference_sdk/unit_tests/http/test_client.py b/tests/inference_sdk/unit_tests/http/test_client.py index a38409d781..b24a72c350 100644 --- a/tests/inference_sdk/unit_tests/http/test_client.py +++ b/tests/inference_sdk/unit_tests/http/test_client.py @@ -2075,6 +2075,7 @@ async def test_infer_from_api_v1_async_when_request_succeed_for_object_detection "visualization": "aGVsbG8=", } + @mock.patch.object(client, 
"load_static_inference_input") def test_ocr_image_when_single_image_given_in_v1_mode( load_static_inference_input_mock: MagicMock, @@ -2152,7 +2153,9 @@ def test_ocr_image_when_trocr_selected_in_specific_variant( ) # when - result = http_client.ocr_image(inference_input="/some/image.jpg", model="trocr", version="trocr-small-printed") + result = http_client.ocr_image( + inference_input="/some/image.jpg", model="trocr", version="trocr-small-printed" + ) # then assert result == { @@ -2162,7 +2165,7 @@ def test_ocr_image_when_trocr_selected_in_specific_variant( assert requests_mock.request_history[0].json() == { "api_key": "my-api-key", "image": {"type": "base64", "value": "base64_image"}, - "trocr_version_id": "trocr-small-printed" + "trocr_version_id": "trocr-small-printed", }, "Request must contain API key and image encoded in standard format" @@ -2222,7 +2225,9 @@ async def test_ocr_image_async_when_trocr_selected( }, ) # when - result = await http_client.ocr_image_async(inference_input="/some/image.jpg", model="trocr") + result = await http_client.ocr_image_async( + inference_input="/some/image.jpg", model="trocr" + ) # then assert result == { @@ -2284,6 +2289,7 @@ async def test_ocr_image_async_when_trocr_selected_in_specific_variant( headers={"Content-Type": "application/json"}, ) + @mock.patch.object(client, "load_static_inference_input") def test_ocr_image_when_single_image_given_in_v0_mode( load_static_inference_input_mock: MagicMock, @@ -3656,14 +3662,13 @@ def test_infer_from_workflow_when_usage_of_profiler_enabled( # given api_url = "http://some.com" http_client = InferenceHTTPClient(api_key="my-api-key", api_url=api_url).configure( - inference_configuration=InferenceConfiguration(profiling_directory=empty_directory) + inference_configuration=InferenceConfiguration( + profiling_directory=empty_directory + ) ) requests_mock.post( f"{api_url}{endpoint_to_use}", - json={ - "outputs": [{"some": 3}], - "profiler_trace": [{"my": "trace"}] - }, + json={"outputs": 
[{"some": 3}], "profiler_trace": [{"my": "trace"}]}, ) load_nested_batches_of_inference_input_mock.side_effect = [ ("base64_image_1", 0.5), @@ -3707,7 +3712,9 @@ def test_infer_from_workflow_when_usage_of_profiler_enabled( }, }, "Request payload must contain api key, inputs and no cache flag" json_files_in_profiling_directory = glob(os.path.join(empty_directory, "*.json")) - assert len(json_files_in_profiling_directory) == 1, "Expected to find one JSON file with profiler trace" + assert ( + len(json_files_in_profiling_directory) == 1 + ), "Expected to find one JSON file with profiler trace" with open(json_files_in_profiling_directory[0], "r") as f: data = json.load(f) assert data == [{"my": "trace"}], "Trace content must be fully saved" @@ -3754,13 +3761,7 @@ def test_infer_from_workflow_when_nested_batch_of_inputs_provided( result = method( workspace_name="my_workspace", images={"image_1": [["1", "2"], ["3", "4", "5"], ["6"]]}, - parameters={ - "batch_oriented_param": [ - ["a", "b"], - ["c", "d", "e"], - ["f"] - ] - }, + parameters={"batch_oriented_param": [["a", "b"], ["c", "d", "e"], ["f"]]}, **{parameter_name: "my_workflow"}, ) @@ -3929,9 +3930,11 @@ def test_list_inference_pipelines(requests_mock: Mocker) -> None: f"{api_url}/inference_pipelines/list", json={ "status": "success", - "context": {"request_id": "52f5df39-b7de-4a56-8c42-b979d365cfa0", - "pipeline_id": None}, - "pipelines": ["acd62146-edca-4253-8eeb-40c88906cd70"] + "context": { + "request_id": "52f5df39-b7de-4a56-8c42-b979d365cfa0", + "pipeline_id": None, + }, + "pipelines": ["acd62146-edca-4253-8eeb-40c88906cd70"], }, ) @@ -3941,12 +3944,15 @@ def test_list_inference_pipelines(requests_mock: Mocker) -> None: # then assert result == { "status": "success", - "context": {"request_id": "52f5df39-b7de-4a56-8c42-b979d365cfa0", - "pipeline_id": None}, - "pipelines": ["acd62146-edca-4253-8eeb-40c88906cd70"] + "context": { + "request_id": "52f5df39-b7de-4a56-8c42-b979d365cfa0", + "pipeline_id": None, + }, 
+ "pipelines": ["acd62146-edca-4253-8eeb-40c88906cd70"], } - assert requests_mock.request_history[0].json() == {"api_key": "my-api-key"}, \ - "Expected payload to contain API key" + assert requests_mock.request_history[0].json() == { + "api_key": "my-api-key" + }, "Expected payload to contain API key" def test_list_inference_pipelines_on_auth_error(requests_mock: Mocker) -> None: @@ -3981,11 +3987,14 @@ def test_get_inference_pipeline_status(requests_mock: Mocker) -> None: assert result == { "status": "success", } - assert requests_mock.request_history[0].json() == {"api_key": "my-api-key"}, \ - "Expected payload to contain API key" + assert requests_mock.request_history[0].json() == { + "api_key": "my-api-key" + }, "Expected payload to contain API key" -def test_get_inference_pipeline_status_when_pipeline_id_empty(requests_mock: Mocker) -> None: +def test_get_inference_pipeline_status_when_pipeline_id_empty( + requests_mock: Mocker, +) -> None: # given api_url = "http://some.com" http_client = InferenceHTTPClient(api_key="my-api-key", api_url=api_url) @@ -3995,7 +4004,9 @@ def test_get_inference_pipeline_status_when_pipeline_id_empty(requests_mock: Moc _ = http_client.get_inference_pipeline_status(pipeline_id="") -def test_get_inference_pipeline_status_when_pipeline_id_not_found(requests_mock: Mocker) -> None: +def test_get_inference_pipeline_status_when_pipeline_id_not_found( + requests_mock: Mocker, +) -> None: # given api_url = "http://some.com" http_client = InferenceHTTPClient(api_key="my-api-key", api_url=api_url) @@ -4027,8 +4038,9 @@ def test_pause_inference_pipeline(requests_mock: Mocker) -> None: assert result == { "status": "success", } - assert requests_mock.request_history[0].json() == {"api_key": "my-api-key"}, \ - "Expected payload to contain API key" + assert requests_mock.request_history[0].json() == { + "api_key": "my-api-key" + }, "Expected payload to contain API key" def test_pause_inference_pipeline_when_pipeline_id_empty() -> None: @@ -4041,7 
+4053,9 @@ def test_pause_inference_pipeline_when_pipeline_id_empty() -> None: _ = http_client.pause_inference_pipeline(pipeline_id="") -def test_pause_inference_pipeline_when_pipeline_id_not_found(requests_mock: Mocker) -> None: +def test_pause_inference_pipeline_when_pipeline_id_not_found( + requests_mock: Mocker, +) -> None: # given api_url = "http://some.com" http_client = InferenceHTTPClient(api_key="my-api-key", api_url=api_url) @@ -4073,8 +4087,9 @@ def test_resume_inference_pipeline(requests_mock: Mocker) -> None: assert result == { "status": "success", } - assert requests_mock.request_history[0].json() == {"api_key": "my-api-key"}, \ - "Expected payload to contain API key" + assert requests_mock.request_history[0].json() == { + "api_key": "my-api-key" + }, "Expected payload to contain API key" def test_resume_inference_pipeline_when_pipeline_id_empty() -> None: @@ -4087,7 +4102,9 @@ def test_resume_inference_pipeline_when_pipeline_id_empty() -> None: _ = http_client.resume_inference_pipeline(pipeline_id="") -def test_resume_inference_pipeline_when_pipeline_id_not_found(requests_mock: Mocker) -> None: +def test_resume_inference_pipeline_when_pipeline_id_not_found( + requests_mock: Mocker, +) -> None: # given api_url = "http://some.com" http_client = InferenceHTTPClient(api_key="my-api-key", api_url=api_url) @@ -4119,8 +4136,9 @@ def test_terminate_inference_pipeline(requests_mock: Mocker) -> None: assert result == { "status": "success", } - assert requests_mock.request_history[0].json() == {"api_key": "my-api-key"}, \ - "Expected payload to contain API key" + assert requests_mock.request_history[0].json() == { + "api_key": "my-api-key" + }, "Expected payload to contain API key" def test_terminate_inference_pipeline_when_pipeline_id_empty() -> None: @@ -4133,7 +4151,9 @@ def test_terminate_inference_pipeline_when_pipeline_id_empty() -> None: _ = http_client.terminate_inference_pipeline(pipeline_id="") -def 
test_terminate_inference_pipeline_when_pipeline_id_not_found(requests_mock: Mocker) -> None: +def test_terminate_inference_pipeline_when_pipeline_id_not_found( + requests_mock: Mocker, +) -> None: # given api_url = "http://some.com" http_client = InferenceHTTPClient(api_key="my-api-key", api_url=api_url) @@ -4168,8 +4188,10 @@ def test_consume_inference_pipeline_result(requests_mock: Mocker) -> None: assert result == { "status": "success", } - assert requests_mock.request_history[0].json() == {"api_key": "my-api-key", "excluded_fields": ["a"]}, \ - "Expected payload to contain API key" + assert requests_mock.request_history[0].json() == { + "api_key": "my-api-key", + "excluded_fields": ["a"], + }, "Expected payload to contain API key" def test_consume_inference_pipeline_result_when_pipeline_id_empty() -> None: @@ -4182,7 +4204,9 @@ def test_consume_inference_pipeline_result_when_pipeline_id_empty() -> None: _ = http_client.consume_inference_pipeline_result(pipeline_id="") -def test_consume_inference_pipeline_result_when_pipeline_id_not_found(requests_mock: Mocker) -> None: +def test_consume_inference_pipeline_result_when_pipeline_id_not_found( + requests_mock: Mocker, +) -> None: # given api_url = "http://some.com" http_client = InferenceHTTPClient(api_key="my-api-key", api_url=api_url) @@ -4196,17 +4220,23 @@ def test_consume_inference_pipeline_result_when_pipeline_id_not_found(requests_m _ = http_client.consume_inference_pipeline_result(pipeline_id="my-pipeline") -def test_start_inference_pipeline_with_workflow_when_configuration_does_not_specify_workflow() -> None: +def test_start_inference_pipeline_with_workflow_when_configuration_does_not_specify_workflow() -> ( + None +): # given api_url = "http://some.com" http_client = InferenceHTTPClient(api_key="my-api-key", api_url=api_url) # when with pytest.raises(InvalidParameterError): - http_client.start_inference_pipeline_with_workflow(video_reference="rtsp://some/stream") + 
http_client.start_inference_pipeline_with_workflow( + video_reference="rtsp://some/stream" + ) -def test_start_inference_pipeline_with_workflow_when_configuration_does_over_specify_workflow() -> None: +def test_start_inference_pipeline_with_workflow_when_configuration_does_over_specify_workflow() -> ( + None +): # given api_url = "http://some.com" http_client = InferenceHTTPClient(api_key="my-api-key", api_url=api_url) @@ -4221,7 +4251,9 @@ def test_start_inference_pipeline_with_workflow_when_configuration_does_over_spe ) -def test_start_inference_pipeline_with_workflow_when_configuration_is_valid(requests_mock: Mocker) -> None: +def test_start_inference_pipeline_with_workflow_when_configuration_is_valid( + requests_mock: Mocker, +) -> None: # given api_url = "http://some.com" http_client = InferenceHTTPClient(api_key="my-api-key", api_url=api_url) @@ -4270,5 +4302,3 @@ def test_start_inference_pipeline_with_workflow_when_configuration_is_valid(requ "results_buffer_size": 64, }, } - - diff --git a/tests/inference_sdk/unit_tests/http/utils/test_loaders.py b/tests/inference_sdk/unit_tests/http/utils/test_loaders.py index 0004519611..af325fc663 100644 --- a/tests/inference_sdk/unit_tests/http/utils/test_loaders.py +++ b/tests/inference_sdk/unit_tests/http/utils/test_loaders.py @@ -658,9 +658,7 @@ def test_load_nested_batches_of_inference_input_when_single_element_is_given( load_static_inference_input_mock: MagicMock, ) -> None: # given - load_static_inference_input_mock.side_effect = [ - ["image_1"] - ] + load_static_inference_input_mock.side_effect = [["image_1"]] # when result = load_nested_batches_of_inference_input( @@ -668,7 +666,9 @@ def test_load_nested_batches_of_inference_input_when_single_element_is_given( ) # then - assert result == "image_1", "Expected direct result from load_static_inference_input()" + assert ( + result == "image_1" + ), "Expected direct result from load_static_inference_input()" @mock.patch.object(loaders, "load_static_inference_input") 
@@ -679,7 +679,7 @@ def test_load_nested_batches_of_inference_input_when_1d_batch_is_given( load_static_inference_input_mock.side_effect = [ ["image_1"], ["image_2"], - ["image_3"] + ["image_3"], ] # when @@ -688,7 +688,11 @@ def test_load_nested_batches_of_inference_input_when_1d_batch_is_given( ) # then - assert result == ["image_1", "image_2", "image_3"], "Expected direct result from load_static_inference_input()" + assert result == [ + "image_1", + "image_2", + "image_3", + ], "Expected direct result from load_static_inference_input()" @mock.patch.object(loaders, "load_static_inference_input") diff --git a/tests/inference_sdk/unit_tests/http/utils/test_requests.py b/tests/inference_sdk/unit_tests/http/utils/test_requests.py index ddfe771f7e..13931821cd 100644 --- a/tests/inference_sdk/unit_tests/http/utils/test_requests.py +++ b/tests/inference_sdk/unit_tests/http/utils/test_requests.py @@ -160,7 +160,9 @@ def test_inject_nested_batches_of_images_into_payload_when_single_image_given() assert result == {"image": {"type": "base64", "value": "img1"}} -def test_inject_nested_batches_of_images_into_payload_when_1d_batch_of_images_given() -> None: +def test_inject_nested_batches_of_images_into_payload_when_1d_batch_of_images_given() -> ( + None +): # when result = inject_nested_batches_of_images_into_payload( payload={}, @@ -176,7 +178,9 @@ def test_inject_nested_batches_of_images_into_payload_when_1d_batch_of_images_gi } -def test_inject_nested_batches_of_images_into_payload_when_nested_batch_of_images_given() -> None: +def test_inject_nested_batches_of_images_into_payload_when_nested_batch_of_images_given() -> ( + None +): # when result = inject_nested_batches_of_images_into_payload( payload={}, diff --git a/tests/workflows/integration_tests/compilation/stub_plugins/plugin_with_dimensionality_manipulation_blocks/__init__.py b/tests/workflows/integration_tests/compilation/stub_plugins/plugin_with_dimensionality_manipulation_blocks/__init__.py index 
f351ca713a..612d63da7b 100644 --- a/tests/workflows/integration_tests/compilation/stub_plugins/plugin_with_dimensionality_manipulation_blocks/__init__.py +++ b/tests/workflows/integration_tests/compilation/stub_plugins/plugin_with_dimensionality_manipulation_blocks/__init__.py @@ -440,7 +440,7 @@ def get_manifest(cls) -> Type[WorkflowBlockManifest]: return DecreasingDimensionalityManifest def run(self, images: Batch[WorkflowImageData]) -> BlockResult: - pass + return {"output": len(images)} def load_blocks() -> List[Type[WorkflowBlock]]: diff --git a/tests/workflows/integration_tests/compilation/test_compilation_of_workflow_with_invalid_dimension_of_references.py b/tests/workflows/integration_tests/compilation/test_compilation_of_workflow_with_invalid_dimension_of_references.py index f4ecf6df4b..5cf0106ea1 100644 --- a/tests/workflows/integration_tests/compilation/test_compilation_of_workflow_with_invalid_dimension_of_references.py +++ b/tests/workflows/integration_tests/compilation/test_compilation_of_workflow_with_invalid_dimension_of_references.py @@ -221,8 +221,9 @@ def test_compilation_of_workflow_where_step_attempts_decreasing_dimensionality_t } # when - with pytest.raises(StepOutputLineageError): - _ = compile_workflow( - workflow_definition=WORKFLOW_ATTEMPTING_TO_REDUCE_DIM_TO_ZERO, - init_parameters=workflow_init_parameters, - ) + _ = compile_workflow( + workflow_definition=WORKFLOW_ATTEMPTING_TO_REDUCE_DIM_TO_ZERO, + init_parameters=workflow_init_parameters, + ) + + # then - no error diff --git a/tests/workflows/integration_tests/compilation/test_compilation_of_workflow_with_invalid_plugin.py b/tests/workflows/integration_tests/compilation/test_compilation_of_workflow_with_invalid_plugin.py index cf0f0f0f58..f960d40f23 100644 --- a/tests/workflows/integration_tests/compilation/test_compilation_of_workflow_with_invalid_plugin.py +++ b/tests/workflows/integration_tests/compilation/test_compilation_of_workflow_with_invalid_plugin.py @@ -6,6 +6,7 @@ from 
inference.core.managers.base import ModelManager from inference.core.workflows.core_steps.common.entities import StepExecutionMode from inference.core.workflows.errors import BlockInterfaceError +from inference.core.workflows.execution_engine.constants import TOP_LEVEL_LINEAGES_KEY from inference.core.workflows.execution_engine.introspection import blocks_loader from inference.core.workflows.execution_engine.v1.compiler.core import compile_workflow @@ -138,11 +139,15 @@ def test_compilation_of_workflow_where_block_is_not_simd_but_defines_output_offs } # when - with pytest.raises(BlockInterfaceError): - _ = compile_workflow( - workflow_definition=WORKFLOW_WITH_INVALID_BLOCK_DECLARING_OFFSET_BEING_NOT_SIMD, - init_parameters=workflow_init_parameters, - ) + compiled_workflow = compile_workflow( + workflow_definition=WORKFLOW_WITH_INVALID_BLOCK_DECLARING_OFFSET_BEING_NOT_SIMD, + init_parameters=workflow_init_parameters, + ) + + # then + assert compiled_workflow.execution_graph.graph[TOP_LEVEL_LINEAGES_KEY] == { + "$steps.problematic_dimensions" + } WORKFLOW_WITH_INVALID_BLOCK_DECLARING_DIMENSIONALITY_REFERENCE_PROPERTY_AS_NON_BATCH = { diff --git a/tests/workflows/integration_tests/execution/stub_plugins/dimensionality_manipulation_plugin/detections_to_parent_coordinates_batch.py b/tests/workflows/integration_tests/execution/stub_plugins/dimensionality_manipulation_plugin/detections_to_parent_coordinates_batch.py index 974e11d7ab..414dfcfb65 100644 --- a/tests/workflows/integration_tests/execution/stub_plugins/dimensionality_manipulation_plugin/detections_to_parent_coordinates_batch.py +++ b/tests/workflows/integration_tests/execution/stub_plugins/dimensionality_manipulation_plugin/detections_to_parent_coordinates_batch.py @@ -103,14 +103,10 @@ def run( ) -> BlockResult: result = [] for i, (image, image_predictions) in enumerate(zip(images, images_predictions)): - print("Processing image", i) parent_id = image.parent_metadata.parent_id parent_coordinates = 
image.parent_metadata.origin_coordinates transformed_predictions = [] for j, prediction in enumerate(image_predictions): - print( - f"Processing prediction {j} - start {len(prediction)} - {prediction['parent_id']}" - ) prediction_copy = deepcopy(prediction) prediction_copy["parent_id"] = np.array([parent_id] * len(prediction)) if parent_coordinates: @@ -125,9 +121,6 @@ def run( prediction_copy["parent_dimensions"] = np.array( [dimensions] * len(prediction) ) - print( - f"Processing prediction {j} - end {len(prediction_copy)} - {prediction_copy['parent_id']}" - ) transformed_predictions.append({"predictions": prediction_copy}) result.append(transformed_predictions) return result diff --git a/tests/workflows/integration_tests/execution/stub_plugins/plugin_image_producer/__init__.py b/tests/workflows/integration_tests/execution/stub_plugins/plugin_image_producer/__init__.py new file mode 100644 index 0000000000..c9e645ec3d --- /dev/null +++ b/tests/workflows/integration_tests/execution/stub_plugins/plugin_image_producer/__init__.py @@ -0,0 +1,988 @@ +import json +from collections import defaultdict +from typing import Any, Dict, List, Literal, Optional, Tuple, Type, Union +from uuid import uuid4 + +import numpy as np +from pydantic import Field + +from inference.core.workflows.execution_engine.entities.base import ( + Batch, + ImageParentMetadata, + OutputDefinition, + WorkflowImageData, +) +from inference.core.workflows.execution_engine.entities.types import ( + IMAGE_KIND, + STRING_KIND, + Selector, + StepSelector, +) +from inference.core.workflows.execution_engine.v1.entities import FlowControl +from inference.core.workflows.prototypes.block import ( + BlockResult, + WorkflowBlock, + WorkflowBlockManifest, +) + + +class ImageProducerBlockManifest(WorkflowBlockManifest): + type: Literal["ImageProducer"] + shape: Tuple[int, int, int] = Field(default=(192, 168, 3)) + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return 
[OutputDefinition(name="image", kind=[IMAGE_KIND])] + + @classmethod + def get_execution_engine_compatibility(cls) -> Optional[str]: + return ">=1.3.0,<2.0.0" + + +class ImageProducerBlock(WorkflowBlock): + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return ImageProducerBlockManifest + + def run(self, shape: Tuple[int, int, int]) -> BlockResult: + image = WorkflowImageData( + parent_metadata=ImageParentMetadata(parent_id=f"image_producer.{uuid4()}"), + numpy_image=np.zeros(shape, dtype=np.uint8), + ) + return {"image": image} + + +class SingleImageConsumerManifest(WorkflowBlockManifest): + type: Literal["ImageConsumer"] + images: Selector(kind=[IMAGE_KIND]) + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [OutputDefinition(name="shapes", kind=[STRING_KIND])] + + @classmethod + def get_execution_engine_compatibility(cls) -> Optional[str]: + return ">=1.3.0,<2.0.0" + + @classmethod + def get_parameters_accepting_batches(cls) -> List[str]: + return ["images"] + + +class SingleImageConsumer(WorkflowBlock): + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return SingleImageConsumerManifest + + def run(self, images: Batch[WorkflowImageData]) -> BlockResult: + results = [] + for image in images: + results.append({"shapes": json.dumps(image.numpy_image.shape)}) + return results + + +class SingleImageConsumerNonSIMDManifest(WorkflowBlockManifest): + type: Literal["ImageConsumerNonSIMD"] + images: Selector(kind=[IMAGE_KIND]) + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [OutputDefinition(name="shapes", kind=[STRING_KIND])] + + @classmethod + def get_execution_engine_compatibility(cls) -> Optional[str]: + return ">=1.3.0,<2.0.0" + + +class SingleImageConsumerNonSIMD(WorkflowBlock): + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return SingleImageConsumerNonSIMDManifest + + def run(self, images: WorkflowImageData) -> BlockResult: 
+ return {"shapes": json.dumps(images.numpy_image.shape)} + + +class MultiSIMDImageConsumerManifest(WorkflowBlockManifest): + type: Literal["MultiSIMDImageConsumer"] + images_x: Selector(kind=[IMAGE_KIND]) + images_y: Selector(kind=[IMAGE_KIND]) + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [OutputDefinition(name="metadata", kind=[STRING_KIND])] + + @classmethod + def get_execution_engine_compatibility(cls) -> Optional[str]: + return ">=1.3.0,<2.0.0" + + @classmethod + def get_parameters_accepting_batches(cls) -> List[str]: + return ["images_x", "images_y"] + + +class MultiSIMDImageConsumer(WorkflowBlock): + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return MultiSIMDImageConsumerManifest + + def run( + self, images_x: Batch[WorkflowImageData], images_y: Batch[WorkflowImageData] + ) -> BlockResult: + results = [] + for image_x, image_y in zip(images_x, images_y): + results.append( + { + "metadata": json.dumps(image_x.numpy_image.shape) + + json.dumps(image_y.numpy_image.shape) + } + ) + return results + + +class MultiImageConsumerManifest(WorkflowBlockManifest): + type: Literal["MultiImageConsumer"] + images_x: Selector(kind=[IMAGE_KIND]) + images_y: Selector(kind=[IMAGE_KIND]) + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [OutputDefinition(name="shapes", kind=[STRING_KIND])] + + @classmethod + def get_execution_engine_compatibility(cls) -> Optional[str]: + return ">=1.3.0,<2.0.0" + + +class MultiImageConsumer(WorkflowBlock): + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return MultiImageConsumerManifest + + def run( + self, images_x: WorkflowImageData, images_y: WorkflowImageData + ) -> BlockResult: + return { + "shapes": json.dumps(images_x.numpy_image.shape) + + json.dumps(images_y.numpy_image.shape) + } + + +class MultiImageConsumerRaisingDimManifest(WorkflowBlockManifest): + type: Literal["MultiImageConsumerRaisingDim"] + images_x: 
Selector(kind=[IMAGE_KIND]) + images_y: Selector(kind=[IMAGE_KIND]) + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [OutputDefinition(name="shapes", kind=[STRING_KIND])] + + @classmethod + def get_execution_engine_compatibility(cls) -> Optional[str]: + return ">=1.3.0,<2.0.0" + + @classmethod + def get_output_dimensionality_offset(cls) -> int: + return 1 + + +class MultiImageConsumerRaisingDim(WorkflowBlock): + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return MultiImageConsumerRaisingDimManifest + + def run( + self, images_x: WorkflowImageData, images_y: WorkflowImageData + ) -> BlockResult: + return [ + { + "shapes": json.dumps(images_x.numpy_image.shape) + + json.dumps(images_y.numpy_image.shape) + } + ] + + +class MultiSIMDImageConsumerRaisingDimManifest(WorkflowBlockManifest): + type: Literal["MultiSIMDImageConsumerRaisingDim"] + images_x: Selector(kind=[IMAGE_KIND]) + images_y: Selector(kind=[IMAGE_KIND]) + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [OutputDefinition(name="shapes", kind=[STRING_KIND])] + + @classmethod + def get_execution_engine_compatibility(cls) -> Optional[str]: + return ">=1.3.0,<2.0.0" + + @classmethod + def get_parameters_accepting_batches_and_scalars(cls) -> List[str]: + return ["images_x", "images_y"] + + @classmethod + def get_parameters_enforcing_auto_batch_casting(cls) -> List[str]: + return ["images_x", "images_y"] + + @classmethod + def get_output_dimensionality_offset(cls) -> int: + return 1 + + +class MultiSIMDImageConsumerRaisingDim(WorkflowBlock): + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return MultiSIMDImageConsumerRaisingDimManifest + + def run( + self, images_x: Batch[WorkflowImageData], images_y: Batch[WorkflowImageData] + ) -> BlockResult: + results = [] + for image_x, image_y in zip(images_x, images_y): + results.append( + [ + { + "shapes": json.dumps(image_x.numpy_image.shape) + + 
json.dumps(image_y.numpy_image.shape) + } + ] + ) + return results + + +class MultiNonSIMDImageConsumerDecreasingDimManifest(WorkflowBlockManifest): + type: Literal["MultiNonSIMDImageConsumerDecreasingDim"] + images_x: Selector(kind=[IMAGE_KIND]) + images_y: Selector(kind=[IMAGE_KIND]) + additional: Union[Selector(), float] + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [OutputDefinition(name="shapes", kind=[STRING_KIND])] + + @classmethod + def get_parameters_enforcing_auto_batch_casting(cls) -> List[str]: + return ["images_x", "images_y"] + + @classmethod + def get_execution_engine_compatibility(cls) -> Optional[str]: + return ">=1.3.0,<2.0.0" + + @classmethod + def get_output_dimensionality_offset(cls) -> int: + return -1 + + +class MultiNonSIMDImageConsumerDecreasingDim(WorkflowBlock): + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return MultiNonSIMDImageConsumerDecreasingDimManifest + + def run( + self, + images_x: Batch[WorkflowImageData], + images_y: Batch[WorkflowImageData], + additional: Any, + ) -> BlockResult: + assert not isinstance(additional, Batch) + results = [] + for image_x, image_y in zip(images_x, images_y): + results.append( + json.dumps(image_x.numpy_image.shape) + + json.dumps(image_y.numpy_image.shape) + ) + return {"shapes": "\n".join(results)} + + +class MultiSIMDImageConsumerDecreasingDimManifest(WorkflowBlockManifest): + type: Literal["MultiSIMDImageConsumerDecreasingDim"] + images_x: Selector(kind=[IMAGE_KIND]) + images_y: Selector(kind=[IMAGE_KIND]) + additional: Union[Selector(), float] + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [OutputDefinition(name="shapes", kind=[STRING_KIND])] + + @classmethod + def get_parameters_accepting_batches(cls) -> List[str]: + return ["images_x", "images_y"] + + @classmethod + def get_execution_engine_compatibility(cls) -> Optional[str]: + return ">=1.3.0,<2.0.0" + + @classmethod + def 
get_output_dimensionality_offset(cls) -> int: + return -1 + + +class MultiSIMDImageConsumerDecreasingDim(WorkflowBlock): + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return MultiSIMDImageConsumerDecreasingDimManifest + + def run( + self, + images_x: Batch[Batch[WorkflowImageData]], + images_y: Batch[Batch[WorkflowImageData]], + additional: Any, + ) -> BlockResult: + assert not isinstance(additional, Batch) + results = [] + for image_x_batch, image_y_batch in zip(images_x, images_y): + result = [] + for image_x, image_y in zip(image_x_batch, image_y_batch): + result.append( + json.dumps(image_x.numpy_image.shape) + + json.dumps(image_y.numpy_image.shape) + ) + results.append({"shapes": "\n".join(result)}) + return results + + +class IdentityManifest(WorkflowBlockManifest): + type: Literal["Identity"] + x: Selector() + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [OutputDefinition(name="x")] + + @classmethod + def get_execution_engine_compatibility(cls) -> Optional[str]: + return ">=1.3.0,<2.0.0" + + +class IdentityBlock(WorkflowBlock): + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return IdentityManifest + + def run(self, x: Any) -> BlockResult: + return {"x": x} + + +class IdentitySIMDManifest(WorkflowBlockManifest): + type: Literal["IdentitySIMD"] + x: Selector() + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [OutputDefinition(name="x")] + + @classmethod + def get_execution_engine_compatibility(cls) -> Optional[str]: + return ">=1.3.0,<2.0.0" + + @classmethod + def get_parameters_accepting_batches(cls) -> List[str]: + return ["x"] + + +class IdentitySIMDBlock(WorkflowBlock): + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return IdentitySIMDManifest + + def run(self, x: Batch[Any]) -> BlockResult: + assert isinstance(x, Batch) + return [{"x": x_el} for x_el in x] + + +class 
BoostDimensionalityManifest(WorkflowBlockManifest): + type: Literal["BoostDimensionality"] + x: Selector() + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [OutputDefinition(name="x")] + + @classmethod + def get_execution_engine_compatibility(cls) -> Optional[str]: + return ">=1.3.0,<2.0.0" + + @classmethod + def get_output_dimensionality_offset(cls) -> int: + return 1 + + +class BoostDimensionalityBlock(WorkflowBlock): + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return BoostDimensionalityManifest + + def run(self, x: Any) -> BlockResult: + return [{"x": x}, {"x": x}] + + +class DoubleBoostDimensionalityManifest(WorkflowBlockManifest): + type: Literal["DoubleBoostDimensionality"] + x: Selector() + y: Selector() + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [OutputDefinition(name="x"), OutputDefinition(name="y")] + + @classmethod + def get_execution_engine_compatibility(cls) -> Optional[str]: + return ">=1.3.0,<2.0.0" + + @classmethod + def get_output_dimensionality_offset(cls) -> int: + return 1 + + +class DoubleBoostDimensionalityBlock(WorkflowBlock): + + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return DoubleBoostDimensionalityManifest + + def run(self, x: Any, y: Any) -> BlockResult: + return [{"x": x, "y": y}, {"x": x, "y": y}] + + +class NonSIMDConsumerAcceptingListManifest(WorkflowBlockManifest): + type: Literal["NonSIMDConsumerAcceptingList"] + x: List[Selector(kind=[IMAGE_KIND])] + y: List[Selector(kind=[IMAGE_KIND])] + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [OutputDefinition(name="x"), OutputDefinition(name="y")] + + @classmethod + def get_execution_engine_compatibility(cls) -> Optional[str]: + return ">=1.3.0,<2.0.0" + + +class NonSIMDConsumerAcceptingListBlock(WorkflowBlock): + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return NonSIMDConsumerAcceptingListManifest 
+ + def run(self, x: list, y: list) -> BlockResult: + return {"x": x, "y": y} + + +class SIMDConsumerAcceptingListManifest(WorkflowBlockManifest): + type: Literal["SIMDConsumerAcceptingList"] + x: List[Selector(kind=[IMAGE_KIND])] + y: List[Selector(kind=[IMAGE_KIND])] + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [OutputDefinition(name="x"), OutputDefinition(name="y")] + + @classmethod + def get_execution_engine_compatibility(cls) -> Optional[str]: + return ">=1.3.0,<2.0.0" + + @classmethod + def get_parameters_accepting_batches(cls) -> List[str]: + return ["x", "y"] + + +class SIMDConsumerAcceptingListBlock(WorkflowBlock): + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return SIMDConsumerAcceptingListManifest + + def run( + self, x: List[Batch[WorkflowImageData]], y: List[Batch[WorkflowImageData]] + ) -> BlockResult: + idx2x = defaultdict(list) + idx2y = defaultdict(list) + for batch_x in x: + for idx, el in enumerate(batch_x): + idx2x[idx].append(el) + for batch_y in y: + for idx, el in enumerate(batch_y): + idx2y[idx].append(el) + indices_x = sorted(idx2x.keys()) + indices_y = sorted(idx2y.keys()) + assert indices_x == indices_y + results = [] + for idx in indices_x: + results.append({"x": idx2x[idx], "y": idx2y[idx]}) + return results + + +class NonSIMDConsumerAcceptingDictManifest(WorkflowBlockManifest): + type: Literal["NonSIMDConsumerAcceptingDict"] + x: Dict[str, Selector(kind=[IMAGE_KIND])] + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [OutputDefinition(name="x")] + + @classmethod + def get_execution_engine_compatibility(cls) -> Optional[str]: + return ">=1.3.0,<2.0.0" + + +class NonSIMDConsumerAcceptingDictBlock(WorkflowBlock): + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return NonSIMDConsumerAcceptingDictManifest + + def run(self, x: dict) -> BlockResult: + sorted_keys = sorted(x.keys()) + return {"x": [x[k] for k in sorted_keys]} 
+ + +class SIMDConsumerAcceptingDictManifest(WorkflowBlockManifest): + type: Literal["SIMDConsumerAcceptingDict"] + x: Dict[str, Selector(kind=[IMAGE_KIND])] + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [OutputDefinition(name="x")] + + @classmethod + def get_execution_engine_compatibility(cls) -> Optional[str]: + return ">=1.3.0,<2.0.0" + + @classmethod + def get_parameters_accepting_batches(cls) -> List[str]: + return ["x"] + + +class SIMDConsumerAcceptingDictBlock(WorkflowBlock): + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return SIMDConsumerAcceptingDictManifest + + def run(self, x: Dict[str, Batch[Any]]) -> BlockResult: + sorted_keys = sorted(x.keys()) + keys_stashes = {k: {} for k in sorted_keys} + for key, key_batch in x.items(): + assert isinstance(key_batch, Batch) + for idx, key_batch_el in enumerate(key_batch): + keys_stashes[key][idx] = key_batch_el + reference_indices = None + for stash in keys_stashes.values(): + sorted_idx = sorted(stash.keys()) + if reference_indices is None: + reference_indices = sorted_idx + assert sorted_idx == reference_indices + assert reference_indices is not None + results = [] + for idx in reference_indices: + results.append({"x": [keys_stashes[k][idx] for k in sorted_keys]}) + return results + + +class NonSIMDConsumerAcceptingListIncDimManifest(WorkflowBlockManifest): + type: Literal["NonSIMDConsumerAcceptingListIncDim"] + x: List[Selector(kind=[IMAGE_KIND])] + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [OutputDefinition(name="x")] + + @classmethod + def get_execution_engine_compatibility(cls) -> Optional[str]: + return ">=1.3.0,<2.0.0" + + @classmethod + def get_output_dimensionality_offset( + cls, + ) -> int: + return 1 + + +class NonSIMDConsumerAcceptingListIncDimBlock(WorkflowBlock): + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return NonSIMDConsumerAcceptingListIncDimManifest + + def run(self, 
x: list) -> BlockResult: + return [{"x": x}, {"x": x}] + + +class NonSIMDConsumerAcceptingDictIncDimManifest(WorkflowBlockManifest): + type: Literal["NonSIMDConsumerAcceptingDictIncDim"] + x: Dict[str, Selector(kind=[IMAGE_KIND])] + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [OutputDefinition(name="x")] + + @classmethod + def get_execution_engine_compatibility(cls) -> Optional[str]: + return ">=1.3.0,<2.0.0" + + @classmethod + def get_output_dimensionality_offset( + cls, + ) -> int: + return 1 + + +class NonSIMDConsumerAcceptingDictIncDimBlock(WorkflowBlock): + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return NonSIMDConsumerAcceptingDictIncDimManifest + + def run(self, x: dict) -> BlockResult: + sorted_keys = sorted(x.keys()) + return [{"x": [x[k] for k in sorted_keys]}, {"x": [x[k] for k in sorted_keys]}] + + +class SIMDConsumerAcceptingDictIncDimManifest(WorkflowBlockManifest): + type: Literal["SIMDConsumerAcceptingDictIncDim"] + x: Dict[str, Selector(kind=[IMAGE_KIND])] + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [OutputDefinition(name="x")] + + @classmethod + def get_execution_engine_compatibility(cls) -> Optional[str]: + return ">=1.3.0,<2.0.0" + + @classmethod + def get_output_dimensionality_offset( + cls, + ) -> int: + return 1 + + @classmethod + def get_parameters_accepting_batches(cls) -> List[str]: + return ["x"] + + +class SIMDConsumerAcceptingDictIncDimBlock(WorkflowBlock): + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return SIMDConsumerAcceptingDictIncDimManifest + + def run(self, x: Dict[str, Batch[Any]]) -> BlockResult: + sorted_keys = sorted(x.keys()) + keys_stashes = {k: {} for k in sorted_keys} + for key, key_batch in x.items(): + assert isinstance(key_batch, Batch) + for idx, key_batch_el in enumerate(key_batch): + keys_stashes[key][idx] = key_batch_el + reference_indices = None + for stash in keys_stashes.values(): + 
sorted_idx = sorted(stash.keys()) + if reference_indices is None: + reference_indices = sorted_idx + assert sorted_idx == reference_indices + assert reference_indices is not None + results = [] + for idx in reference_indices: + results.append( + [ + {"x": [keys_stashes[k][idx] for k in sorted_keys]}, + {"x": [keys_stashes[k][idx] for k in sorted_keys]}, + ] + ) + return results + + +class NonSIMDConsumerAcceptingListDecDimManifest(WorkflowBlockManifest): + type: Literal["NonSIMDConsumerAcceptingListDecDim"] + x: List[Selector(kind=[IMAGE_KIND])] + y: Union[Selector(), str] + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [OutputDefinition(name="x")] + + @classmethod + def get_execution_engine_compatibility(cls) -> Optional[str]: + return ">=1.3.0,<2.0.0" + + @classmethod + def get_output_dimensionality_offset( + cls, + ) -> int: + return -1 + + @classmethod + def get_parameters_enforcing_auto_batch_casting(cls) -> List[str]: + return ["x"] + + +class NonSIMDConsumerAcceptingListDecDimBlock(WorkflowBlock): + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return NonSIMDConsumerAcceptingListDecDimManifest + + def run(self, x: Batch[list], y: str) -> BlockResult: + assert not isinstance(y, Batch) + return {"x": [f for e in x for f in e]} + + +class SIMDConsumerAcceptingListDecDimManifest(WorkflowBlockManifest): + type: Literal["SIMDConsumerAcceptingListDecDim"] + x: List[Selector(kind=[IMAGE_KIND])] + y: Union[Selector(), str] + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [OutputDefinition(name="x")] + + @classmethod + def get_execution_engine_compatibility(cls) -> Optional[str]: + return ">=1.3.0,<2.0.0" + + @classmethod + def get_output_dimensionality_offset( + cls, + ) -> int: + return -1 + + @classmethod + def get_parameters_accepting_batches(cls) -> List[str]: + return ["x"] + + +class SIMDConsumerAcceptingListDecDimBlock(WorkflowBlock): + @classmethod + def 
get_manifest(cls) -> Type[WorkflowBlockManifest]: + return SIMDConsumerAcceptingListDecDimManifest + + def run(self, x: List[Batch[Batch[WorkflowImageData]]], y: str) -> BlockResult: + assert not isinstance(y, Batch) + idx2x = defaultdict(list) + for batch_x in x: + for idx, el in enumerate(batch_x): + idx2x[idx].extend(list(el)) + indices_x = sorted(idx2x.keys()) + results = [] + for idx in indices_x: + results.append({"x": idx2x[idx]}) + return results + + +class NonSIMDConsumerAcceptingDictDecDimManifest(WorkflowBlockManifest): + type: Literal["NonSIMDConsumerAcceptingDictDecDim"] + x: Dict[str, Selector(kind=[IMAGE_KIND])] + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [OutputDefinition(name="x")] + + @classmethod + def get_execution_engine_compatibility(cls) -> Optional[str]: + return ">=1.3.0,<2.0.0" + + @classmethod + def get_output_dimensionality_offset( + cls, + ) -> int: + return -1 + + @classmethod + def get_parameters_enforcing_auto_batch_casting(cls) -> List[str]: + return ["x"] + + +class NonSIMDConsumerAcceptingDictDecDimBlock(WorkflowBlock): + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return NonSIMDConsumerAcceptingDictDecDimManifest + + def run(self, x: dict) -> BlockResult: + results = [] + sorted_keys = sorted(x.keys()) + for k in sorted_keys: + v = x[k] + assert isinstance(v, Batch) + result = [e for e in v] + results.append(result) + return {"x": results} + + +class SIMDConsumerAcceptingDictDecDimManifest(WorkflowBlockManifest): + type: Literal["SIMDConsumerAcceptingDictDecDim"] + x: Dict[str, Selector(kind=[IMAGE_KIND])] + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [OutputDefinition(name="x")] + + @classmethod + def get_execution_engine_compatibility(cls) -> Optional[str]: + return ">=1.3.0,<2.0.0" + + @classmethod + def get_output_dimensionality_offset( + cls, + ) -> int: + return -1 + + @classmethod + def get_parameters_accepting_batches(cls) 
-> List[str]: + return ["x"] + + +class SIMDConsumerAcceptingDictDecDimBlock(WorkflowBlock): + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return SIMDConsumerAcceptingDictDecDimManifest + + def run(self, x: Dict[str, Batch[Batch[Any]]]) -> BlockResult: + sorted_keys = sorted(x.keys()) + keys_stashes = {k: {} for k in sorted_keys} + for key, key_batch in x.items(): + assert isinstance(key_batch, Batch) + for idx, key_batch_el in enumerate(key_batch): + assert isinstance(key_batch_el, Batch) + keys_stashes[key][idx] = list(key_batch_el) + reference_indices = None + for stash in keys_stashes.values(): + sorted_idx = sorted(stash.keys()) + if reference_indices is None: + reference_indices = sorted_idx + assert sorted_idx == reference_indices + assert reference_indices is not None + results = [] + for idx in reference_indices: + merged = [] + for k in sorted_keys: + merged.append(keys_stashes[k][idx]) + results.append({"x": merged}) + return results + + +class AlwaysTerminateManifest(WorkflowBlockManifest): + type: Literal["AlwaysTerminate"] + x: Union[Selector(), Any] + next_steps: List[StepSelector] = Field( + description="Steps to execute if the condition evaluates to true.", + examples=[["$steps.on_true"]], + ) + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [] + + @classmethod + def get_execution_engine_compatibility(cls) -> Optional[str]: + return ">=1.3.0,<2.0.0" + + +class AlwaysTerminateBlock(WorkflowBlock): + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return AlwaysTerminateManifest + + def run(self, x: Any, next_steps: List[StepSelector]) -> BlockResult: + return FlowControl(mode="terminate_branch") + + +class AlwaysPassManifest(WorkflowBlockManifest): + type: Literal["AlwaysPass"] + x: Union[Selector(), Any] + next_steps: List[StepSelector] = Field( + description="Steps to execute if the condition evaluates to true.", + examples=[["$steps.on_true"]], + ) + + @classmethod + 
def describe_outputs(cls) -> List[OutputDefinition]: + return [] + + @classmethod + def get_parameters_accepting_batches(cls) -> List[str]: + return ["x"] + + @classmethod + def get_execution_engine_compatibility(cls) -> Optional[str]: + return ">=1.3.0,<2.0.0" + + +class AlwaysPassBlock(WorkflowBlock): + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return AlwaysPassManifest + + def run(self, x: Batch[Any], next_steps: List[StepSelector]) -> BlockResult: + assert isinstance(x, Batch) + results = [] + for _ in x: + results.append(FlowControl(mode="select_step", context=next_steps)) + return results + + +class EachSecondPassManifest(WorkflowBlockManifest): + type: Literal["EachSecondPass"] + x: Union[Selector(), Any] + next_steps: List[StepSelector] = Field( + description="Steps to execute if the condition evaluates to true.", + examples=[["$steps.on_true"]], + ) + + @classmethod + def describe_outputs(cls) -> List[OutputDefinition]: + return [] + + @classmethod + def get_execution_engine_compatibility(cls) -> Optional[str]: + return ">=1.3.0,<2.0.0" + + +class EachSecondPassBlock(WorkflowBlock): + @classmethod + def get_manifest(cls) -> Type[WorkflowBlockManifest]: + return EachSecondPassManifest + + def __init__(self): + self._last_passed = False + + def run(self, x: Any, next_steps: List[StepSelector]) -> BlockResult: + if self._last_passed: + self._last_passed = False + return FlowControl(mode="terminate_branch") + self._last_passed = True + return FlowControl(mode="select_step", context=next_steps) + + +def load_blocks() -> List[Type[WorkflowBlock]]: + return [ + ImageProducerBlock, + SingleImageConsumer, + SingleImageConsumerNonSIMD, + MultiSIMDImageConsumer, + MultiImageConsumer, + MultiImageConsumerRaisingDim, + MultiSIMDImageConsumerRaisingDim, + IdentityBlock, + IdentitySIMDBlock, + MultiNonSIMDImageConsumerDecreasingDim, + MultiSIMDImageConsumerDecreasingDim, + BoostDimensionalityBlock, + DoubleBoostDimensionalityBlock, + 
NonSIMDConsumerAcceptingListBlock, + NonSIMDConsumerAcceptingDictBlock, + NonSIMDConsumerAcceptingListIncDimBlock, + NonSIMDConsumerAcceptingDictIncDimBlock, + NonSIMDConsumerAcceptingListDecDimBlock, + NonSIMDConsumerAcceptingDictDecDimBlock, + SIMDConsumerAcceptingListBlock, + SIMDConsumerAcceptingDictBlock, + SIMDConsumerAcceptingDictIncDimBlock, + SIMDConsumerAcceptingDictDecDimBlock, + SIMDConsumerAcceptingListDecDimBlock, + AlwaysTerminateBlock, + AlwaysPassBlock, + EachSecondPassBlock, + ] diff --git a/tests/workflows/integration_tests/execution/stub_plugins/rock_paper_scissor_plugin/expression.py b/tests/workflows/integration_tests/execution/stub_plugins/rock_paper_scissor_plugin/expression.py index eed0130a29..bebe4e1dac 100644 --- a/tests/workflows/integration_tests/execution/stub_plugins/rock_paper_scissor_plugin/expression.py +++ b/tests/workflows/integration_tests/execution/stub_plugins/rock_paper_scissor_plugin/expression.py @@ -65,6 +65,4 @@ def run( params = ", ".join(f"{k}={k}" for k in data) code = output.code + f"\n\nresult = function({params})" exec(code, data, results) - result = {"output": results["result"]} - print("result", result) - return result + return {"output": results["result"]} diff --git a/tests/workflows/integration_tests/execution/test_plugins_enforcing_scalars_to_fit_into_batch_parameters.py b/tests/workflows/integration_tests/execution/test_plugins_enforcing_scalars_to_fit_into_batch_parameters.py new file mode 100644 index 0000000000..7c2f4b9886 --- /dev/null +++ b/tests/workflows/integration_tests/execution/test_plugins_enforcing_scalars_to_fit_into_batch_parameters.py @@ -0,0 +1,4507 @@ +from unittest import mock +from unittest.mock import MagicMock + +import numpy as np +import pytest + +from inference.core.env import WORKFLOWS_MAX_CONCURRENT_STEPS +from inference.core.managers.base import ModelManager +from inference.core.workflows.core_steps.common.entities import StepExecutionMode +from inference.core.workflows.errors 
import AssumptionError +from inference.core.workflows.execution_engine.core import ExecutionEngine +from inference.core.workflows.execution_engine.introspection import blocks_loader + +WORKFLOW_IMAGE_PRODUCER_SINGLE_IMAGE_SIMD_CONSUMER = { + "version": "1.1", + "inputs": [], + "steps": [ + { + "type": "ImageProducer", + "name": "image_producer", + }, + { + "type": "ImageConsumer", + "name": "image_consumer", + "images": "$steps.image_producer.image", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "shapes", + "selector": "$steps.image_consumer.shapes", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_producing_image_and_consuming_it_in_block_accepting_single_batch_input( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_IMAGE_PRODUCER_SINGLE_IMAGE_SIMD_CONSUMER, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + result = execution_engine.run(runtime_parameters={}) + + # then + assert result == [{"shapes": "[192, 168, 3]"}] + + +WORKFLOW_IMAGE_PRODUCER_SINGLE_IMAGE_NON_SIMD_CONSUMER = { + "version": "1.1", + "inputs": [], + "steps": [ + { + "type": "ImageProducer", + "name": "image_producer", + }, + { + "type": "ImageConsumerNonSIMD", + "name": "image_consumer", + "images": "$steps.image_producer.image", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "shapes", + "selector": "$steps.image_consumer.shapes", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def 
test_workflow_producing_image_and_consuming_it_in_block_accepting_single_non_simd_input( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_IMAGE_PRODUCER_SINGLE_IMAGE_NON_SIMD_CONSUMER, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + result = execution_engine.run(runtime_parameters={}) + + # then + assert result == [{"shapes": "[192, 168, 3]"}] + + +WORKFLOW_SINGLE_IMAGE_SIMD_CONSUMER_FROM_INPUT = { + "version": "1.1", + "inputs": [ + {"type": "WorkflowImage", "name": "image"}, + ], + "steps": [ + {"type": "ImageConsumer", "name": "image_consumer", "images": "$inputs.image"} + ], + "outputs": [ + { + "type": "JsonField", + "name": "shapes", + "selector": "$steps.image_consumer.shapes", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_consuming_input_image_in_block_accepting_single_non_simd_input( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_SINGLE_IMAGE_SIMD_CONSUMER_FROM_INPUT, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + image = np.zeros((240, 230, 
3), dtype=np.uint8) + + # when + result = execution_engine.run(runtime_parameters={"image": image}) + + # then + assert result == [{"shapes": "[240, 230, 3]"}] + + +WORKFLOW_IMAGE_PRODUCER_MULTIPLE_IMAGES_SIMD_CONSUMER = { + "version": "1.1", + "inputs": [], + "steps": [ + { + "type": "ImageProducer", + "name": "image_producer_x", + }, + {"type": "ImageProducer", "name": "image_producer_y", "shape": (240, 230, 3)}, + { + "type": "MultiSIMDImageConsumer", + "name": "image_consumer", + "images_x": "$steps.image_producer_x.image", + "images_y": "$steps.image_producer_y.image", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "metadata", + "selector": "$steps.image_consumer.metadata", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_simd_image_consumers_consuming_images_generated_by_image_producers_outputting_scalar_images( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_IMAGE_PRODUCER_MULTIPLE_IMAGES_SIMD_CONSUMER, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + result = execution_engine.run(runtime_parameters={}) + + # then + assert result == [{"metadata": "[192, 168, 3][240, 230, 3]"}] + + +WORKFLOW_IMAGE_PRODUCER_AND_INPUT_IMAGES_COMBINED_WITH_MULTIPLE_IMAGES_SIMD_CONSUMER = { + "version": "1.1", + "inputs": [ + {"type": "WorkflowImage", "name": "image"}, + ], + "steps": [ + { + "type": "ImageProducer", + "name": "image_producer_x", + }, + { + "type": "MultiSIMDImageConsumer", + "name": "image_consumer", + 
"images_x": "$steps.image_producer_x.image", + "images_y": "$inputs.image", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "metadata", + "selector": "$steps.image_consumer.metadata", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_simd_image_consumers_consuming_images_generated_by_image_producer_and_input_images_batch( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_IMAGE_PRODUCER_AND_INPUT_IMAGES_COMBINED_WITH_MULTIPLE_IMAGES_SIMD_CONSUMER, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + input_images = [ + np.zeros((192, 192, 3), dtype=np.uint8), + np.zeros((200, 192, 3), dtype=np.uint8), + np.zeros((300, 192, 3), dtype=np.uint8), + ] + # when + result = execution_engine.run(runtime_parameters={"image": input_images}) + + # then + assert result == [ + {"metadata": "[192, 168, 3][192, 192, 3]"}, + {"metadata": "[192, 168, 3][200, 192, 3]"}, + {"metadata": "[192, 168, 3][300, 192, 3]"}, + ] + + +WORKFLOW_IMAGE_PRODUCER_AND_STEP_OUTPUT_IMAGES_COMBINED_WITH_MULTIPLE_IMAGES_SIMD_CONSUMER = { + "version": "1.1", + "inputs": [ + {"type": "WorkflowImage", "name": "image"}, + ], + "steps": [ + { + "type": "ImageProducer", + "name": "image_producer_x", + }, + { + "type": "IdentitySIMD", + "name": "identity_simd", + "x": "$inputs.image", + }, + { + "type": "MultiSIMDImageConsumer", + "name": "image_consumer", + "images_x": "$steps.image_producer_x.image", + "images_y": "$steps.identity_simd.x", + }, + ], + "outputs": [ + { + "type": 
"JsonField", + "name": "metadata", + "selector": "$steps.image_consumer.metadata", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_simd_image_consumers_consuming_images_generated_by_image_producer_and_another_simd_block( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_IMAGE_PRODUCER_AND_STEP_OUTPUT_IMAGES_COMBINED_WITH_MULTIPLE_IMAGES_SIMD_CONSUMER, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + input_images = [ + np.zeros((192, 192, 3), dtype=np.uint8), + np.zeros((200, 192, 3), dtype=np.uint8), + np.zeros((300, 192, 3), dtype=np.uint8), + ] + # when + result = execution_engine.run(runtime_parameters={"image": input_images}) + + # then + assert result == [ + {"metadata": "[192, 168, 3][192, 192, 3]"}, + {"metadata": "[192, 168, 3][200, 192, 3]"}, + {"metadata": "[192, 168, 3][300, 192, 3]"}, + ] + + +WORKFLOW_WITH_SCALAR_MULTI_IMAGE_CONSUMER_FED_BY_SCALAR_PRODUCERS = { + "version": "1.1", + "inputs": [], + "steps": [ + { + "type": "ImageProducer", + "name": "image_producer_x", + }, + { + "type": "IdentitySIMD", + "name": "identity_simd", + "x": "$steps.image_producer_x.image", + }, + { + "type": "ImageProducer", + "name": "image_producer_y", + "shape": (220, 230, 3), + }, + { + "type": "MultiImageConsumer", + "name": "image_consumer", + "images_x": "$steps.identity_simd.x", + "images_y": "$steps.image_producer_y.image", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "shapes", + "selector": "$steps.image_consumer.shapes", 
+ }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_multiple_scalar_producers_feeding_data_into_scalar_consumer( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_SCALAR_MULTI_IMAGE_CONSUMER_FED_BY_SCALAR_PRODUCERS, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + result = execution_engine.run(runtime_parameters={}) + + # then + assert result == [{"shapes": "[192, 168, 3][220, 230, 3]"}] + + +WORKFLOW_WITH_SCALAR_MULTI_IMAGE_CONSUMER_FED_BY_SCALAR_PRODUCER_AND_BATCH_INPUT = { + "version": "1.1", + "inputs": [{"type": "WorkflowImage", "name": "image"}], + "steps": [ + { + "type": "ImageProducer", + "name": "image_producer_y", + "shape": (220, 230, 3), + }, + { + "type": "MultiImageConsumer", + "name": "image_consumer", + "images_x": "$inputs.image", + "images_y": "$steps.image_producer_y.image", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "shapes", + "selector": "$steps.image_consumer.shapes", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_scalar_producer_and_batch_input_feeding_data_into_scalar_consumer( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + 
"workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_SCALAR_MULTI_IMAGE_CONSUMER_FED_BY_SCALAR_PRODUCER_AND_BATCH_INPUT, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + image = np.zeros((200, 400, 3), dtype=np.uint8) + + # when + result = execution_engine.run(runtime_parameters={"image": image}) + + # then + assert result == [{"shapes": "[200, 400, 3][220, 230, 3]"}] + + +WORKFLOW_WITH_NON_SIMD_CONSUMER_RAISING_OUTPUT_DIM_FED_BY_SCALAR_PRODUCERS = { + "version": "1.1", + "inputs": [], + "steps": [ + { + "type": "ImageProducer", + "name": "image_producer_x", + }, + { + "type": "IdentitySIMD", + "name": "identity_simd", + "x": "$steps.image_producer_x.image", + }, + { + "type": "ImageProducer", + "name": "image_producer_y", + "shape": (220, 230, 3), + }, + { + "type": "MultiImageConsumerRaisingDim", + "name": "image_consumer", + "images_x": "$steps.identity_simd.x", + "images_y": "$steps.image_producer_y.image", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "shapes", + "selector": "$steps.image_consumer.shapes", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_multiple_scalar_producers_feeding_non_simd_consumer_raising_dim( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + + # then + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_NON_SIMD_CONSUMER_RAISING_OUTPUT_DIM_FED_BY_SCALAR_PRODUCERS, + init_parameters=workflow_init_parameters, + 
max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + result = execution_engine.run(runtime_parameters={}) + + # then + assert result == [{"shapes": ["[192, 168, 3][220, 230, 3]"]}] + + +WORKFLOW_WITH_NON_SIMD_CONSUMER_RAISING_OUTPUT_DIM_FED_BY_SCALAR_PRODUCER_AND_BATCH_INPUT = { + "version": "1.1", + "inputs": [{"type": "WorkflowImage", "name": "image"}], + "steps": [ + { + "type": "ImageProducer", + "name": "image_producer_x", + }, + { + "type": "IdentitySIMD", + "name": "identity_simd", + "x": "$steps.image_producer_x.image", + }, + { + "type": "MultiImageConsumerRaisingDim", + "name": "image_consumer", + "images_x": "$steps.identity_simd.x", + "images_y": "$inputs.image", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "shapes", + "selector": "$steps.image_consumer.shapes", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_scalar_producer_and_batch_input_feeding_non_simd_consumer_raising_dim( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + + # then + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_NON_SIMD_CONSUMER_RAISING_OUTPUT_DIM_FED_BY_SCALAR_PRODUCER_AND_BATCH_INPUT, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + image_1 = np.zeros((200, 100, 3), dtype=np.uint8) + image_2 = np.zeros((300, 100, 3), dtype=np.uint8) + + # when + result = execution_engine.run(runtime_parameters={"image": [image_1, image_2]}) + + # then + assert result == [ + {"shapes": ["[192, 168, 3][200, 100, 3]"]}, + {"shapes": ["[192, 168, 3][300, 100, 3]"]}, + ] + + 
+WORKFLOW_WITH_NON_SIMD_CONSUMER_DECREASING_OUTPUT_DIM_FED_BY_SCALAR_PRODUCERS = { + "version": "1.1", + "inputs": [ + {"type": "WorkflowParameter", "name": "confidence", "default_value": 0.3}, + ], + "steps": [ + { + "type": "ImageProducer", + "name": "image_producer_x", + }, + { + "type": "IdentitySIMD", + "name": "identity_simd", + "x": "$steps.image_producer_x.image", + }, + { + "type": "ImageProducer", + "name": "image_producer_y", + "shape": (220, 230, 3), + }, + { + "type": "MultiNonSIMDImageConsumerDecreasingDim", + "name": "image_consumer", + "images_x": "$steps.identity_simd.x", + "images_y": "$steps.image_producer_y.image", + "additional": "$inputs.confidence", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "shapes", + "selector": "$steps.image_consumer.shapes", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_multiple_scalar_producers_feeding_non_simd_consumer_decreasing_dim( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_NON_SIMD_CONSUMER_DECREASING_OUTPUT_DIM_FED_BY_SCALAR_PRODUCERS, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + result = execution_engine.run(runtime_parameters={}) + + # then + assert result[0]["shapes"] == "[192, 168, 3][220, 230, 3]" + + +WORKFLOW_WITH_NON_SIMD_CONSUMER_DECREASING_OUTPUT_DIM_FED_BY_SCALAR_PRODUCER_AND_BATCH_INPUT = { + "version": "1.1", + "inputs": [ + {"type": "WorkflowImage", "name": "image"}, + {"type": "WorkflowParameter", "name": "confidence", 
"default_value": 0.3}, + ], + "steps": [ + { + "type": "ImageProducer", + "name": "image_producer_x", + }, + { + "type": "IdentitySIMD", + "name": "identity_simd", + "x": "$steps.image_producer_x.image", + }, + { + "type": "MultiNonSIMDImageConsumerDecreasingDim", + "name": "image_consumer", + "images_x": "$steps.identity_simd.x", + "images_y": "$inputs.image", + "additional": "$inputs.confidence", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "shapes", + "selector": "$steps.image_consumer.shapes", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_scalar_producer_and_batch_input_feeding_non_simd_consumer_decreasing_dim( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + image_1 = np.zeros((200, 100, 3), dtype=np.uint8) + image_2 = np.zeros((300, 100, 3), dtype=np.uint8) + + # then + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_NON_SIMD_CONSUMER_DECREASING_OUTPUT_DIM_FED_BY_SCALAR_PRODUCER_AND_BATCH_INPUT, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # then + result = execution_engine.run(runtime_parameters={"image": [image_1, image_2]}) + + # then + assert result == [ + {"shapes": "[192, 168, 3][200, 100, 3]\n[192, 168, 3][300, 100, 3]"} + ] + + +WORKFLOW_WITH_SIMD_CONSUMER_DECREASING_OUTPUT_DIM_FED_BY_SCALAR_PRODUCERS = { + "version": "1.1", + "inputs": [ + {"type": "WorkflowParameter", "name": "confidence", "default_value": 0.3}, + ], + "steps": [ + { + "type": "ImageProducer", + "name": "image_producer_x", + }, + { + "type": "IdentitySIMD", + "name": 
"identity_simd", + "x": "$steps.image_producer_x.image", + }, + { + "type": "ImageProducer", + "name": "image_producer_y", + "shape": (220, 230, 3), + }, + { + "type": "MultiSIMDImageConsumerDecreasingDim", + "name": "image_consumer", + "images_x": "$steps.identity_simd.x", + "images_y": "$steps.image_producer_y.image", + "additional": "$inputs.confidence", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "shapes", + "selector": "$steps.image_consumer.shapes", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_multiple_scalar_producers_feeding_simd_consumer_decreasing_dim( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_SIMD_CONSUMER_DECREASING_OUTPUT_DIM_FED_BY_SCALAR_PRODUCERS, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + result = execution_engine.run(runtime_parameters={}) + + # then + assert result[0]["shapes"] == "[192, 168, 3][220, 230, 3]" + + +WORKFLOW_WITH_SIMD_CONSUMER_DECREASING_OUTPUT_DIM_FED_BY_SCALAR_PRODUCER_AND_BATCH_INPUT = { + "version": "1.1", + "inputs": [ + {"type": "WorkflowImage", "name": "image"}, + {"type": "WorkflowParameter", "name": "confidence", "default_value": 0.3}, + ], + "steps": [ + { + "type": "ImageProducer", + "name": "image_producer_x", + }, + { + "type": "IdentitySIMD", + "name": "identity_simd", + "x": "$steps.image_producer_x.image", + }, + { + "type": "MultiSIMDImageConsumerDecreasingDim", + "name": "image_consumer", + "images_x": "$steps.identity_simd.x", + 
"images_y": "$inputs.image", + "additional": "$inputs.confidence", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "shapes", + "selector": "$steps.image_consumer.shapes", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_scalar_producer_and_batch_input_feeding_simd_consumer_decreasing_dim( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + image_1 = np.zeros((200, 100, 3), dtype=np.uint8) + image_2 = np.zeros((300, 100, 3), dtype=np.uint8) + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_SIMD_CONSUMER_DECREASING_OUTPUT_DIM_FED_BY_SCALAR_PRODUCER_AND_BATCH_INPUT, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + result = execution_engine.run(runtime_parameters={"image": [image_1, image_2]}) + + # then + assert result == [ + {"shapes": "[192, 168, 3][200, 100, 3]\n[192, 168, 3][300, 100, 3]"} + ] + + +WORKFLOW_WITH_SIMD_CONSUMER_DECREASING_OUTPUT_DIM_FED_BY_BATCH_INPUTS_AT_DIM_1 = { + "version": "1.1", + "inputs": [ + {"type": "WorkflowImage", "name": "image_1"}, + {"type": "WorkflowImage", "name": "image_2"}, + {"type": "WorkflowParameter", "name": "confidence", "default_value": 0.3}, + ], + "steps": [ + { + "type": "IdentitySIMD", + "name": "identity_simd", + "x": "$inputs.image_1", + }, + { + "type": "Identity", + "name": "identity_non_simd", + "x": "$inputs.image_2", + }, + { + "type": "MultiSIMDImageConsumerDecreasingDim", + "name": "image_consumer", + "images_x": "$steps.identity_simd.x", + "images_y": "$steps.identity_non_simd.x", + "additional": 
"$inputs.confidence", + }, + { + "type": "IdentitySIMD", + "name": "identity_simd_2", + "x": "$steps.image_consumer.shapes", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "shapes", + "selector": "$steps.identity_simd_2.x", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_batched_inputs_at_dim_1_fed_into_consumer_decreasing_the_dimensionality( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + image_1 = np.zeros((200, 100, 3), dtype=np.uint8) + image_2 = np.zeros((300, 100, 3), dtype=np.uint8) + image_3 = np.zeros((400, 100, 3), dtype=np.uint8) + image_4 = np.zeros((500, 100, 3), dtype=np.uint8) + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_SIMD_CONSUMER_DECREASING_OUTPUT_DIM_FED_BY_BATCH_INPUTS_AT_DIM_1, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + result = execution_engine.run( + runtime_parameters={ + "image_1": [image_1, image_2], + "image_2": [image_3, image_4], + } + ) + + # then + assert result == [ + {"shapes": "[200, 100, 3][400, 100, 3]\n[300, 100, 3][500, 100, 3]"} + ] + + +WORKFLOW_WITH_SIMD_CONSUMER_DECREASING_OUTPUT_DIM_FED_BY_BATCH_INPUTS_AT_DIM_1_BOOSTING_DIM_AT_THE_END = { + "version": "1.1", + "inputs": [ + {"type": "WorkflowImage", "name": "image_1"}, + {"type": "WorkflowImage", "name": "image_2"}, + {"type": "WorkflowParameter", "name": "confidence", "default_value": 0.3}, + ], + "steps": [ + { + "type": "IdentitySIMD", + "name": "identity_simd", + "x": "$inputs.image_1", + }, + { + "type": "Identity", + "name": "identity_non_simd", 
+ "x": "$inputs.image_2", + }, + { + "type": "MultiSIMDImageConsumerDecreasingDim", + "name": "image_consumer", + "images_x": "$steps.identity_simd.x", + "images_y": "$steps.identity_non_simd.x", + "additional": "$inputs.confidence", + }, + { + "type": "IdentitySIMD", + "name": "identity_simd_2", + "x": "$steps.image_consumer.shapes", + }, + { + "type": "BoostDimensionality", + "name": "dimensionality_boost", + "x": "$steps.identity_simd_2.x", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "shapes", + "selector": "$steps.dimensionality_boost.x", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_batched_inputs_at_dim_1_fed_into_consumer_decreasing_the_dimensionality_and_boosting_scalar_dim_at_the_end( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_SIMD_CONSUMER_DECREASING_OUTPUT_DIM_FED_BY_BATCH_INPUTS_AT_DIM_1_BOOSTING_DIM_AT_THE_END, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + image_1 = [ + np.zeros((192, 168, 3), dtype=np.uint8), + np.zeros((292, 168, 3), dtype=np.uint8), + ] + image_2 = [ + np.zeros((392, 168, 3), dtype=np.uint8), + np.zeros((492, 168, 3), dtype=np.uint8), + ] + + # when + results = execution_engine.run( + { + "image_1": image_1, + "image_2": image_2, + } + ) + + # then + assert results == [ + { + "shapes": [ + "[192, 168, 3][392, 168, 3]\n[292, 168, 3][492, 168, 3]", + "[192, 168, 3][392, 168, 3]\n[292, 168, 3][492, 168, 3]", + ] + } + ] + + 
+WORKFLOW_WITH_SIMD_CONSUMER_DECREASING_OUTPUT_DIM_FED_BY_BATCH_INPUTS_AT_DIM_2 = { + "version": "1.1", + "inputs": [ + {"type": "WorkflowImage", "name": "image_1"}, + {"type": "WorkflowImage", "name": "image_2"}, + {"type": "WorkflowParameter", "name": "confidence", "default_value": 0.3}, + ], + "steps": [ + { + "type": "DoubleBoostDimensionality", + "name": "dimensionality_boost", + "x": "$inputs.image_1", + "y": "$inputs.image_2", + }, + { + "type": "MultiSIMDImageConsumerDecreasingDim", + "name": "image_consumer", + "images_x": "$steps.dimensionality_boost.x", + "images_y": "$steps.dimensionality_boost.y", + "additional": "$inputs.confidence", + }, + { + "type": "IdentitySIMD", + "name": "identity_simd_2", + "x": "$steps.image_consumer.shapes", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "shapes", + "selector": "$steps.identity_simd_2.x", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_batched_inputs_at_dim_2_fed_into_consumer_decreasing_the_dimensionality( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + image_1 = np.zeros((200, 100, 3), dtype=np.uint8) + image_2 = np.zeros((300, 100, 3), dtype=np.uint8) + image_3 = np.zeros((400, 100, 3), dtype=np.uint8) + image_4 = np.zeros((500, 100, 3), dtype=np.uint8) + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_SIMD_CONSUMER_DECREASING_OUTPUT_DIM_FED_BY_BATCH_INPUTS_AT_DIM_2, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + result = execution_engine.run( + runtime_parameters={ + "image_1": [image_1, 
image_2], + "image_2": [image_3, image_4], + } + ) + + # then + assert result == [ + {"shapes": "[200, 100, 3][400, 100, 3]\n[200, 100, 3][400, 100, 3]"}, + {"shapes": "[300, 100, 3][500, 100, 3]\n[300, 100, 3][500, 100, 3]"}, + ] + + +WORKFLOW_WITH_SIMD_CONSUMER_DECREASING_OUTPUT_DIM_FED_BY_SCALAR_INPUTS_BOOSTING_DIM_AT_THE_END = { + "version": "1.1", + "inputs": [ + {"type": "WorkflowParameter", "name": "confidence", "default_value": 0.3}, + ], + "steps": [ + { + "type": "ImageProducer", + "name": "image_producer_x", + }, + { + "type": "ImageProducer", + "name": "image_producer_y", + }, + { + "type": "MultiSIMDImageConsumerDecreasingDim", + "name": "image_consumer", + "images_x": "$steps.image_producer_x.image", + "images_y": "$steps.image_producer_y.image", + "additional": "$inputs.confidence", + }, + { + "type": "IdentitySIMD", + "name": "identity_simd_2", + "x": "$steps.image_consumer.shapes", + }, + { + "type": "BoostDimensionality", + "name": "dimensionality_boost", + "x": "$steps.identity_simd_2.x", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "shapes", + "selector": "$steps.dimensionality_boost.x", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_scalar_inputs_fed_into_consumer_decreasing_the_dimensionality_and_boosting_scalar_dim_at_the_end( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_SIMD_CONSUMER_DECREASING_OUTPUT_DIM_FED_BY_SCALAR_INPUTS_BOOSTING_DIM_AT_THE_END, + init_parameters=workflow_init_parameters, + 
max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + results = execution_engine.run(runtime_parameters={}) + + # then + assert results == [ + {"shapes": ["[192, 168, 3][192, 168, 3]", "[192, 168, 3][192, 168, 3]"]}, + ] + + +WORKFLOW_WITH_NON_SIMD_CUSTOMER_ACCEPTING_LIST_OF_SCALAR_IMAGES = { + "version": "1.1", + "inputs": [], + "steps": [ + {"type": "ImageProducer", "name": "image_producer_x", "shape": (100, 100, 3)}, + { + "type": "ImageProducer", + "name": "image_producer_y", + "shape": (200, 200, 3), + }, + { + "type": "ImageProducer", + "name": "image_producer_z", + "shape": (300, 300, 3), + }, + { + "type": "NonSIMDConsumerAcceptingList", + "name": "image_consumer", + "x": ["$steps.image_producer_x.image", "$steps.image_producer_y.image"], + "y": ["$steps.image_producer_z.image"], + }, + { + "type": "BoostDimensionality", + "name": "dimensionality_boost", + "x": "$steps.image_consumer.x", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "x", + "selector": "$steps.dimensionality_boost.x", + }, + { + "type": "JsonField", + "name": "y", + "selector": "$steps.image_consumer.y", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_non_simd_consumers_accepting_list_of_scalar_selector( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_NON_SIMD_CUSTOMER_ACCEPTING_LIST_OF_SCALAR_IMAGES, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + results = execution_engine.run(runtime_parameters={}) + + # then + 
assert ( + len(results) == 1 + ), "Expected dim increase to happen, but should be nested according to how we treat emergent dimensions" + assert len(results[0]["x"]) == 2 + assert [i.numpy_image.shape for i in results[0]["x"][0]] == [ + (100, 100, 3), + (200, 200, 3), + ] + assert [i.numpy_image.shape for i in results[0]["y"]] == [(300, 300, 3)] + assert [i.numpy_image.shape for i in results[0]["x"][1]] == [ + (100, 100, 3), + (200, 200, 3), + ] + + +WORKFLOW_WITH_NON_SIMD_CUSTOMER_ACCEPTING_LIST_OF_BATCH_IMAGES = { + "version": "1.1", + "inputs": [ + {"type": "WorkflowImage", "name": "image_1"}, + {"type": "WorkflowImage", "name": "image_2"}, + {"type": "WorkflowImage", "name": "image_3"}, + ], + "steps": [ + { + "type": "NonSIMDConsumerAcceptingList", + "name": "image_consumer", + "x": ["$inputs.image_1", "$inputs.image_2"], + "y": ["$inputs.image_3"], + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "x", + "selector": "$steps.image_consumer.x", + }, + { + "type": "JsonField", + "name": "y", + "selector": "$steps.image_consumer.y", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_non_simd_consumers_accepting_list_of_batch_selector( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_NON_SIMD_CUSTOMER_ACCEPTING_LIST_OF_BATCH_IMAGES, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + results = execution_engine.run( + runtime_parameters={ + "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], + "image_2": 
[np.zeros((200, 200, 3)), np.zeros((220, 220, 3))], + "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], + } + ) + + # then + assert len(results) == 2 + assert [i.numpy_image.shape for i in results[0]["x"]] == [ + (100, 100, 3), + (200, 200, 3), + ] + assert [i.numpy_image.shape for i in results[0]["y"]] == [(300, 300, 3)] + assert [i.numpy_image.shape for i in results[1]["x"]] == [ + (120, 120, 3), + (220, 220, 3), + ] + assert [i.numpy_image.shape for i in results[1]["y"]] == [(320, 320, 3)] + + +WORKFLOW_WITH_NON_SIMD_CUSTOMER_ACCEPTING_LIST_OF_BATCH_AND_SCALAR_IMAGES = { + "version": "1.1", + "inputs": [ + {"type": "WorkflowImage", "name": "image_1"}, + {"type": "WorkflowImage", "name": "image_3"}, + ], + "steps": [ + {"type": "ImageProducer", "name": "image_producer_x", "shape": (50, 50, 3)}, + { + "type": "NonSIMDConsumerAcceptingList", + "name": "image_consumer", + "x": ["$inputs.image_1", "$steps.image_producer_x.image"], + "y": ["$inputs.image_3"], + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "x", + "selector": "$steps.image_consumer.x", + }, + { + "type": "JsonField", + "name": "y", + "selector": "$steps.image_consumer.y", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_non_simd_consumers_accepting_list_of_batch_and_scalar_selector( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_NON_SIMD_CUSTOMER_ACCEPTING_LIST_OF_BATCH_AND_SCALAR_IMAGES, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when 
+ results = execution_engine.run( + runtime_parameters={ + "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], + "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], + } + ) + + # then + assert len(results) == 2 + assert [i.numpy_image.shape for i in results[0]["x"]] == [ + (100, 100, 3), + (50, 50, 3), + ] + assert [i.numpy_image.shape for i in results[0]["y"]] == [(300, 300, 3)] + assert [i.numpy_image.shape for i in results[1]["x"]] == [ + (120, 120, 3), + (50, 50, 3), + ] + assert [i.numpy_image.shape for i in results[1]["y"]] == [(320, 320, 3)] + + +WORKFLOW_WITH_NON_SIMD_CUSTOMER_ACCEPTING_DICT_OF_BATCH_AND_SCALAR_IMAGES = { + "version": "1.1", + "inputs": [ + {"type": "WorkflowImage", "name": "image_1"}, + {"type": "WorkflowImage", "name": "image_3"}, + ], + "steps": [ + {"type": "ImageProducer", "name": "image_producer_x", "shape": (50, 50, 3)}, + { + "type": "NonSIMDConsumerAcceptingDict", + "name": "image_consumer", + "x": { + "a": "$inputs.image_1", + "b": "$steps.image_producer_x.image", + "c": "$inputs.image_3", + }, + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "x", + "selector": "$steps.image_consumer.x", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_non_simd_consumers_accepting_dict_of_batch_and_scalar_selector( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_NON_SIMD_CUSTOMER_ACCEPTING_DICT_OF_BATCH_AND_SCALAR_IMAGES, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + 
results = execution_engine.run( + runtime_parameters={ + "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], + "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], + } + ) + + # then + assert len(results) == 2 + assert [i.numpy_image.shape for i in results[0]["x"]] == [ + (100, 100, 3), + (50, 50, 3), + (300, 300, 3), + ] + assert [i.numpy_image.shape for i in results[1]["x"]] == [ + (120, 120, 3), + (50, 50, 3), + (320, 320, 3), + ] + + +WORKFLOW_WITH_NON_SIMD_CUSTOMER_ACCEPTING_DICT_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_2 = { + "version": "1.1", + "inputs": [ + {"type": "WorkflowImage", "name": "image_1"}, + {"type": "WorkflowImage", "name": "image_3"}, + ], + "steps": [ + { + "type": "DoubleBoostDimensionality", + "name": "dimensionality_boost", + "x": "$inputs.image_1", + "y": "$inputs.image_3", + }, + {"type": "ImageProducer", "name": "image_producer_x", "shape": (50, 50, 3)}, + { + "type": "NonSIMDConsumerAcceptingDict", + "name": "image_consumer", + "x": { + "a": "$steps.dimensionality_boost.x", + "b": "$steps.image_producer_x.image", + "c": "$steps.dimensionality_boost.y", + }, + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "x", + "selector": "$steps.image_consumer.x", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_non_simd_consumers_accepting_dict_of_batch_and_scalar_selector_when_batch_at_dim_2( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_NON_SIMD_CUSTOMER_ACCEPTING_DICT_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_2, + 
init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + results = execution_engine.run( + runtime_parameters={ + "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], + "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], + } + ) + + # then + assert len(results) == 2 + assert [i.numpy_image.shape for i in results[0]["x"][0]] == [ + (100, 100, 3), + (50, 50, 3), + (300, 300, 3), + ] + assert [i.numpy_image.shape for i in results[0]["x"][1]] == [ + (100, 100, 3), + (50, 50, 3), + (300, 300, 3), + ] + assert [i.numpy_image.shape for i in results[1]["x"][0]] == [ + (120, 120, 3), + (50, 50, 3), + (320, 320, 3), + ] + assert [i.numpy_image.shape for i in results[1]["x"][1]] == [ + (120, 120, 3), + (50, 50, 3), + (320, 320, 3), + ] + + +WORKFLOW_WITH_NON_SIMD_CUSTOMER_INCREASING_DIMENSIONALITY_ACCEPTING_DICT_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_2 = { + "version": "1.1", + "inputs": [ + {"type": "WorkflowImage", "name": "image_1"}, + {"type": "WorkflowImage", "name": "image_3"}, + ], + "steps": [ + { + "type": "DoubleBoostDimensionality", + "name": "dimensionality_boost", + "x": "$inputs.image_1", + "y": "$inputs.image_3", + }, + {"type": "ImageProducer", "name": "image_producer_x", "shape": (50, 50, 3)}, + { + "type": "NonSIMDConsumerAcceptingDictIncDim", + "name": "image_consumer", + "x": { + "a": "$steps.dimensionality_boost.x", + "b": "$steps.image_producer_x.image", + "c": "$steps.dimensionality_boost.y", + }, + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "x", + "selector": "$steps.image_consumer.x", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_non_simd_consumer_inc_dim_accepting_dict_of_batch_and_scalar_selector_when_batch_at_dim_2( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + 
"tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_NON_SIMD_CUSTOMER_INCREASING_DIMENSIONALITY_ACCEPTING_DICT_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_2, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + results = execution_engine.run( + runtime_parameters={ + "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], + "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], + } + ) + + # then + assert len(results) == 2 + assert [i.numpy_image.shape for i in results[0]["x"][0][0]] == [ + (100, 100, 3), + (50, 50, 3), + (300, 300, 3), + ] + assert [i.numpy_image.shape for i in results[0]["x"][0][1]] == [ + (100, 100, 3), + (50, 50, 3), + (300, 300, 3), + ] + assert [i.numpy_image.shape for i in results[0]["x"][1][0]] == [ + (100, 100, 3), + (50, 50, 3), + (300, 300, 3), + ] + assert [i.numpy_image.shape for i in results[0]["x"][1][1]] == [ + (100, 100, 3), + (50, 50, 3), + (300, 300, 3), + ] + assert [i.numpy_image.shape for i in results[1]["x"][0][0]] == [ + (120, 120, 3), + (50, 50, 3), + (320, 320, 3), + ] + assert [i.numpy_image.shape for i in results[1]["x"][0][1]] == [ + (120, 120, 3), + (50, 50, 3), + (320, 320, 3), + ] + assert [i.numpy_image.shape for i in results[1]["x"][1][0]] == [ + (120, 120, 3), + (50, 50, 3), + (320, 320, 3), + ] + assert [i.numpy_image.shape for i in results[1]["x"][1][1]] == [ + (120, 120, 3), + (50, 50, 3), + (320, 320, 3), + ] + + +WORKFLOW_WITH_NON_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_DICT_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_2 = { + "version": "1.1", + "inputs": [ + {"type": "WorkflowImage", "name": "image_1"}, + {"type": "WorkflowImage", "name": "image_3"}, + 
], + "steps": [ + { + "type": "DoubleBoostDimensionality", + "name": "dimensionality_boost", + "x": "$inputs.image_1", + "y": "$inputs.image_3", + }, + {"type": "ImageProducer", "name": "image_producer_x", "shape": (50, 50, 3)}, + { + "type": "NonSIMDConsumerAcceptingDictDecDim", + "name": "image_consumer", + "x": { + "a": "$steps.dimensionality_boost.x", + "b": "$steps.image_producer_x.image", + "c": "$steps.dimensionality_boost.y", + }, + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "x", + "selector": "$steps.image_consumer.x", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_non_simd_consumer_dec_dim_accepting_dict_of_batch_and_scalar_selector_when_batch_at_dim_2( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_NON_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_DICT_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_2, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + results = execution_engine.run( + runtime_parameters={ + "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], + "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], + } + ) + + # then + assert len(results) == 2 + assert [i.numpy_image.shape for i in results[0]["x"][0]] == [ + (100, 100, 3), + (100, 100, 3), + ] + assert [i.numpy_image.shape for i in results[0]["x"][1]] == [ + (50, 50, 3), + (50, 50, 3), + ] + assert [i.numpy_image.shape for i in results[0]["x"][2]] == [ + (300, 300, 3), + (300, 300, 3), + ] + assert [i.numpy_image.shape 
for i in results[1]["x"][0]] == [ + (120, 120, 3), + (120, 120, 3), + ] + assert [i.numpy_image.shape for i in results[1]["x"][1]] == [ + (50, 50, 3), + (50, 50, 3), + ] + assert [i.numpy_image.shape for i in results[1]["x"][2]] == [ + (320, 320, 3), + (320, 320, 3), + ] + + +WORKFLOW_WITH_NON_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_DICT_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_1 = { + "version": "1.1", + "inputs": [ + {"type": "WorkflowImage", "name": "image_1"}, + {"type": "WorkflowImage", "name": "image_3"}, + ], + "steps": [ + {"type": "ImageProducer", "name": "image_producer_x", "shape": (50, 50, 3)}, + { + "type": "NonSIMDConsumerAcceptingDictDecDim", + "name": "image_consumer", + "x": { + "a": "$inputs.image_1", + "b": "$steps.image_producer_x.image", + "c": "$inputs.image_3", + }, + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "x", + "selector": "$steps.image_consumer.x", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_non_simd_consumer_dec_dim_accepting_dict_of_batch_and_scalar_selector_when_batch_at_dim_1( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_NON_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_DICT_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_1, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + results = execution_engine.run( + runtime_parameters={ + "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], + "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], + } + ) + + # then 
+ assert len(results) == 1 + assert [i.numpy_image.shape for i in results[0]["x"][0]] == [ + (100, 100, 3), + (120, 120, 3), + ] + assert [i.numpy_image.shape for i in results[0]["x"][1]] == [ + (50, 50, 3), + (50, 50, 3), + ] + assert [i.numpy_image.shape for i in results[0]["x"][2]] == [ + (300, 300, 3), + (320, 320, 3), + ] + + +WORKFLOW_WITH_NON_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_DICT_OF_SCALARS = { + "version": "1.1", + "inputs": [], + "steps": [ + {"type": "ImageProducer", "name": "image_producer_x", "shape": (50, 50, 3)}, + {"type": "ImageProducer", "name": "image_producer_y", "shape": (60, 60, 3)}, + { + "type": "NonSIMDConsumerAcceptingDictDecDim", + "name": "image_consumer", + "x": { + "a": "$steps.image_producer_x.image", + "b": "$steps.image_producer_y.image", + }, + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "x", + "selector": "$steps.image_consumer.x", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_non_simd_consumer_dec_dim_accepting_dict_of_batch_and_scalar_selectors( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_NON_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_DICT_OF_SCALARS, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + results = execution_engine.run(runtime_parameters={}) + + # then + assert len(results) == 1 + assert [i.numpy_image.shape for i in results[0]["x"][0]] == [(50, 50, 3)] + assert [i.numpy_image.shape for i in results[0]["x"][1]] == [(60, 60, 3)] + + 
+WORKFLOW_WITH_NON_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_2 = { + "version": "1.1", + "inputs": [ + {"type": "WorkflowImage", "name": "image_1"}, + {"type": "WorkflowImage", "name": "image_3"}, + ], + "steps": [ + { + "type": "DoubleBoostDimensionality", + "name": "dimensionality_boost", + "x": "$inputs.image_1", + "y": "$inputs.image_3", + }, + {"type": "ImageProducer", "name": "image_producer_x", "shape": (50, 50, 3)}, + { + "type": "NonSIMDConsumerAcceptingListDecDim", + "name": "image_consumer", + "x": [ + "$steps.dimensionality_boost.x", + "$steps.image_producer_x.image", + "$steps.dimensionality_boost.y", + ], + "y": "some-value", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "x", + "selector": "$steps.image_consumer.x", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_non_simd_consumer_dec_dim_accepting_list_of_batch_and_scalar_selector_when_batch_at_dim_2( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_NON_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_2, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + results = execution_engine.run( + runtime_parameters={ + "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], + "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], + } + ) + + # then + assert len(results) == 2 + assert [i.numpy_image.shape for i in results[0]["x"]] == [ + (100, 100, 3), + 
(100, 100, 3), + (50, 50, 3), + (50, 50, 3), + (300, 300, 3), + (300, 300, 3), + ] + assert [i.numpy_image.shape for i in results[1]["x"]] == [ + (120, 120, 3), + (120, 120, 3), + (50, 50, 3), + (50, 50, 3), + (320, 320, 3), + (320, 320, 3), + ] + + +WORKFLOW_WITH_NON_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_1 = { + "version": "1.1", + "inputs": [ + {"type": "WorkflowImage", "name": "image_1"}, + {"type": "WorkflowImage", "name": "image_3"}, + ], + "steps": [ + {"type": "ImageProducer", "name": "image_producer_x", "shape": (50, 50, 3)}, + { + "type": "NonSIMDConsumerAcceptingListDecDim", + "name": "image_consumer", + "x": [ + "$inputs.image_1", + "$steps.image_producer_x.image", + "$inputs.image_3", + ], + "y": "some-value", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "x", + "selector": "$steps.image_consumer.x", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_non_simd_consumer_dec_dim_accepting_list_of_batch_and_scalar_selector_when_batch_at_dim_1( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_NON_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_1, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + results = execution_engine.run( + runtime_parameters={ + "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], + "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], + } + ) + + # then + assert 
len(results) == 1 + assert [i.numpy_image.shape for i in results[0]["x"]] == [ + (100, 100, 3), + (120, 120, 3), + (50, 50, 3), + (50, 50, 3), + (300, 300, 3), + (320, 320, 3), + ] + + +WORKFLOW_WITH_NON_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_SCALARS = { + "version": "1.1", + "inputs": [], + "steps": [ + {"type": "ImageProducer", "name": "image_producer_x", "shape": (50, 50, 3)}, + {"type": "ImageProducer", "name": "image_producer_y", "shape": (60, 60, 3)}, + { + "type": "NonSIMDConsumerAcceptingListDecDim", + "name": "image_consumer", + "x": ["$steps.image_producer_x.image", "$steps.image_producer_y.image"], + "y": "some-value", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "x", + "selector": "$steps.image_consumer.x", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_non_simd_consumer_dec_dim_accepting_list_of_batch_and_scalar_selectors( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_NON_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_SCALARS, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + results = execution_engine.run(runtime_parameters={}) + + # then + assert len(results) == 1 + assert [i.numpy_image.shape for i in results[0]["x"]] == [(50, 50, 3), (60, 60, 3)] + + +WORKFLOW_WITH_SIMD_CUSTOMER_ACCEPTING_LIST_OF_SCALAR_IMAGES = { + "version": "1.1", + "inputs": [], + "steps": [ + {"type": "ImageProducer", "name": "image_producer_x", "shape": (100, 100, 3)}, + { + "type": 
"ImageProducer", + "name": "image_producer_y", + "shape": (200, 200, 3), + }, + { + "type": "ImageProducer", + "name": "image_producer_z", + "shape": (300, 300, 3), + }, + { + "type": "SIMDConsumerAcceptingList", + "name": "image_consumer", + "x": ["$steps.image_producer_x.image", "$steps.image_producer_y.image"], + "y": ["$steps.image_producer_z.image"], + }, + { + "type": "BoostDimensionality", + "name": "dimensionality_boost", + "x": "$steps.image_consumer.x", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "x", + "selector": "$steps.dimensionality_boost.x", + }, + { + "type": "JsonField", + "name": "y", + "selector": "$steps.image_consumer.y", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_simd_consumers_accepting_list_of_scalar_selector( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_SIMD_CUSTOMER_ACCEPTING_LIST_OF_SCALAR_IMAGES, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + results = execution_engine.run(runtime_parameters={}) + + # then + assert ( + len(results) == 1 + ), "Expected dim increase to happen, but in artificially nested dim" + assert len(results[0]["x"]) == 2 + assert [i.numpy_image.shape for i in results[0]["x"][0]] == [ + (100, 100, 3), + (200, 200, 3), + ] + assert [i.numpy_image.shape for i in results[0]["y"]] == [(300, 300, 3)] + assert [i.numpy_image.shape for i in results[0]["x"][1]] == [ + (100, 100, 3), + (200, 200, 3), + ] + + 
+WORKFLOW_WITH_SIMD_CUSTOMER_ACCEPTING_LIST_OF_BATCH_IMAGES = { + "version": "1.1", + "inputs": [ + {"type": "WorkflowImage", "name": "image_1"}, + {"type": "WorkflowImage", "name": "image_2"}, + {"type": "WorkflowImage", "name": "image_3"}, + ], + "steps": [ + { + "type": "SIMDConsumerAcceptingList", + "name": "image_consumer", + "x": ["$inputs.image_1", "$inputs.image_2"], + "y": ["$inputs.image_3"], + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "x", + "selector": "$steps.image_consumer.x", + }, + { + "type": "JsonField", + "name": "y", + "selector": "$steps.image_consumer.y", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_simd_consumers_accepting_list_of_batch_selector( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_SIMD_CUSTOMER_ACCEPTING_LIST_OF_BATCH_IMAGES, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + results = execution_engine.run( + runtime_parameters={ + "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], + "image_2": [np.zeros((200, 200, 3)), np.zeros((220, 220, 3))], + "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], + } + ) + + # then + assert len(results) == 2 + assert [i.numpy_image.shape for i in results[0]["x"]] == [ + (100, 100, 3), + (200, 200, 3), + ] + assert [i.numpy_image.shape for i in results[0]["y"]] == [(300, 300, 3)] + assert [i.numpy_image.shape for i in results[1]["x"]] == [ + (120, 120, 3), + (220, 220, 3), + ] + assert [i.numpy_image.shape for i 
in results[1]["y"]] == [(320, 320, 3)] + + +WORKFLOW_WITH_SIMD_CUSTOMER_ACCEPTING_LIST_OF_BATCH_AND_SCALAR_IMAGES = { + "version": "1.1", + "inputs": [ + {"type": "WorkflowImage", "name": "image_1"}, + {"type": "WorkflowImage", "name": "image_3"}, + ], + "steps": [ + {"type": "ImageProducer", "name": "image_producer_x", "shape": (50, 50, 3)}, + { + "type": "SIMDConsumerAcceptingList", + "name": "image_consumer", + "x": ["$inputs.image_1", "$steps.image_producer_x.image"], + "y": ["$inputs.image_3"], + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "x", + "selector": "$steps.image_consumer.x", + }, + { + "type": "JsonField", + "name": "y", + "selector": "$steps.image_consumer.y", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_simd_consumers_accepting_list_of_batch_and_scalar_selector( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_SIMD_CUSTOMER_ACCEPTING_LIST_OF_BATCH_AND_SCALAR_IMAGES, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + results = execution_engine.run( + runtime_parameters={ + "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], + "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], + } + ) + + # then + assert len(results) == 2 + assert [i.numpy_image.shape for i in results[0]["x"]] == [ + (100, 100, 3), + (50, 50, 3), + ] + assert [i.numpy_image.shape for i in results[0]["y"]] == [(300, 300, 3)] + assert [i.numpy_image.shape for i in results[1]["x"]] == [ + (120, 120, 3), + 
(50, 50, 3), + ] + assert [i.numpy_image.shape for i in results[1]["y"]] == [(320, 320, 3)] + + +WORKFLOW_WITH_SIMD_CUSTOMER_ACCEPTING_DICT_OF_BATCH_AND_SCALAR_IMAGES = { + "version": "1.1", + "inputs": [ + {"type": "WorkflowImage", "name": "image_1"}, + {"type": "WorkflowImage", "name": "image_3"}, + ], + "steps": [ + {"type": "ImageProducer", "name": "image_producer_x", "shape": (50, 50, 3)}, + { + "type": "SIMDConsumerAcceptingDict", + "name": "image_consumer", + "x": { + "a": "$inputs.image_1", + "b": "$steps.image_producer_x.image", + "c": "$inputs.image_3", + }, + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "x", + "selector": "$steps.image_consumer.x", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_simd_consumers_accepting_dict_of_batch_and_scalar_selector( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_SIMD_CUSTOMER_ACCEPTING_DICT_OF_BATCH_AND_SCALAR_IMAGES, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + results = execution_engine.run( + runtime_parameters={ + "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], + "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], + } + ) + + # then + assert len(results) == 2 + assert [i.numpy_image.shape for i in results[0]["x"]] == [ + (100, 100, 3), + (50, 50, 3), + (300, 300, 3), + ] + assert [i.numpy_image.shape for i in results[1]["x"]] == [ + (120, 120, 3), + (50, 50, 3), + (320, 320, 3), + ] + + 
+WORKFLOW_WITH_SIMD_CUSTOMER_ACCEPTING_DICT_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_2 = { + "version": "1.1", + "inputs": [ + {"type": "WorkflowImage", "name": "image_1"}, + {"type": "WorkflowImage", "name": "image_3"}, + ], + "steps": [ + { + "type": "DoubleBoostDimensionality", + "name": "dimensionality_boost", + "x": "$inputs.image_1", + "y": "$inputs.image_3", + }, + {"type": "ImageProducer", "name": "image_producer_x", "shape": (50, 50, 3)}, + { + "type": "SIMDConsumerAcceptingDict", + "name": "image_consumer", + "x": { + "a": "$steps.dimensionality_boost.x", + "b": "$steps.image_producer_x.image", + "c": "$steps.dimensionality_boost.y", + }, + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "x", + "selector": "$steps.image_consumer.x", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_simd_consumers_accepting_dict_of_batch_and_scalar_selector_when_batch_at_dim_2( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_SIMD_CUSTOMER_ACCEPTING_DICT_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_2, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + results = execution_engine.run( + runtime_parameters={ + "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], + "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], + } + ) + + # then + assert len(results) == 2 + assert [i.numpy_image.shape for i in results[0]["x"][0]] == [ + (100, 100, 3), + (50, 50, 3), + (300, 300, 3), + ] + assert [i.numpy_image.shape for i in 
results[0]["x"][1]] == [ + (100, 100, 3), + (50, 50, 3), + (300, 300, 3), + ] + assert [i.numpy_image.shape for i in results[1]["x"][0]] == [ + (120, 120, 3), + (50, 50, 3), + (320, 320, 3), + ] + assert [i.numpy_image.shape for i in results[1]["x"][1]] == [ + (120, 120, 3), + (50, 50, 3), + (320, 320, 3), + ] + + +WORKFLOW_WITH_SIMD_CUSTOMER_INCREASING_DIMENSIONALITY_ACCEPTING_DICT_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_2 = { + "version": "1.1", + "inputs": [ + {"type": "WorkflowImage", "name": "image_1"}, + {"type": "WorkflowImage", "name": "image_3"}, + ], + "steps": [ + { + "type": "DoubleBoostDimensionality", + "name": "dimensionality_boost", + "x": "$inputs.image_1", + "y": "$inputs.image_3", + }, + {"type": "ImageProducer", "name": "image_producer_x", "shape": (50, 50, 3)}, + { + "type": "SIMDConsumerAcceptingDictIncDim", + "name": "image_consumer", + "x": { + "a": "$steps.dimensionality_boost.x", + "b": "$steps.image_producer_x.image", + "c": "$steps.dimensionality_boost.y", + }, + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "x", + "selector": "$steps.image_consumer.x", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_simd_consumer_inc_dim_accepting_dict_of_batch_and_scalar_selector_when_batch_at_dim_2( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_SIMD_CUSTOMER_INCREASING_DIMENSIONALITY_ACCEPTING_DICT_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_2, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + results = 
execution_engine.run( + runtime_parameters={ + "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], + "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], + } + ) + + # then + assert len(results) == 2 + assert [i.numpy_image.shape for i in results[0]["x"][0][0]] == [ + (100, 100, 3), + (50, 50, 3), + (300, 300, 3), + ] + assert [i.numpy_image.shape for i in results[0]["x"][0][1]] == [ + (100, 100, 3), + (50, 50, 3), + (300, 300, 3), + ] + assert [i.numpy_image.shape for i in results[0]["x"][1][0]] == [ + (100, 100, 3), + (50, 50, 3), + (300, 300, 3), + ] + assert [i.numpy_image.shape for i in results[0]["x"][1][1]] == [ + (100, 100, 3), + (50, 50, 3), + (300, 300, 3), + ] + assert [i.numpy_image.shape for i in results[1]["x"][0][0]] == [ + (120, 120, 3), + (50, 50, 3), + (320, 320, 3), + ] + assert [i.numpy_image.shape for i in results[1]["x"][0][1]] == [ + (120, 120, 3), + (50, 50, 3), + (320, 320, 3), + ] + assert [i.numpy_image.shape for i in results[1]["x"][1][0]] == [ + (120, 120, 3), + (50, 50, 3), + (320, 320, 3), + ] + assert [i.numpy_image.shape for i in results[1]["x"][1][1]] == [ + (120, 120, 3), + (50, 50, 3), + (320, 320, 3), + ] + + +WORKFLOW_WITH_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_DICT_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_2 = { + "version": "1.1", + "inputs": [ + {"type": "WorkflowImage", "name": "image_1"}, + {"type": "WorkflowImage", "name": "image_3"}, + ], + "steps": [ + { + "type": "DoubleBoostDimensionality", + "name": "dimensionality_boost", + "x": "$inputs.image_1", + "y": "$inputs.image_3", + }, + {"type": "ImageProducer", "name": "image_producer_x", "shape": (50, 50, 3)}, + { + "type": "SIMDConsumerAcceptingDictDecDim", + "name": "image_consumer", + "x": { + "a": "$steps.dimensionality_boost.x", + "b": "$steps.image_producer_x.image", + "c": "$steps.dimensionality_boost.y", + }, + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "x", + "selector": "$steps.image_consumer.x", + }, + ], +} + + 
+@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_simd_consumer_dec_dim_accepting_dict_of_batch_and_scalar_selector_when_batch_at_dim_2( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_DICT_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_2, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + results = execution_engine.run( + runtime_parameters={ + "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], + "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], + } + ) + + # then + assert len(results) == 2 + assert [i.numpy_image.shape for i in results[0]["x"][0]] == [ + (100, 100, 3), + (100, 100, 3), + ] + assert [i.numpy_image.shape for i in results[0]["x"][1]] == [ + (50, 50, 3), + (50, 50, 3), + ] + assert [i.numpy_image.shape for i in results[0]["x"][2]] == [ + (300, 300, 3), + (300, 300, 3), + ] + assert [i.numpy_image.shape for i in results[1]["x"][0]] == [ + (120, 120, 3), + (120, 120, 3), + ] + assert [i.numpy_image.shape for i in results[1]["x"][1]] == [ + (50, 50, 3), + (50, 50, 3), + ] + assert [i.numpy_image.shape for i in results[1]["x"][2]] == [ + (320, 320, 3), + (320, 320, 3), + ] + + +WORKFLOW_WITH_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_DICT_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_1 = { + "version": "1.1", + "inputs": [ + {"type": "WorkflowImage", "name": "image_1"}, + {"type": "WorkflowImage", "name": "image_3"}, + ], + "steps": [ + {"type": "ImageProducer", "name": 
"image_producer_x", "shape": (50, 50, 3)}, + { + "type": "SIMDConsumerAcceptingDictDecDim", + "name": "image_consumer", + "x": { + "a": "$inputs.image_1", + "b": "$steps.image_producer_x.image", + "c": "$inputs.image_3", + }, + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "x", + "selector": "$steps.image_consumer.x", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_simd_consumer_dec_dim_accepting_dict_of_batch_and_scalar_selector_when_batch_at_dim_1( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_DICT_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_1, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + results = execution_engine.run( + runtime_parameters={ + "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], + "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], + } + ) + + # then + assert len(results) == 1 + assert [i.numpy_image.shape for i in results[0]["x"][0]] == [ + (100, 100, 3), + (120, 120, 3), + ] + assert [i.numpy_image.shape for i in results[0]["x"][1]] == [ + (50, 50, 3), + (50, 50, 3), + ] + assert [i.numpy_image.shape for i in results[0]["x"][2]] == [ + (300, 300, 3), + (320, 320, 3), + ] + + +WORKFLOW_WITH_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_DICT_OF_SCALARS = { + "version": "1.1", + "inputs": [], + "steps": [ + {"type": "ImageProducer", "name": "image_producer_x", "shape": (50, 50, 3)}, + {"type": "ImageProducer", "name": 
"image_producer_y", "shape": (60, 60, 3)}, + { + "type": "SIMDConsumerAcceptingDictDecDim", + "name": "image_consumer", + "x": { + "a": "$steps.image_producer_x.image", + "b": "$steps.image_producer_y.image", + }, + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "x", + "selector": "$steps.image_consumer.x", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_simd_consumer_dec_dim_accepting_dict_of_batch_and_scalar_selectors( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_DICT_OF_SCALARS, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + results = execution_engine.run(runtime_parameters={}) + + # then + assert len(results) == 1 + assert [i.numpy_image.shape for i in results[0]["x"][0]] == [(50, 50, 3)] + assert [i.numpy_image.shape for i in results[0]["x"][1]] == [(60, 60, 3)] + + +WORKFLOW_WITH_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_2 = { + "version": "1.1", + "inputs": [ + {"type": "WorkflowImage", "name": "image_1"}, + {"type": "WorkflowImage", "name": "image_3"}, + ], + "steps": [ + { + "type": "DoubleBoostDimensionality", + "name": "dimensionality_boost", + "x": "$inputs.image_1", + "y": "$inputs.image_3", + }, + {"type": "ImageProducer", "name": "image_producer_x", "shape": (50, 50, 3)}, + { + "type": "SIMDConsumerAcceptingListDecDim", + "name": "image_consumer", + "x": [ + "$steps.dimensionality_boost.x", + 
"$steps.image_producer_x.image", + "$steps.dimensionality_boost.y", + ], + "y": "some-value", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "x", + "selector": "$steps.image_consumer.x", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_simd_consumer_dec_dim_accepting_list_of_batch_and_scalar_selector_when_batch_at_dim_2( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_2, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + results = execution_engine.run( + runtime_parameters={ + "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], + "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], + } + ) + + # then + assert len(results) == 2 + assert [i.numpy_image.shape for i in results[0]["x"]] == [ + (100, 100, 3), + (100, 100, 3), + (50, 50, 3), + (50, 50, 3), + (300, 300, 3), + (300, 300, 3), + ] + assert [i.numpy_image.shape for i in results[1]["x"]] == [ + (120, 120, 3), + (120, 120, 3), + (50, 50, 3), + (50, 50, 3), + (320, 320, 3), + (320, 320, 3), + ] + + +WORKFLOW_WITH_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_1 = { + "version": "1.1", + "inputs": [ + {"type": "WorkflowImage", "name": "image_1"}, + {"type": "WorkflowImage", "name": "image_3"}, + ], + "steps": [ + {"type": "ImageProducer", "name": "image_producer_x", "shape": (50, 50, 3)}, + { + "type": 
"SIMDConsumerAcceptingListDecDim", + "name": "image_consumer", + "x": [ + "$inputs.image_1", + "$steps.image_producer_x.image", + "$inputs.image_3", + ], + "y": "some-value", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "x", + "selector": "$steps.image_consumer.x", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_simd_consumer_dec_dim_accepting_list_of_batch_and_scalar_selector_when_batch_at_dim_1( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_1, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + results = execution_engine.run( + runtime_parameters={ + "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], + "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], + } + ) + + # then + assert len(results) == 1 + assert [i.numpy_image.shape for i in results[0]["x"]] == [ + (100, 100, 3), + (120, 120, 3), + (50, 50, 3), + (50, 50, 3), + (300, 300, 3), + (320, 320, 3), + ] + + +WORKFLOW_WITH_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_SCALARS = { + "version": "1.1", + "inputs": [], + "steps": [ + {"type": "ImageProducer", "name": "image_producer_x", "shape": (50, 50, 3)}, + {"type": "ImageProducer", "name": "image_producer_y", "shape": (60, 60, 3)}, + { + "type": "SIMDConsumerAcceptingListDecDim", + "name": "image_consumer", + "x": ["$steps.image_producer_x.image", 
"$steps.image_producer_y.image"], + "y": "some-value", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "x", + "selector": "$steps.image_consumer.x", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_simd_consumer_dec_dim_accepting_list_of_batch_and_scalar_selectors( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_SCALARS, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + results = execution_engine.run(runtime_parameters={}) + + # then + assert len(results) == 1 + assert [i.numpy_image.shape for i in results[0]["x"]] == [(50, 50, 3), (60, 60, 3)] + + +TEST_WORKFLOW_WITH_FLOW_CONTROL_BLOCKING_SIMD_PRODUCER = { + "version": "1.1", + "inputs": [], + "steps": [ + { + "type": "AlwaysTerminate", + "name": "condition", + "x": "dummy", + "next_steps": ["$steps.image_producer_x"], + }, + {"type": "ImageProducer", "name": "image_producer_x", "shape": (50, 50, 3)}, + {"type": "ImageProducer", "name": "image_producer_y", "shape": (60, 60, 3)}, + { + "type": "SIMDConsumerAcceptingListDecDim", + "name": "image_consumer", + "x": ["$steps.image_producer_x.image", "$steps.image_producer_y.image"], + "y": "some-value", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "x", + "selector": "$steps.image_consumer.x", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_always_blocking_simd_producer( + get_plugin_modules_mock: 
MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=TEST_WORKFLOW_WITH_FLOW_CONTROL_BLOCKING_SIMD_PRODUCER, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + results = execution_engine.run(runtime_parameters={}) + + # then + assert len(results) == 1 + assert results[0]["x"] is None + + +TEST_WORKFLOW_WITH_FLOW_CONTROL_PASSING_SIMD_PRODUCER = { + "version": "1.1", + "inputs": [], + "steps": [ + { + "type": "AlwaysPass", + "name": "condition", + "x": "dummy", + "next_steps": ["$steps.image_producer_x"], + }, + {"type": "ImageProducer", "name": "image_producer_x", "shape": (50, 50, 3)}, + {"type": "ImageProducer", "name": "image_producer_y", "shape": (60, 60, 3)}, + { + "type": "SIMDConsumerAcceptingListDecDim", + "name": "image_consumer", + "x": ["$steps.image_producer_x.image", "$steps.image_producer_y.image"], + "y": "some-value", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "x", + "selector": "$steps.image_consumer.x", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_always_passing_simd_producer( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + 
workflow_definition=TEST_WORKFLOW_WITH_FLOW_CONTROL_PASSING_SIMD_PRODUCER, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + results = execution_engine.run(runtime_parameters={}) + + # then + assert len(results) == 1 + assert [i.numpy_image.shape for i in results[0]["x"]] == [(50, 50, 3), (60, 60, 3)] + + +TEST_WORKFLOW_WITH_FLOW_CONTROL_BLOCKING_SIMD_PRODUCER_AFTER_PRODUCTION = { + "version": "1.1", + "inputs": [], + "steps": [ + {"type": "ImageProducer", "name": "image_producer_x", "shape": (50, 50, 3)}, + { + "type": "AlwaysTerminate", + "name": "condition", + "x": "$steps.image_producer_x.image", + "next_steps": ["$steps.image_consumer"], + }, + {"type": "ImageProducer", "name": "image_producer_y", "shape": (60, 60, 3)}, + { + "type": "SIMDConsumerAcceptingListDecDim", + "name": "image_consumer", + "x": ["$steps.image_producer_x.image", "$steps.image_producer_y.image"], + "y": "some-value", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "x", + "selector": "$steps.image_consumer.x", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_always_blocking_simd_producer_after_production( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=TEST_WORKFLOW_WITH_FLOW_CONTROL_BLOCKING_SIMD_PRODUCER_AFTER_PRODUCTION, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + results = execution_engine.run(runtime_parameters={}) + + # then + assert len(results) == 1 + assert results[0]["x"] 
is None + + +TEST_WORKFLOW_WITH_FLOW_CONTROL_PASSING_SIMD_PRODUCER_AFTER_PRODUCTION = { + "version": "1.1", + "inputs": [], + "steps": [ + {"type": "ImageProducer", "name": "image_producer_x", "shape": (50, 50, 3)}, + { + "type": "AlwaysPass", + "name": "condition", + "x": "$steps.image_producer_x.image", + "next_steps": ["$steps.image_consumer"], + }, + {"type": "ImageProducer", "name": "image_producer_y", "shape": (60, 60, 3)}, + { + "type": "SIMDConsumerAcceptingListDecDim", + "name": "image_consumer", + "x": ["$steps.image_producer_x.image", "$steps.image_producer_y.image"], + "y": "some-value", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "x", + "selector": "$steps.image_consumer.x", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_always_passing_simd_producer_after_production( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=TEST_WORKFLOW_WITH_FLOW_CONTROL_PASSING_SIMD_PRODUCER_AFTER_PRODUCTION, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + results = execution_engine.run(runtime_parameters={}) + + # then + assert len(results) == 1 + assert [i.numpy_image.shape for i in results[0]["x"]] == [(50, 50, 3), (60, 60, 3)] + + +WORKFLOW_WITH_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_1_AND_FLOW_CONTROLL = { + "version": "1.1", + "inputs": [ + {"type": "WorkflowImage", "name": "image_1"}, + {"type": "WorkflowImage", "name": "image_3"}, + ], + "steps": [ + {"type": "ImageProducer", 
"name": "image_producer_x", "shape": (50, 50, 3)}, + { + "type": "EachSecondPass", + "name": "condition", + "x": "$inputs.image_1", + "next_steps": ["$steps.image_consumer"], + }, + { + "type": "SIMDConsumerAcceptingListDecDim", + "name": "image_consumer", + "x": [ + "$inputs.image_1", + "$steps.image_producer_x.image", + "$inputs.image_3", + ], + "y": "some-value", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "x", + "selector": "$steps.image_consumer.x", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_simd_consumer_dec_dim_accepting_list_of_batch_and_scalar_selector_when_batch_at_dim_1_and_flow_controll( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_1_AND_FLOW_CONTROLL, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + results = execution_engine.run( + runtime_parameters={ + "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], + "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], + } + ) + + # then + assert len(results) == 1 + assert [ + i.numpy_image.shape if i is not None else None for i in results[0]["x"] + ] == [ + (100, 100, 3), + None, + (50, 50, 3), + None, + (300, 300, 3), + None, + ] + + +WORKFLOW_WITH_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_1_AND_MULTI_FLOW_CONTROLL_1 = { + "version": "1.1", + "inputs": [ + {"type": "WorkflowImage", 
"name": "image_1"}, + {"type": "WorkflowImage", "name": "image_3"}, + ], + "steps": [ + {"type": "ImageProducer", "name": "image_producer_x", "shape": (50, 50, 3)}, + { + "type": "EachSecondPass", + "name": "condition", + "x": "$inputs.image_1", + "next_steps": ["$steps.image_consumer"], + }, + { + "type": "AlwaysPass", + "name": "condition_scalar", + "x": "$steps.image_producer_x.image", + "next_steps": ["$steps.image_consumer"], + }, + { + "type": "SIMDConsumerAcceptingListDecDim", + "name": "image_consumer", + "x": [ + "$inputs.image_1", + "$steps.image_producer_x.image", + "$inputs.image_3", + ], + "y": "some-value", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "x", + "selector": "$steps.image_consumer.x", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_simd_consumer_dec_dim_accepting_list_of_batch_and_scalar_selector_when_batch_at_dim_1_and_multi_flow_controll_1( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_1_AND_MULTI_FLOW_CONTROLL_1, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + results = execution_engine.run( + runtime_parameters={ + "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], + "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], + } + ) + + # then + assert len(results) == 1 + assert [ + i.numpy_image.shape if i is not None else None for i in results[0]["x"] + ] == [ + (100, 
100, 3), + None, + (50, 50, 3), + None, + (300, 300, 3), + None, + ] + + +WORKFLOW_WITH_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_1_AND_MULTI_FLOW_CONTROLL_2 = { + "version": "1.1", + "inputs": [ + {"type": "WorkflowImage", "name": "image_1"}, + {"type": "WorkflowImage", "name": "image_3"}, + {"type": "WorkflowParameter", "name": "some", "default_value": 39}, + ], + "steps": [ + {"type": "ImageProducer", "name": "image_producer_x", "shape": (50, 50, 3)}, + { + "type": "EachSecondPass", + "name": "condition", + "x": "$inputs.image_1", + "next_steps": ["$steps.image_consumer"], + }, + { + "type": "AlwaysTerminate", + "name": "condition_scalar", + "x": "$steps.image_producer_x.image", + "next_steps": ["$steps.image_consumer"], + }, + { + "type": "SIMDConsumerAcceptingListDecDim", + "name": "image_consumer", + "x": [ + "$inputs.image_1", + "$steps.image_producer_x.image", + "$inputs.image_3", + "$inputs.some", + ], + "y": "some-value", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "x", + "selector": "$steps.image_consumer.x", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_simd_consumer_dec_dim_accepting_list_of_batch_and_scalar_selector_when_batch_at_dim_1_and_multi_flow_controll_2( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_1_AND_MULTI_FLOW_CONTROLL_2, + init_parameters=workflow_init_parameters, + 
max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + results = execution_engine.run( + runtime_parameters={ + "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], + "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], + } + ) + + # then + assert len(results) == 1 + assert results[0]["x"] is None + + +WORKFLOW_WITH_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_2_WITH_FLOW_CONTROL_AT_DIM_0 = { + "version": "1.1", + "inputs": [ + {"type": "WorkflowImage", "name": "image_1"}, + {"type": "WorkflowImage", "name": "image_3"}, + ], + "steps": [ + { + "type": "DoubleBoostDimensionality", + "name": "dimensionality_boost", + "x": "$inputs.image_1", + "y": "$inputs.image_3", + }, + {"type": "ImageProducer", "name": "image_producer_x", "shape": (50, 50, 3)}, + { + "type": "AlwaysTerminate", + "name": "condition_scalar", + "x": "$steps.image_producer_x.image", + "next_steps": ["$steps.image_consumer"], + }, + { + "type": "SIMDConsumerAcceptingListDecDim", + "name": "image_consumer", + "x": [ + "$steps.dimensionality_boost.x", + "$steps.image_producer_x.image", + "$steps.dimensionality_boost.y", + ], + "y": "some-value", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "x", + "selector": "$steps.image_consumer.x", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_simd_consumer_dec_dim_accepting_list_of_batch_and_scalar_selector_when_batch_at_dim_2_with_flow_controll_at_dim_0( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + 
workflow_definition=WORKFLOW_WITH_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_2_WITH_FLOW_CONTROL_AT_DIM_0, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + results = execution_engine.run( + runtime_parameters={ + "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], + "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], + } + ) + + # then + assert len(results) == 2 + assert results[0]["x"] is None + assert results[1]["x"] is None + + +WORKFLOW_WITH_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_2_WITH_FLOW_CONTROL_AT_DIM_1 = { + "version": "1.1", + "inputs": [ + {"type": "WorkflowImage", "name": "image_1"}, + {"type": "WorkflowImage", "name": "image_3"}, + ], + "steps": [ + { + "type": "AlwaysTerminate", + "name": "condition_scalar", + "x": "$inputs.image_1", + "next_steps": ["$steps.dimensionality_boost"], + }, + { + "type": "DoubleBoostDimensionality", + "name": "dimensionality_boost", + "x": "$inputs.image_1", + "y": "$inputs.image_3", + }, + {"type": "ImageProducer", "name": "image_producer_x", "shape": (50, 50, 3)}, + { + "type": "SIMDConsumerAcceptingListDecDim", + "name": "image_consumer", + "x": [ + "$steps.dimensionality_boost.x", + "$steps.image_producer_x.image", + "$steps.dimensionality_boost.y", + ], + "y": "some-value", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "x", + "selector": "$steps.image_consumer.x", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_simd_consumer_dec_dim_accepting_list_of_batch_and_scalar_selector_when_batch_at_dim_2_with_flow_controll_at_dim_1( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = 
{ + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_2_WITH_FLOW_CONTROL_AT_DIM_1, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + results = execution_engine.run( + runtime_parameters={ + "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], + "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], + } + ) + + # then + assert len(results) == 2 + assert results[0]["x"] is None + assert results[1]["x"] is None + + +WORKFLOW_WITH_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_2_WITH_FLOW_CONTROL_AT_DIM_2 = { + "version": "1.1", + "inputs": [ + {"type": "WorkflowImage", "name": "image_1"}, + {"type": "WorkflowImage", "name": "image_3"}, + ], + "steps": [ + { + "type": "DoubleBoostDimensionality", + "name": "dimensionality_boost", + "x": "$inputs.image_1", + "y": "$inputs.image_3", + }, + {"type": "ImageProducer", "name": "image_producer_x", "shape": (50, 50, 3)}, + { + "type": "EachSecondPass", + "name": "condition_scalar", + "x": "$steps.dimensionality_boost.x", + "next_steps": ["$steps.image_consumer"], + }, + { + "type": "SIMDConsumerAcceptingListDecDim", + "name": "image_consumer", + "x": [ + "$steps.dimensionality_boost.x", + "$steps.image_producer_x.image", + "$steps.dimensionality_boost.y", + ], + "y": "some-value", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "x", + "selector": "$steps.image_consumer.x", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_simd_consumer_dec_dim_accepting_list_of_batch_and_scalar_selector_when_batch_at_dim_2_with_flow_controll_at_dim_2( + get_plugin_modules_mock: MagicMock, + 
model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_2_WITH_FLOW_CONTROL_AT_DIM_2, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + results = execution_engine.run( + runtime_parameters={ + "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], + "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], + } + ) + + # then + assert len(results) == 2 + assert [ + i.numpy_image.shape if i is not None else None for i in results[0]["x"] + ] == [ + (100, 100, 3), + None, + (50, 50, 3), + None, + (300, 300, 3), + None, + ] + assert [ + i.numpy_image.shape if i is not None else None for i in results[1]["x"] + ] == [ + (120, 120, 3), + None, + (50, 50, 3), + None, + (320, 320, 3), + None, + ] + + +WORKFLOW_WITH_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_2_WITH_FLOW_CONTROL_AT_DIM_2_AND_DIM_0 = { + "version": "1.1", + "inputs": [ + {"type": "WorkflowImage", "name": "image_1"}, + {"type": "WorkflowImage", "name": "image_3"}, + ], + "steps": [ + { + "type": "DoubleBoostDimensionality", + "name": "dimensionality_boost", + "x": "$inputs.image_1", + "y": "$inputs.image_3", + }, + {"type": "ImageProducer", "name": "image_producer_x", "shape": (50, 50, 3)}, + { + "type": "AlwaysTerminate", + "name": "condition_scalar", + "x": "$steps.image_producer_x.image", + "next_steps": ["$steps.image_consumer"], + }, + { + "type": "EachSecondPass", + "name": "condition_batch", + "x": 
"$steps.dimensionality_boost.x", + "next_steps": ["$steps.image_consumer"], + }, + { + "type": "SIMDConsumerAcceptingListDecDim", + "name": "image_consumer", + "x": [ + "$steps.dimensionality_boost.x", + "$steps.image_producer_x.image", + "$steps.dimensionality_boost.y", + ], + "y": "some-value", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "x", + "selector": "$steps.image_consumer.x", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_simd_consumer_dec_dim_accepting_list_of_batch_and_scalar_selector_when_batch_at_dim_2_with_flow_controll_at_dim_2_and_dim_0( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_2_WITH_FLOW_CONTROL_AT_DIM_2_AND_DIM_0, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + results = execution_engine.run( + runtime_parameters={ + "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], + "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], + } + ) + + # then + assert len(results) == 2 + assert results[0]["x"] is None + assert results[1]["x"] is None + + +WORKFLOW_WITH_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_2_WITH_FLOW_CONTROL_AT_DIM_2_AND_DIM_1 = { + "version": "1.1", + "inputs": [ + {"type": "WorkflowImage", "name": "image_1"}, + {"type": "WorkflowImage", "name": "image_3"}, + ], + "steps": [ + { + "type": "EachSecondPass", + "name": 
"condition_batch_1", + "x": "$inputs.image_1", + "next_steps": ["$steps.dimensionality_boost"], + }, + { + "type": "DoubleBoostDimensionality", + "name": "dimensionality_boost", + "x": "$inputs.image_1", + "y": "$inputs.image_3", + }, + {"type": "ImageProducer", "name": "image_producer_x", "shape": (50, 50, 3)}, + { + "type": "EachSecondPass", + "name": "condition_batch_2", + "x": "$steps.dimensionality_boost.x", + "next_steps": ["$steps.image_consumer"], + }, + { + "type": "SIMDConsumerAcceptingListDecDim", + "name": "image_consumer", + "x": [ + "$steps.dimensionality_boost.x", + "$steps.image_producer_x.image", + "$steps.dimensionality_boost.y", + ], + "y": "some-value", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "x", + "selector": "$steps.image_consumer.x", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_simd_consumer_dec_dim_accepting_list_of_batch_and_scalar_selector_when_batch_at_dim_2_with_flow_controll_at_dim_2_and_dim_1( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_SIMD_CUSTOMER_DECREASING_DIMENSIONALITY_ACCEPTING_LIST_OF_BATCH_AND_SCALAR_IMAGES_AT_DIM_2_WITH_FLOW_CONTROL_AT_DIM_2_AND_DIM_1, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + results = execution_engine.run( + runtime_parameters={ + "image_1": [np.zeros((100, 100, 3)), np.zeros((120, 120, 3))], + "image_3": [np.zeros((300, 300, 3)), np.zeros((320, 320, 3))], + } + ) + + # then + assert len(results) == 2 + assert [ + 
i.numpy_image.shape if i is not None else None for i in results[0]["x"] + ] == [ + (100, 100, 3), + None, + (50, 50, 3), + None, + (300, 300, 3), + None, + ] + assert results[1]["x"] is None + + +WORKFLOW_WITH_NON_SIMD_CONSUMER_RAISING_OUTPUT_DIM_FED_BY_SCALAR_PRODUCERS_WITH_CONDITIONAL_EXECUTION = { + "version": "1.1", + "inputs": [], + "steps": [ + { + "type": "ImageProducer", + "name": "image_producer_x", + }, + { + "type": "IdentitySIMD", + "name": "identity_simd", + "x": "$steps.image_producer_x.image", + }, + { + "type": "ImageProducer", + "name": "image_producer_y", + "shape": (220, 230, 3), + }, + { + "type": "MultiImageConsumerRaisingDim", + "name": "image_consumer", + "images_x": "$steps.identity_simd.x", + "images_y": "$steps.image_producer_y.image", + }, + { + "type": "AlwaysTerminate", + "name": "condition_batch_2", + "x": "$steps.image_consumer.shapes", + "next_steps": ["$steps.identity_simd_2"], + }, + { + "type": "IdentitySIMD", + "name": "identity_simd_2", + "x": "$steps.image_consumer.shapes", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "shapes", + "selector": "$steps.identity_simd_2.x", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_multiple_scalar_producers_feeding_non_simd_consumer_raising_dim_with_conditional_execution( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + + # then + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_NON_SIMD_CONSUMER_RAISING_OUTPUT_DIM_FED_BY_SCALAR_PRODUCERS_WITH_CONDITIONAL_EXECUTION, + init_parameters=workflow_init_parameters, + 
max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + result = execution_engine.run(runtime_parameters={}) + + # then + assert result == [{"shapes": [None]}] + + +WORKFLOW_WITH_SIMD_CONSUMER_RAISING_OUTPUT_DIM_FED_BY_SCALAR_PRODUCERS = { + "version": "1.1", + "inputs": [], + "steps": [ + { + "type": "ImageProducer", + "name": "image_producer_x", + }, + { + "type": "IdentitySIMD", + "name": "identity_simd", + "x": "$steps.image_producer_x.image", + }, + { + "type": "ImageProducer", + "name": "image_producer_y", + "shape": (220, 230, 3), + }, + { + "type": "MultiSIMDImageConsumerRaisingDim", + "name": "image_consumer", + "images_x": "$steps.identity_simd.x", + "images_y": "$steps.image_producer_y.image", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "shapes", + "selector": "$steps.image_consumer.shapes", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_multiple_scalar_producers_feeding_simd_consumer_raising_dim( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + + # then + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_SIMD_CONSUMER_RAISING_OUTPUT_DIM_FED_BY_SCALAR_PRODUCERS, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + result = execution_engine.run(runtime_parameters={}) + + # then + assert result == [{"shapes": ["[192, 168, 3][220, 230, 3]"]}] + + +WORKFLOW_WITH_SIMD_CONSUMER_RAISING_OUTPUT_DIM_TWICE_FED_BY_SCALAR_PRODUCERS = { + "version": "1.1", + "inputs": [], + "steps": [ + { + "type": "ImageProducer", + "name": "image_producer_x", + 
}, + { + "type": "IdentitySIMD", + "name": "identity_simd", + "x": "$steps.image_producer_x.image", + }, + { + "type": "ImageProducer", + "name": "image_producer_y", + "shape": (220, 230, 3), + }, + { + "type": "MultiSIMDImageConsumerRaisingDim", + "name": "image_consumer", + "images_x": "$steps.identity_simd.x", + "images_y": "$steps.image_producer_y.image", + }, + { + "type": "BoostDimensionality", + "name": "dim_boost", + "x": "$steps.image_consumer.shapes", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "shapes", + "selector": "$steps.dim_boost.x", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_multiple_scalar_producers_feeding_simd_consumer_raising_dim_twice( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + + # then + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_SIMD_CONSUMER_RAISING_OUTPUT_DIM_TWICE_FED_BY_SCALAR_PRODUCERS, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + result = execution_engine.run(runtime_parameters={}) + + # then + assert result == [ + {"shapes": [["[192, 168, 3][220, 230, 3]", "[192, 168, 3][220, 230, 3]"]]} + ] + + +WORKFLOW_WITH_SIMD_CONSUMER_RAISING_OUTPUT_DIM_FED_BY_SCALAR_PRODUCERS_AND_FLOW_CONTROL_AT_DIM_0 = { + "version": "1.1", + "inputs": [], + "steps": [ + { + "type": "ImageProducer", + "name": "image_producer_x", + }, + { + "type": "AlwaysTerminate", + "name": "condition_batch_2", + "x": "$steps.image_producer_x.image", + "next_steps": ["$steps.identity_simd"], + }, + { + "type": "IdentitySIMD", + "name": 
"identity_simd", + "x": "$steps.image_producer_x.image", + }, + { + "type": "ImageProducer", + "name": "image_producer_y", + "shape": (220, 230, 3), + }, + { + "type": "MultiSIMDImageConsumerRaisingDim", + "name": "image_consumer", + "images_x": "$steps.identity_simd.x", + "images_y": "$steps.image_producer_y.image", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "shapes", + "selector": "$steps.image_consumer.shapes", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_multiple_scalar_producers_feeding_simd_consumer_raising_dim_and_flow_control_at_dim_0( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + + # then + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_SIMD_CONSUMER_RAISING_OUTPUT_DIM_FED_BY_SCALAR_PRODUCERS_AND_FLOW_CONTROL_AT_DIM_0, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + result = execution_engine.run(runtime_parameters={}) + + # then + assert result == [{"shapes": []}] + + +WORKFLOW_WITH_SIMD_CONSUMER_RAISING_OUTPUT_DIM_FED_BY_SCALAR_PRODUCERS_AND_FLOW_CONTROL_AT_DIM_1 = { + "version": "1.1", + "inputs": [], + "steps": [ + { + "type": "ImageProducer", + "name": "image_producer_x", + }, + { + "type": "IdentitySIMD", + "name": "identity_simd", + "x": "$steps.image_producer_x.image", + }, + { + "type": "ImageProducer", + "name": "image_producer_y", + "shape": (220, 230, 3), + }, + { + "type": "MultiSIMDImageConsumerRaisingDim", + "name": "image_consumer", + "images_x": "$steps.identity_simd.x", + "images_y": "$steps.image_producer_y.image", + 
}, + { + "type": "AlwaysTerminate", + "name": "condition_batch_2", + "x": "$steps.image_consumer.shapes", + "next_steps": ["$steps.identity_simd_2"], + }, + { + "type": "IdentitySIMD", + "name": "identity_simd_2", + "x": "$steps.image_consumer.shapes", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "shapes", + "selector": "$steps.identity_simd_2.x", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_multiple_scalar_producers_feeding_simd_consumer_raising_dim_and_flow_control_at_dim_1( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + + # then + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_SIMD_CONSUMER_RAISING_OUTPUT_DIM_FED_BY_SCALAR_PRODUCERS_AND_FLOW_CONTROL_AT_DIM_1, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + result = execution_engine.run(runtime_parameters={}) + + # then + assert result == [{"shapes": [None]}] + + +WORKFLOW_WITH_SIMD_CONSUMER_RAISING_OUTPUT_DIM_FED_BY_SCALAR_PRODUCER_AND_BATCH_INPUT = { + "version": "1.1", + "inputs": [ + {"type": "WorkflowImage", "name": "image_1"}, + ], + "steps": [ + { + "type": "ImageProducer", + "name": "image_producer_x", + }, + { + "type": "IdentitySIMD", + "name": "identity_simd", + "x": "$steps.image_producer_x.image", + }, + { + "type": "MultiSIMDImageConsumerRaisingDim", + "name": "image_consumer", + "images_x": "$steps.identity_simd.x", + "images_y": "$inputs.image_1", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "shapes", + "selector": "$steps.image_consumer.shapes", + }, + ], +} + + 
+@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_scalar_producer_and_batch_input_feeding_simd_consumer_raising_dim( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + + # then + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_SIMD_CONSUMER_RAISING_OUTPUT_DIM_FED_BY_SCALAR_PRODUCER_AND_BATCH_INPUT, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + result = execution_engine.run( + runtime_parameters={ + "image_1": [np.zeros((292, 168, 3)), np.zeros((392, 168, 3))] + } + ) + + # then + assert result == [ + {"shapes": ["[192, 168, 3][292, 168, 3]"]}, + {"shapes": ["[192, 168, 3][392, 168, 3]"]}, + ] + + +WORKFLOW_WITH_SIMD_CONSUMER_RAISING_OUTPUT_DIM_FED_BY_BATCH_INPUTS = { + "version": "1.1", + "inputs": [ + {"type": "WorkflowImage", "name": "image_1"}, + {"type": "WorkflowImage", "name": "image_2"}, + ], + "steps": [ + { + "type": "IdentitySIMD", + "name": "identity_simd", + "x": "$inputs.image_1", + }, + { + "type": "MultiSIMDImageConsumerRaisingDim", + "name": "image_consumer", + "images_x": "$steps.identity_simd.x", + "images_y": "$inputs.image_2", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "shapes", + "selector": "$steps.image_consumer.shapes", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_batch_inputs_feeding_simd_consumer_raising_dim( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + 
"tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": None, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + + # then + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_SIMD_CONSUMER_RAISING_OUTPUT_DIM_FED_BY_BATCH_INPUTS, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + result = execution_engine.run( + runtime_parameters={ + "image_1": [np.zeros((292, 168, 3)), np.zeros((392, 168, 3))], + "image_2": [np.zeros((293, 168, 3)), np.zeros((393, 168, 3))], + } + ) + + # then + assert result == [ + {"shapes": ["[292, 168, 3][293, 168, 3]"]}, + {"shapes": ["[392, 168, 3][393, 168, 3]"]}, + ] + + +WORKFLOW_WITH_INPUTS_DERIVED_NESTED_DIMS_AND_EMERGED_NESTED_DIMS = { + "version": "1.1", + "inputs": [ + {"type": "WorkflowImage", "name": "image"}, + ], + "steps": [ + { + "type": "ObjectDetectionModel", + "name": "general_detection", + "image": "$inputs.image", + "model_id": "yolov8n-640", + "class_filter": ["dog"], + }, + { + "type": "Crop", + "name": "cropping", + "image": "$inputs.image", + "predictions": "$steps.general_detection.predictions", + }, + { + "type": "EachSecondPass", + "name": "condition_batch_1", + "x": "$steps.cropping.crops", + "next_steps": ["$steps.breds_classification"], + }, + { + "type": "ClassificationModel", + "name": "breds_classification", + "image": "$steps.cropping.crops", + "model_id": "dog-breed-xpaq6/1", + "confidence": 0.09, + }, + { + "type": "ImageProducer", + "name": "image_producer_x", + "shape": (192, 168, 3), + }, + { + "type": "ImageProducer", + "name": "image_producer_y", + "shape": (292, 168, 3), + }, + { + "type": "IdentitySIMD", + "name": "identity_simd", + "x": "$steps.image_producer_x.image", + }, + { + "type": "MultiSIMDImageConsumerRaisingDim", + "name": "image_consumer", + 
"images_x": "$steps.identity_simd.x", + "images_y": "$steps.image_producer_y.image", + }, + { + "type": "DimensionCollapse", + "name": "inputs_concatenation", + "data": "$inputs.image", + }, + { + "type": "DimensionCollapse", + "name": "outputs_concatenation", + "data": "$steps.image_consumer.shapes", + }, + { + "type": "DimensionCollapse", + "name": "outputs_concatenation_2", + "data": "$steps.outputs_concatenation.output", + }, + ], + "outputs": [ + { + "type": "JsonField", + "name": "input_image", + "selector": "$inputs.image", + }, + { + "type": "JsonField", + "name": "shapes", + "selector": "$steps.image_consumer.shapes", + }, + { + "type": "JsonField", + "name": "collapsed_input", + "selector": "$steps.inputs_concatenation.output", + }, + { + "type": "JsonField", + "name": "collapsed_output", + "selector": "$steps.outputs_concatenation.output", + }, + { + "type": "JsonField", + "name": "collapsed_output_2", + "selector": "$steps.outputs_concatenation_2.output", + }, + { + "type": "JsonField", + "name": "breds_classification", + "selector": "$steps.breds_classification.predictions", + }, + ], +} + + +@mock.patch.object(blocks_loader, "get_plugin_modules") +def test_workflow_with_input_derived_dims_and_emergent_dims( + get_plugin_modules_mock: MagicMock, + model_manager: ModelManager, + dogs_image: np.ndarray, + crowd_image: np.ndarray, + roboflow_api_key: str, +) -> None: + # given + get_plugin_modules_mock.return_value = [ + "tests.workflows.integration_tests.execution.stub_plugins.plugin_image_producer" + ] + workflow_init_parameters = { + "workflows_core.model_manager": model_manager, + "workflows_core.api_key": roboflow_api_key, + "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, + } + + # then + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_INPUTS_DERIVED_NESTED_DIMS_AND_EMERGED_NESTED_DIMS, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + result 
= execution_engine.run( + runtime_parameters={ + "image": [dogs_image, crowd_image], + } + ) + + # then + assert ( + len(result) == 2 + ), "Two inputs provided, their dimensions survived to the output, hence 2 outputs expected" + assert len(result[0]["collapsed_input"]) == 2 + assert np.allclose(result[0]["collapsed_input"][0].numpy_image, dogs_image) + assert np.allclose(result[0]["collapsed_input"][1].numpy_image, crowd_image) + assert np.allclose(result[0]["input_image"].numpy_image, dogs_image) + assert result[0]["shapes"] == ["[192, 168, 3][292, 168, 3]"] + assert result[0]["collapsed_output"] == ["[192, 168, 3][292, 168, 3]"] + assert result[0]["collapsed_output_2"] == [["[192, 168, 3][292, 168, 3]"]] + assert [ + e["top"] if e is not None else None for e in result[0]["breds_classification"] + ] == ["116.Parson_russell_terrier", None] + assert len(result[1]["collapsed_input"]) == 2 + assert np.allclose(result[1]["collapsed_input"][0].numpy_image, dogs_image) + assert np.allclose(result[1]["collapsed_input"][1].numpy_image, crowd_image) + assert np.allclose(result[1]["input_image"].numpy_image, crowd_image) + assert result[1]["shapes"] == ["[192, 168, 3][292, 168, 3]"] + assert result[1]["collapsed_output"] == ["[192, 168, 3][292, 168, 3]"] + assert result[1]["collapsed_output_2"] == [["[192, 168, 3][292, 168, 3]"]] + assert result[1]["breds_classification"] == [] diff --git a/tests/workflows/integration_tests/execution/test_workflow_with_arbitrary_batch_inputs.py b/tests/workflows/integration_tests/execution/test_workflow_with_arbitrary_batch_inputs.py index 51802efefb..388631197c 100644 --- a/tests/workflows/integration_tests/execution/test_workflow_with_arbitrary_batch_inputs.py +++ b/tests/workflows/integration_tests/execution/test_workflow_with_arbitrary_batch_inputs.py @@ -793,14 +793,17 @@ def test_workflow_when_non_batch_oriented_step_feeds_batch_oriented_step_operati "workflows_core.api_key": None, "workflows_core.step_execution_mode": 
StepExecutionMode.LOCAL, } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_NON_BATCH_ORIENTED_STEP_FEEDING_BATCH_ORIENTED_STEP_OPERATING_BATCH_WISE, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) # when - with pytest.raises(ExecutionGraphStructureError): - _ = ExecutionEngine.init( - workflow_definition=WORKFLOW_WITH_NON_BATCH_ORIENTED_STEP_FEEDING_BATCH_ORIENTED_STEP_OPERATING_BATCH_WISE, - init_parameters=workflow_init_parameters, - max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, - ) + result = execution_engine.run(runtime_parameters={"non_batch_parameter": "some"}) + + # then + assert result == [{"result": 0.4}] WORKFLOW_WITH_NON_BATCH_ORIENTED_STEP_FEEDING_MIXED_INPUT_STEP = { @@ -1326,14 +1329,17 @@ def test_workflow_when_non_batch_oriented_step_feeds_compound_strictly_batch_ori "workflows_core.api_key": None, "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_NON_BATCH_ORIENTED_STEP_FEEDING_COMPOUND_STRICTLY_BATCH_ORIENTED_STEP, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + results = execution_engine.run(runtime_parameters={"non_batch_parameter": "some"}) # then - with pytest.raises(ExecutionGraphStructureError): - _ = ExecutionEngine.init( - workflow_definition=WORKFLOW_WITH_NON_BATCH_ORIENTED_STEP_FEEDING_COMPOUND_STRICTLY_BATCH_ORIENTED_STEP, - init_parameters=workflow_init_parameters, - max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, - ) + assert results == [{"result": 0.4}] WORKFLOW_WITH_BATCH_ORIENTED_STEP_FEEDING_COMPOUND_NON_BATCH_ORIENTED_STEP = { @@ -1740,14 +1746,17 @@ def test_workflow_when_non_batch_oriented_input_feeds_compound_strictly_batch_or "workflows_core.api_key": None, "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, } + execution_engine = ExecutionEngine.init( + 
workflow_definition=WORKFLOW_WITH_NON_BATCH_ORIENTED_INPUT_FEEDING_COMPOUND_STRICTLY_BATCH_ORIENTED_STEP, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) + + # when + result = execution_engine.run(runtime_parameters={"data": "some"}) # then - with pytest.raises(ExecutionGraphStructureError): - _ = ExecutionEngine.init( - workflow_definition=WORKFLOW_WITH_NON_BATCH_ORIENTED_INPUT_FEEDING_COMPOUND_STRICTLY_BATCH_ORIENTED_STEP, - init_parameters=workflow_init_parameters, - max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, - ) + assert result == [{"result": 0.4}] WORKFLOW_WITH_BATCH_ORIENTED_INPUT_FEEDING_COMPOUND_NON_BATCH_ORIENTED_STEP = { diff --git a/tests/workflows/integration_tests/execution/test_workflow_with_detection_plus_ocr.py b/tests/workflows/integration_tests/execution/test_workflow_with_detection_plus_ocr.py index 7bda47948f..a2a0fb5196 100644 --- a/tests/workflows/integration_tests/execution/test_workflow_with_detection_plus_ocr.py +++ b/tests/workflows/integration_tests/execution/test_workflow_with_detection_plus_ocr.py @@ -130,7 +130,6 @@ def test_detection_plus_ocr_workflow_when_minimal_valid_input_provided( # then assert isinstance(result, list), "Expected list to be delivered" assert len(result) == 1, "Expected 1 element in the output for one input image" - print(result[0]) assert set(result[0].keys()) == { "plates_ocr", "plates_crops", diff --git a/tests/workflows/integration_tests/execution/test_workflow_with_dimensionality_change.py b/tests/workflows/integration_tests/execution/test_workflow_with_dimensionality_change.py index 7548a0b92e..97f9a7d321 100644 --- a/tests/workflows/integration_tests/execution/test_workflow_with_dimensionality_change.py +++ b/tests/workflows/integration_tests/execution/test_workflow_with_dimensionality_change.py @@ -132,8 +132,6 @@ def test_workflow_with_detections_coordinates_transformation_in_batch_variant( result[0]["predictions_in_own_coordinates"], 
result[0]["predictions_in_original_coordinates"], ): - print(own_coords_detection["parent_id"]) - print(original_coords_detection["parent_id"]) assert len(own_coords_detection) == len( original_coords_detection ), "Expected number of bounding boxes in nested sv.Detections not to change" diff --git a/tests/workflows/integration_tests/execution/test_workflow_with_dominant_color.py b/tests/workflows/integration_tests/execution/test_workflow_with_dominant_color.py index 0ebc83fa93..7e066b9165 100644 --- a/tests/workflows/integration_tests/execution/test_workflow_with_dominant_color.py +++ b/tests/workflows/integration_tests/execution/test_workflow_with_dominant_color.py @@ -60,7 +60,7 @@ def test_dominant_color_workflow_when_minimal_valid_input_provided( "image": red_image, } ) - print(result) + # then assert isinstance(result, list), "Expected list to be delivered" assert len(result) == 1, "Expected 1 element in the output for one input image" diff --git a/tests/workflows/integration_tests/execution/test_workflow_with_llama_vision.py b/tests/workflows/integration_tests/execution/test_workflow_with_llama_vision.py index f35cbf21bf..cd53c9a7f1 100644 --- a/tests/workflows/integration_tests/execution/test_workflow_with_llama_vision.py +++ b/tests/workflows/integration_tests/execution/test_workflow_with_llama_vision.py @@ -393,7 +393,6 @@ def test_workflow_with_multi_class_classifier_prompt_and_legacy_parser( "top_class", "parsed_prediction", }, "Expected all outputs to be delivered" - print(result[0]["llama_result"]) assert ( isinstance(result[0]["llama_result"], str) and len(result[0]["llama_result"]) > 0 @@ -500,7 +499,6 @@ def test_workflow_with_multi_class_classifier_prompt( "top_class", "parsed_prediction", }, "Expected all outputs to be delivered" - print(result[0]["llama_result"]) assert ( isinstance(result[0]["llama_result"], str) and len(result[0]["llama_result"]) > 0 @@ -695,7 +693,6 @@ def test_workflow_with_structured_prompt( "result", "llama_output", }, 
"Expected all outputs to be delivered" - print(result[0]["llama_output"]) assert isinstance(result[0]["result"], str) diff --git a/tests/workflows/integration_tests/execution/test_workflow_with_perception_encoder.py b/tests/workflows/integration_tests/execution/test_workflow_with_perception_encoder.py index 0235da4fc1..fd11fbf17c 100644 --- a/tests/workflows/integration_tests/execution/test_workflow_with_perception_encoder.py +++ b/tests/workflows/integration_tests/execution/test_workflow_with_perception_encoder.py @@ -48,6 +48,7 @@ ], } + @pytest.mark.skip(reason="Known problem of race condition in execution engine") def test_perception_encoder_embedding_model( model_manager: ModelManager, @@ -76,6 +77,7 @@ def test_perception_encoder_embedding_model( assert -1.0 <= result[0]["similarity"] <= 1.0 assert len(result[0]["image_embeddings"]) >= 1024 + PERCEPTION_ENCODER_TEXT_WORKFLOW = { "version": "1.0", "inputs": [ @@ -99,6 +101,7 @@ def test_perception_encoder_embedding_model( ], } + @pytest.mark.skip(reason="Known problem of race condition in execution engine") def test_perception_encoder_text_embedding_model( model_manager: ModelManager, diff --git a/tests/workflows/integration_tests/execution/test_workflow_with_property_extraction.py b/tests/workflows/integration_tests/execution/test_workflow_with_property_extraction.py index 2f719e5a86..9e5781452b 100644 --- a/tests/workflows/integration_tests/execution/test_workflow_with_property_extraction.py +++ b/tests/workflows/integration_tests/execution/test_workflow_with_property_extraction.py @@ -4,6 +4,7 @@ import cv2 as cv import numpy as np import pytest +import supervision as sv from inference.core.env import WORKFLOWS_MAX_CONCURRENT_STEPS from inference.core.interfaces.camera.video_source import VideoSource @@ -12,7 +13,6 @@ from inference.core.interfaces.stream.watchdog import BasePipelineWatchDog from inference.core.managers.base import ModelManager from inference.core.workflows.core_steps.common.entities import 
StepExecutionMode -from inference.core.workflows.errors import StepOutputLineageError from inference.core.workflows.execution_engine.core import ExecutionEngine from tests.workflows.integration_tests.execution.workflows_gallery_collector.decorators import ( add_to_workflows_gallery, @@ -724,14 +724,29 @@ def test_workflow_when_there_is_faulty_application_of_aggregation_step_at_batch_ "workflows_core.api_key": roboflow_api_key, "workflows_core.step_execution_mode": StepExecutionMode.LOCAL, } + execution_engine = ExecutionEngine.init( + workflow_definition=WORKFLOW_WITH_INVALID_AGGREGATION, + init_parameters=workflow_init_parameters, + max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, + ) # when - with pytest.raises(StepOutputLineageError): - _ = ExecutionEngine.init( - workflow_definition=WORKFLOW_WITH_INVALID_AGGREGATION, - init_parameters=workflow_init_parameters, - max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS, - ) + result = execution_engine.run( + runtime_parameters={ + "image": [ + np.zeros((192, 168, 3), dtype=np.uint8), + np.zeros((200, 168, 3), dtype=np.uint8), + ] + } + ) + + # then + assert len(result) == 1, "Expected result to collapse" + assert ( + len(result[0]["result"]) == 2 + ), "Expected both predictions to be placed in the list" + assert isinstance(result[0]["result"][0], sv.Detections) + assert isinstance(result[0]["result"][1], sv.Detections) WORKFLOW_WITH_ASPECT_RATIO_EXTRACTION = { diff --git a/tests/workflows/unit_tests/core_steps/fusion/test_detections_classes_replacement.py b/tests/workflows/unit_tests/core_steps/fusion/test_detections_classes_replacement.py index ca223e4b12..088488278c 100644 --- a/tests/workflows/unit_tests/core_steps/fusion/test_detections_classes_replacement.py +++ b/tests/workflows/unit_tests/core_steps/fusion/test_detections_classes_replacement.py @@ -386,9 +386,15 @@ def test_classes_replacement_when_empty_classification_predictions_fallback_clas len(result["predictions"]) == 2 ), "Expected 
sv.Detections.empty(), as empty classification was passed" detections = result["predictions"] - assert detections.confidence[1] == 0, "Fallback class confidence expected to be set to 0" - assert detections.class_id[1] == 123, "class id expected to be set to value passed with fallback_class_id parameter" - assert detections.data["class_name"][1] == "unknown", "class name expected to be set to value passed with fallback_class_name parameter" + assert ( + detections.confidence[1] == 0 + ), "Fallback class confidence expected to be set to 0" + assert ( + detections.class_id[1] == 123 + ), "class id expected to be set to value passed with fallback_class_id parameter" + assert ( + detections.data["class_name"][1] == "unknown" + ), "class name expected to be set to value passed with fallback_class_name parameter" def test_extract_leading_class_from_prediction_when_prediction_is_multi_label() -> None: diff --git a/tests/workflows/unit_tests/core_steps/models/foundation/test_perception_encoder.py b/tests/workflows/unit_tests/core_steps/models/foundation/test_perception_encoder.py index 95dbb338e8..45b12407fd 100644 --- a/tests/workflows/unit_tests/core_steps/models/foundation/test_perception_encoder.py +++ b/tests/workflows/unit_tests/core_steps/models/foundation/test_perception_encoder.py @@ -18,9 +18,7 @@ @pytest.fixture def mock_model_manager(): mock = MagicMock() - mock.infer_from_request_sync.return_value = MagicMock( - embeddings=[[0.1, 0.2, 0.3]] - ) + mock.infer_from_request_sync.return_value = MagicMock(embeddings=[[0.1, 0.2, 0.3]]) return mock @@ -109,7 +107,9 @@ def test_run_remotely_with_text(mock_client_cls, mock_model_manager): @patch( "inference.core.workflows.core_steps.models.foundation.perception_encoder.v1.InferenceHTTPClient" ) -def test_run_remotely_with_image(mock_client_cls, mock_model_manager, mock_workflow_image_data): +def test_run_remotely_with_image( + mock_client_cls, mock_model_manager, mock_workflow_image_data +): mock_client = MagicMock() 
mock_client.get_perception_encoder_image_embeddings.return_value = { "embeddings": [[0.1, 0.2, 0.3]] @@ -126,4 +126,3 @@ def test_run_remotely_with_image(mock_client_cls, mock_model_manager, mock_workf assert result["embedding"] == [0.1, 0.2, 0.3] mock_client.get_perception_encoder_image_embeddings.assert_called_once() - diff --git a/tests/workflows/unit_tests/core_steps/sinks/test_onvif_movement.py b/tests/workflows/unit_tests/core_steps/sinks/test_onvif_movement.py index ecb316c4c6..b4db7d460e 100644 --- a/tests/workflows/unit_tests/core_steps/sinks/test_onvif_movement.py +++ b/tests/workflows/unit_tests/core_steps/sinks/test_onvif_movement.py @@ -54,5 +54,5 @@ def test_manifest_parsing_when_the_input_is_valid() -> None: camera_update_rate_limit=500, camera_port=1981, flip_x_movement=True, - flip_y_movement=True + flip_y_movement=True, ) diff --git a/tests/workflows/unit_tests/core_steps/transformations/test_qr_code_generator.py b/tests/workflows/unit_tests/core_steps/transformations/test_qr_code_generator.py index b421e34ef5..bdd79fe4f6 100644 --- a/tests/workflows/unit_tests/core_steps/transformations/test_qr_code_generator.py +++ b/tests/workflows/unit_tests/core_steps/transformations/test_qr_code_generator.py @@ -12,44 +12,44 @@ class TestQRCodeGeneratorBlockV1: def test_qr_code_generator_block_manifest(self): # given block = QRCodeGeneratorBlockV1() - + # when manifest_class = block.get_manifest() outputs = manifest_class.describe_outputs() - + # then assert outputs[0].name == "qr_code" - assert hasattr(manifest_class, '__fields__') - assert 'type' in manifest_class.__fields__ - + assert hasattr(manifest_class, "__fields__") + assert "type" in manifest_class.__fields__ + def test_qr_code_generator_run_basic(self): # given block = QRCodeGeneratorBlockV1() text = "https://roboflow.com" - + # when result = block.run(text=text) - + # then assert "qr_code" in result assert isinstance(result["qr_code"], WorkflowImageData) assert 
result["qr_code"].numpy_image.shape[2] == 3 # RGB channels assert result["qr_code"].numpy_image.dtype == np.uint8 - + def test_qr_code_generator_run_with_parameters(self): # given block = QRCodeGeneratorBlockV1() text = "Test" - + # when - version and box_size are now hardcoded per spec result = block.run( text=text, error_correct="High (~30% word recovery / lowest data capacity)", border=2, fill_color="blue", - back_color="yellow" + back_color="yellow", ) - + # then assert "qr_code" in result assert isinstance(result["qr_code"], WorkflowImageData) @@ -60,136 +60,152 @@ class TestGenerateQRCode: def test_generate_qr_code_basic(self): # given text = "https://example.com" - + # when result = generate_qr_code(text=text) - + # then assert isinstance(result, WorkflowImageData) assert result.numpy_image.shape[2] == 3 # RGB channels assert result.numpy_image.dtype == np.uint8 assert result.numpy_image.shape[0] > 0 # Has height assert result.numpy_image.shape[1] > 0 # Has width - + def test_generate_qr_code_with_hardcoded_defaults(self): # given text = "Test" - + # when - version and box_size are now hardcoded result = generate_qr_code(text=text, version=1, box_size=10) - + # then assert isinstance(result, WorkflowImageData) # Version 1 QR code with box_size=10 and border=4 should be (21+8)*10 = 290 pixels expected_size = (21 + 2 * 4) * 10 # 290 assert result.numpy_image.shape[0] == expected_size assert result.numpy_image.shape[1] == expected_size - + def test_generate_qr_code_auto_version(self): # given text = "Test with auto version" - + # when result = generate_qr_code(text=text, version=None) - + # then assert isinstance(result, WorkflowImageData) assert result.numpy_image.shape[0] > 0 assert result.numpy_image.shape[1] > 0 - + def test_generate_qr_code_error_correction_levels(self): # given text = "Error correction test" - + # when/then - should not raise errors for valid display name levels for level in [ "Low (~7% word recovery / highest data capacity)", "Medium 
(~15% word recovery)", "Quartile (~25% word recovery)", - "High (~30% word recovery / lowest data capacity)" + "High (~30% word recovery / lowest data capacity)", ]: result = generate_qr_code(text=text, error_correct=level) assert isinstance(result, WorkflowImageData) - + def test_generate_qr_code_invalid_error_correction(self): # given text = "Test" - + # when result = generate_qr_code(text=text, error_correct="INVALID") - + # then - should default to ERROR_CORRECT_M assert isinstance(result, WorkflowImageData) - + def test_generate_qr_code_color_parsing(self): # given text = "Color test" - + # when/then - should handle various color formats # Test with standard color names (case-insensitive) result1 = generate_qr_code(text=text, fill_color="black", back_color="white") assert isinstance(result1, WorkflowImageData) - + # Test with uppercase standard names (matches supervision constants) result2 = generate_qr_code(text=text, fill_color="BLACK", back_color="WHITE") assert isinstance(result2, WorkflowImageData) - + # Test with hex colors - result3 = generate_qr_code(text=text, fill_color="#FF0000", back_color="#00FF00") + result3 = generate_qr_code( + text=text, fill_color="#FF0000", back_color="#00FF00" + ) assert isinstance(result3, WorkflowImageData) - + # Test with rgb format - result4 = generate_qr_code(text=text, fill_color="rgb(255, 0, 0)", back_color="rgb(0, 255, 0)") + result4 = generate_qr_code( + text=text, fill_color="rgb(255, 0, 0)", back_color="rgb(0, 255, 0)" + ) assert isinstance(result4, WorkflowImageData) - + # Test with CSS3 color names (fallback) - result5 = generate_qr_code(text=text, fill_color="mediumpurple", back_color="lightblue") + result5 = generate_qr_code( + text=text, fill_color="mediumpurple", back_color="lightblue" + ) assert isinstance(result5, WorkflowImageData) - + def test_generate_qr_code_supervision_color_compatibility(self): """Test that all supervision standard colors work with QR code generation.""" # given text = "Supervision 
color test" - + # Test all standard supervision colors - standard_colors = ["BLACK", "WHITE", "RED", "GREEN", "BLUE", "YELLOW", "ROBOFLOW"] - + standard_colors = [ + "BLACK", + "WHITE", + "RED", + "GREEN", + "BLUE", + "YELLOW", + "ROBOFLOW", + ] + for color_name in standard_colors: # when - using supervision standard color names - result = generate_qr_code(text=text, fill_color=color_name, back_color="WHITE") - + result = generate_qr_code( + text=text, fill_color=color_name, back_color="WHITE" + ) + # then - should successfully generate QR code assert isinstance(result, WorkflowImageData) assert result.numpy_image is not None assert result.numpy_image.shape[2] == 3 # RGB image - + # Test mixed formats to ensure conversions work result_mixed = generate_qr_code( - text=text, + text=text, fill_color="ROBOFLOW", # supervision constant - back_color="#FFFFFF" # hex format + back_color="#FFFFFF", # hex format ) assert isinstance(result_mixed, WorkflowImageData) - + def test_generate_qr_code_box_size_and_border(self): # given text = "Size test" - + # when - testing with different parameters (function still accepts them) result_small = generate_qr_code(text=text, version=1, box_size=5, border=2) result_large = generate_qr_code(text=text, version=1, box_size=15, border=6) - + # then assert result_small.numpy_image.shape[0] < result_large.numpy_image.shape[0] assert result_small.numpy_image.shape[1] < result_large.numpy_image.shape[1] - + def test_generate_qr_code_empty_text(self): # given text = "" - + # when result = generate_qr_code(text=text) - + # then assert isinstance(result, WorkflowImageData) assert result.numpy_image.shape[0] > 0 @@ -198,19 +214,19 @@ def test_generate_qr_code_empty_text(self): @pytest.mark.skipif( True, # Skip until qrcode dependency is resolved in CI - reason="qrcode library may not be available in test environment" + reason="qrcode library may not be available in test environment", ) class TestQRCodeGeneratorIntegration: def 
test_qr_code_format_is_png_compatible(self): # given text = "https://roboflow.com" - + # when result = generate_qr_code(text=text) - + # then # Verify the image can be used by other workflow blocks assert isinstance(result, WorkflowImageData) assert result.numpy_image.dtype == np.uint8 assert len(result.numpy_image.shape) == 3 - assert result.numpy_image.shape[2] == 3 # RGB format expected by IconVisualizer \ No newline at end of file + assert result.numpy_image.shape[2] == 3 # RGB format expected by IconVisualizer diff --git a/tests/workflows/unit_tests/core_steps/visualizations/test_icon.py b/tests/workflows/unit_tests/core_steps/visualizations/test_icon.py index ca6c4e0752..495c19611d 100644 --- a/tests/workflows/unit_tests/core_steps/visualizations/test_icon.py +++ b/tests/workflows/unit_tests/core_steps/visualizations/test_icon.py @@ -115,7 +115,7 @@ def test_icon_validation_when_dynamic_mode_with_default_position() -> None: # when result = IconManifest.model_validate(data) - + # then assert result.position == "TOP_CENTER" # Check default value is used @@ -142,13 +142,13 @@ def test_icon_validation_when_invalid_image_is_given() -> None: def test_icon_visualization_block_static_mode(): # given block = IconVisualizationBlockV1() - + # Create test images test_image = WorkflowImageData( parent_metadata=ImageParentMetadata(parent_id="test"), numpy_image=np.zeros((1000, 1000, 3), dtype=np.uint8), ) - + # Create test icon (red square) test_icon_np = np.zeros((32, 32, 3), dtype=np.uint8) test_icon_np[:, :, 2] = 255 # Make it red @@ -185,13 +185,13 @@ def test_icon_visualization_block_static_mode(): def test_icon_visualization_block_dynamic_mode(): # given block = IconVisualizationBlockV1() - + # Create test images test_image = WorkflowImageData( parent_metadata=ImageParentMetadata(parent_id="test"), numpy_image=np.zeros((1000, 1000, 3), dtype=np.uint8), ) - + # Create test icon (blue square) test_icon_np = np.zeros((32, 32, 3), dtype=np.uint8) test_icon_np[:, :, 0] = 
255 # Make it blue @@ -234,13 +234,13 @@ def test_icon_visualization_block_dynamic_mode(): def test_icon_visualization_block_static_mode_negative_positioning(): # given block = IconVisualizationBlockV1() - + # Create test images test_image = WorkflowImageData( parent_metadata=ImageParentMetadata(parent_id="test"), numpy_image=np.zeros((1000, 1000, 3), dtype=np.uint8), ) - + # Create test icon (green square) test_icon_np = np.zeros((50, 50, 3), dtype=np.uint8) test_icon_np[:, :, 1] = 255 # Make it green @@ -287,7 +287,7 @@ def test_icon_validation_when_static_mode_with_defaults() -> None: # when result = IconManifest.model_validate(data) - + # then assert result.x_position == 10 assert result.y_position == 10 diff --git a/tests/workflows/unit_tests/core_steps/visualizations/test_icon_alpha.py b/tests/workflows/unit_tests/core_steps/visualizations/test_icon_alpha.py index 9634587930..c78c4355de 100644 --- a/tests/workflows/unit_tests/core_steps/visualizations/test_icon_alpha.py +++ b/tests/workflows/unit_tests/core_steps/visualizations/test_icon_alpha.py @@ -18,33 +18,33 @@ def test_icon_with_alpha_from_workflow_input_numpy(): """Test that alpha channel is preserved when icon comes from workflow input as numpy.""" # given block = IconVisualizationBlockV1() - + # Create a test background image (white) bg_image = np.ones((500, 500, 3), dtype=np.uint8) * 255 test_image = WorkflowImageData( parent_metadata=ImageParentMetadata(parent_id="background"), numpy_image=bg_image, ) - + # Create an icon with alpha channel (red circle with transparent background) icon_with_alpha = np.zeros((100, 100, 4), dtype=np.uint8) center = (50, 50) for y in range(100): for x in range(100): - dist = np.sqrt((x - center[0])**2 + (y - center[1])**2) + dist = np.sqrt((x - center[0]) ** 2 + (y - center[1]) ** 2) if dist <= 40: # Red circle with full opacity icon_with_alpha[y, x] = [0, 0, 255, 255] # BGRA else: # Transparent background icon_with_alpha[y, x] = [0, 0, 0, 0] - + # Create 
WorkflowImageData from numpy array (simulating workflow input) test_icon = WorkflowImageData( parent_metadata=ImageParentMetadata(parent_id="icon_input"), numpy_image=icon_with_alpha, # Has 4 channels with alpha ) - + # Run the block in static mode output = block.run( image=test_image, @@ -58,9 +58,9 @@ def test_icon_with_alpha_from_workflow_input_numpy(): x_position=200, y_position=200, ) - - result_image = output['image'].numpy_image - + + result_image = output["image"].numpy_image + # Check that transparency was preserved # The corners of where the icon was placed should still be white corner_positions = [ @@ -69,58 +69,60 @@ def test_icon_with_alpha_from_workflow_input_numpy(): (200, 299), # Bottom-left corner (299, 299), # Bottom-right corner ] - + for x, y in corner_positions: pixel_color = result_image[y, x] # Should be white or very close to white - assert np.all(pixel_color > 250), \ - f"Expected white background at ({x},{y}) due to transparency, got {pixel_color}" - + assert np.all( + pixel_color > 250 + ), f"Expected white background at ({x},{y}) due to transparency, got {pixel_color}" + # Check that the red circle is visible in the center center_x, center_y = 250, 250 center_color = result_image[center_y, center_x] - + # Should be red (high red channel, low blue/green) assert center_color[2] > 200, f"Red channel too low at center: {center_color}" - assert center_color[0] < 100 and center_color[1] < 100, \ - f"Blue/Green should be low at center: {center_color}" + assert ( + center_color[0] < 100 and center_color[1] < 100 + ), f"Blue/Green should be low at center: {center_color}" def test_icon_with_alpha_from_base64_input(): """Test that alpha channel is preserved when icon comes as base64 (API input scenario).""" # given block = IconVisualizationBlockV1() - + # Create a test background image (white) bg_image = np.ones((500, 500, 3), dtype=np.uint8) * 255 test_image = WorkflowImageData( parent_metadata=ImageParentMetadata(parent_id="background"), 
numpy_image=bg_image, ) - + # Create an icon with alpha channel icon_with_alpha = np.zeros((100, 100, 4), dtype=np.uint8) center = (50, 50) for y in range(100): for x in range(100): - dist = np.sqrt((x - center[0])**2 + (y - center[1])**2) + dist = np.sqrt((x - center[0]) ** 2 + (y - center[1]) ** 2) if dist <= 40: # Blue circle with full opacity icon_with_alpha[y, x] = [255, 0, 0, 255] # BGRA else: # Transparent background icon_with_alpha[y, x] = [0, 0, 0, 0] - + # Encode as PNG base64 (preserves alpha) - _, png_buffer = cv2.imencode('.png', icon_with_alpha) - png_base64 = base64.b64encode(png_buffer).decode('ascii') - + _, png_buffer = cv2.imencode(".png", icon_with_alpha) + png_base64 = base64.b64encode(png_buffer).decode("ascii") + # Create WorkflowImageData from base64 (simulating API input) test_icon = WorkflowImageData( parent_metadata=ImageParentMetadata(parent_id="icon_base64"), base64_image=png_base64, ) - + # Run the block in static mode output = block.run( image=test_image, @@ -134,28 +136,30 @@ def test_icon_with_alpha_from_base64_input(): x_position=200, y_position=200, ) - - result_image = output['image'].numpy_image - + + result_image = output["image"].numpy_image + # Check that transparency was preserved corner_positions = [ (200, 200), # Top-left corner - (299, 200), # Top-right corner + (299, 200), # Top-right corner (200, 299), # Bottom-left corner (299, 299), # Bottom-right corner ] - + for x, y in corner_positions: pixel_color = result_image[y, x] # Should be white or very close to white (allowing small variations) - assert np.all(pixel_color > 250), \ - f"Expected white background at ({x},{y}) due to transparency, got {pixel_color}" - + assert np.all( + pixel_color > 250 + ), f"Expected white background at ({x},{y}) due to transparency, got {pixel_color}" + # Check that the blue circle is visible in the center center_x, center_y = 250, 250 center_color = result_image[center_y, center_x] - + # Should be blue (high blue channel, low red/green) 
assert center_color[0] > 200, f"Blue channel too low at center: {center_color}" - assert center_color[1] < 100 and center_color[2] < 100, \ - f"Red/Green should be low at center: {center_color}" + assert ( + center_color[1] < 100 and center_color[2] < 100 + ), f"Red/Green should be low at center: {center_color}" diff --git a/tests/workflows/unit_tests/execution_engine/executor/test_output_constructor.py b/tests/workflows/unit_tests/execution_engine/executor/test_output_constructor.py index 7b452748a2..3e12217f89 100644 --- a/tests/workflows/unit_tests/execution_engine/executor/test_output_constructor.py +++ b/tests/workflows/unit_tests/execution_engine/executor/test_output_constructor.py @@ -8,6 +8,10 @@ from inference.core.workflows.core_steps.loader import KINDS_SERIALIZERS from inference.core.workflows.errors import AssumptionError, ExecutionEngineRuntimeError +from inference.core.workflows.execution_engine.constants import ( + TOP_LEVEL_LINEAGES_KEY, + WORKFLOW_INPUT_BATCH_LINEAGE_ID, +) from inference.core.workflows.execution_engine.entities.base import JsonField from inference.core.workflows.execution_engine.entities.types import ( IMAGE_KIND, @@ -497,6 +501,7 @@ def test_construct_workflow_output_when_batch_outputs_present() -> None: data_lookup = { "$steps.other.c": "c_value", } + execution_graph.graph[TOP_LEVEL_LINEAGES_KEY] = WORKFLOW_INPUT_BATCH_LINEAGE_ID def get_non_batch_data(selector: str) -> Any: return data_lookup[selector]