Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
11 changes: 5 additions & 6 deletions src/finn/builder/build_dataflow_config.py
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,7 @@
from typing import Any, List, Optional

from finn.transformation.fpgadataflow.vitis_build import VitisOptStrategy
from finn.util.basic import alveo_default_platform, alveo_part_map, pynq_part_map
from finn.util.basic import alveo_default_platform, part_map


class AutoFIFOSizingMethod(str, Enum):
Expand Down Expand Up @@ -370,11 +370,10 @@ def _resolve_driver_platform(self):
def _resolve_fpga_part(self):
if self.fpga_part is None:
# lookup from part map if not specified
if self.shell_flow_type == ShellFlowType.VIVADO_ZYNQ:
return pynq_part_map[self.board]
elif self.shell_flow_type == ShellFlowType.VITIS_ALVEO:
return alveo_part_map[self.board]
else:
try:
fpga_part = part_map[self.board]
return fpga_part
except KeyError:
raise Exception("Couldn't resolve fpga_part for " + self.board)
else:
# return as-is when explicitly specified
Expand Down
27 changes: 26 additions & 1 deletion src/finn/transformation/fpgadataflow/convert_to_hw_layers.py
Original file line number Diff line number Diff line change
Expand Up @@ -289,7 +289,32 @@ def apply(self, model):
if n.op_type == "Upsample":
scales = model.get_initializer(n.input[1])
else:
scales = model.get_initializer(n.input[2])
if len(n.input) == 2:
# Resize version 10
scales = model.get_initializer(n.input[1])
elif len(n.input) == 3:
# Resize version 11 and up (no size input)
scales = model.get_initializer(n.input[2])
elif len(n.input) == 4:
# Resize version 11 and up
scales_exists = (model.get_initializer(n.input[2]) is not None) and (
len(model.get_initializer(n.input[2])) != 0
)
sizes_exists = (model.get_initializer(n.input[3]) is not None) and (
len(model.get_initializer(n.input[3])) != 0
)
assert scales_exists ^ sizes_exists, (
"%s: Either scales or the target output size must "
"be specified. Specifying both is prohibited." % n.name
)
if scales_exists:
# Scales input
scales = model.get_initializer(n.input[2])
else:
# Convert sizes to scales
sizes = model.get_initializer(n.input[3])
data_input_size = model.get_tensor_shape(n.input[0])
scales = sizes / data_input_size
in_shape = model.get_tensor_shape(n.input[0])

dt = model.get_tensor_datatype(n.input[0])
Expand Down
44 changes: 36 additions & 8 deletions src/finn/transformation/streamline/reorder.py
Original file line number Diff line number Diff line change
Expand Up @@ -769,9 +769,37 @@ def apply(self, model):
consumer = model.find_consumer(n.output[0])
producer = model.find_producer(n.input[0])
if n.op_type == "Upsample":
scales_ind = 1
transformation_ind = 1
d_type = "float32"
else:
scales_ind = 2
if len(n.input) == 2:
# Resize version 10
transformation_ind = 1
d_type = "float32"
elif len(n.input) == 3:
# Resize version 11 and up (no size input)
transformation_ind = 2
d_type = "float32"
elif len(n.input) == 4:
# Resize version 11 and up
scales_exists = (model.get_initializer(n.input[2]) is not None) and (
len(model.get_initializer(n.input[2])) != 0
)
sizes_exists = (model.get_initializer(n.input[3]) is not None) and (
len(model.get_initializer(n.input[3])) != 0
)
assert scales_exists ^ sizes_exists, (
"%s: Either scales or the target output size must "
"be specified. Specifying both is prohibited." % n.name
)
if scales_exists:
# Scales input
transformation_ind = 2
d_type = "float32"
else:
# Sizes input
transformation_ind = 3
d_type = "int64"
if producer is not None and producer.op_type == "Transpose":
perms = list(get_by_name(producer.attribute, "perm").ints)
if perms == [0, 3, 1, 2]:
Expand All @@ -781,12 +809,12 @@ def apply(self, model):
model = model.transform(MoveTransposePastFork())
# topology modified, "ask" ModelWrapper to apply this transform again
return (model, True)
old_value = model.get_initializer(n.input[scales_ind])
old_value = model.get_initializer(n.input[transformation_ind])
new_value = np.array(
[old_value[idx] for idx in (0, 2, 3, 1)],
dtype=np.dtype("float32"),
dtype=np.dtype(d_type),
)
model.set_initializer(n.input[scales_ind], new_value)
model.set_initializer(n.input[transformation_ind], new_value)
start_name = producer.input[0]
mid_name = n.input[0]
end_name = n.output[0]
Expand All @@ -803,12 +831,12 @@ def apply(self, model):
elif consumer is not None and consumer.op_type == "Transpose":
perms = list(get_by_name(consumer.attribute, "perm").ints)
if perms == [0, 2, 3, 1]:
old_value = model.get_initializer(n.input[scales_ind])
old_value = model.get_initializer(n.input[transformation_ind])
new_value = np.array(
[old_value[idx] for idx in (0, 2, 3, 1)],
dtype=np.dtype("float32"),
dtype=np.dtype(d_type),
)
model.set_initializer(n.input[scales_ind], new_value)
model.set_initializer(n.input[transformation_ind], new_value)
start_name = n.input[0]
mid_name = consumer.input[0]
end_name = consumer.output[0]
Expand Down
1 change: 1 addition & 0 deletions src/finn/util/basic.py
Original file line number Diff line number Diff line change
Expand Up @@ -81,6 +81,7 @@
# Unified board-name -> FPGA part string lookup.
# Combines the Zynq/PYNQ and Alveo maps, then adds boards that belong to
# neither family (Versal eval kits and the Alveo V80).
part_map = {**pynq_part_map, **alveo_part_map}
part_map["VEK280"] = "xcve2802-vsvh1760-2MP-e-S"
part_map["VCK190"] = "xcvc1902-vsva2197-2MP-e-S"
# NOTE(review): suffix casing normalized to "-e-S" to match the sibling
# entries above; the diff introduced lowercase "-e-s" — confirm against the
# part string Vivado reports for the V80.
part_map["V80"] = "xcv80-lsva4737-2MHP-e-S"


def get_rtlsim_trace_depth():
Expand Down
1 change: 0 additions & 1 deletion tests/fpgadataflow/test_fifosizing.py
Original file line number Diff line number Diff line change
Expand Up @@ -70,7 +70,6 @@ def test_fifosizing_linear(method, topology):
synth_clk_period_ns=10.0,
board="Pynq-Z1",
rtlsim_batch_size=100 if topology == "tfc" else 2,
shell_flow_type=build_cfg.ShellFlowType.VIVADO_ZYNQ,
generate_outputs=[
build_cfg.DataflowOutputType.ESTIMATE_REPORTS,
build_cfg.DataflowOutputType.STITCHED_IP,
Expand Down
80 changes: 79 additions & 1 deletion tests/transformation/streamline/test_scale_resize_nhwc.py
Original file line number Diff line number Diff line change
Expand Up @@ -180,6 +180,60 @@ def create_transpose_resize_transpose(ifm_dim, ifm_ch, scales, mode, idt):
return model


def create_resize_transpose_sizes(ifm_dim, ifm_ch, sizes, mode, idt):
    """Build a Resize(NCHW) -> Transpose(NHWC) test model driven by 'sizes'.

    Uses the 4-input Resize form (data, roi, scales, sizes); the scales input
    is declared but left empty so the target output shape is taken from the
    'sizes' input instead. The caller is expected to set the "sizes"
    initializer (and an empty "scales" initializer) afterwards.
    """
    out_h, out_w = sizes[2], sizes[3]

    # Graph input in NCHW layout.
    inp = oh.make_tensor_value_info("inp", TensorProto.FLOAT, [1, ifm_ch, ifm_dim[0], ifm_dim[1]])

    # Scales input is intentionally empty: sizes drives the resize.
    scales = oh.make_tensor_value_info("scales", TensorProto.FLOAT, [])
    # roi is unused here; present only to satisfy the Resize node interface.
    roi = oh.make_tensor_value_info("roi", TensorProto.FLOAT, [4])
    param = oh.make_tensor_value_info("sizes", TensorProto.INT64, [4])

    # Resize output (still NCHW) and final graph output (NHWC).
    outp_up = oh.make_tensor_value_info(
        "outp_up", TensorProto.FLOAT, [1, ifm_ch, out_h, out_w]
    )
    outp = oh.make_tensor_value_info("outp", TensorProto.FLOAT, [1, out_h, out_w, ifm_ch])

    resize_node = oh.make_node(
        "Resize",
        inputs=["inp", "roi", "scales", "sizes"],
        outputs=["outp_up"],
        name="Resize1",
        mode=mode,
    )
    transpose_node = onnx.helper.make_node(
        "Transpose",
        inputs=["outp_up"],
        outputs=["outp"],
        name="Transpose1",
        perm=[0, 2, 3, 1],
    )

    graph = oh.make_graph(
        nodes=[resize_node, transpose_node],
        name="resize_graph",
        inputs=[inp],
        outputs=[outp],
        value_info=[outp_up, roi, scales, param],
    )

    model = ModelWrapper(qonnx_make_model(graph, producer_name="resize_model4"))
    # Annotate FINN datatypes on the graph boundary tensors.
    for tensor_name in ("inp", "outp"):
        model.set_tensor_datatype(tensor_name, idt)
    model.set_tensor_layout("inp", DataLayout.NCHW)
    model = model.transform(InferShapes())
    model = model.transform(InferDataLayouts())
    return model


def check_transform(model):
graph = model.graph
node_ind = 0
Expand All @@ -198,20 +252,27 @@ def check_transform(model):
@pytest.mark.parametrize("ifm_ch", [3])
# scales
@pytest.mark.parametrize("scales", [[1, 1, i, j] for i in range(2, 5) for j in range(2, 5)])
# sizes
@pytest.mark.parametrize(
"sizes", [[1, 3, 2**i, 2**j] for i in range(6, 7) for j in range(6, 7)]
)
# mode
@pytest.mark.parametrize("mode", ["nearest"])
# input datatype
@pytest.mark.parametrize("idt", [DataType["INT4"]])
def test_scale_resize_nhwc(ifm_dim, ifm_ch, scales, mode, idt):
def test_scale_resize_nhwc(ifm_dim, ifm_ch, sizes, scales, mode, idt):
# create models
resize_model1 = create_resize_transpose(ifm_dim, ifm_ch, scales, mode, idt)
resize_model2 = create_transpose_resize(ifm_dim, ifm_ch, scales, mode, idt)
resize_model3 = create_transpose_resize_transpose(ifm_dim, ifm_ch, scales, mode, idt)
resize_model4 = create_resize_transpose_sizes(ifm_dim, ifm_ch, sizes, mode, idt)

# set initializers
resize_model1.set_initializer("scales", np.array(scales, dtype=np.float32))
resize_model2.set_initializer("scales", np.array(scales, dtype=np.float32))
resize_model3.set_initializer("scales", np.array(scales, dtype=np.float32))
resize_model4.set_initializer("sizes", np.array(sizes, dtype=np.int64))
resize_model4.set_initializer("scales", np.array([], dtype=np.float32))

# generate input tensor for testing
input_tensor_nchw = gen_finn_dt_tensor(idt, [1, ifm_ch, ifm_dim[0], ifm_dim[1]])
Expand Down Expand Up @@ -269,3 +330,20 @@ def test_scale_resize_nhwc(ifm_dim, ifm_ch, scales, mode, idt):
# compare outputs
assert (expected3 == output3).all()
assert check_transform(resize_model3)

# execute fourth model
output_dict4 = oxe.execute_onnx(resize_model4, input_dict_nchw)
expected4 = output_dict4["outp"]

# transform Resize into ResizeNHWC
resize_model4 = resize_model4.transform(MakeScaleResizeNHWC())
resize_model4 = resize_model4.transform(InferDataLayouts())

# execute transformed model
output_node_name4 = resize_model4.graph.output[0].name
output_dict4 = oxe.execute_onnx(resize_model4, input_dict_nchw, return_full_exec_context=False)
output4 = output_dict4[output_node_name4]

# compare outputs
assert (expected4 == output4).all()
assert check_transform(resize_model4)