diff --git a/CHANGELOG.md b/CHANGELOG.md index c15b92e98ef..3d1a9fee030 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -31,6 +31,7 @@ Full documentation for MIGraphX is available at * Updated `argmin` and `argmax` ops to be implemented as reduction ops, so they now have JIT support and can fuse (#4620). * Replaced usages of `std::cout` and `std::cerr` with the logger (#4732) * Converted RNN variable sequence length operations (`rnn_var_sl_shift_sequence`, `rnn_var_sl_shift_output`, `rnn_var_sl_last_output`) from device implementation to JIT compilation (#4755). +* Updated netron output to create an ONNX-like protobuf. Now also includes debug symbols if enabled. (#4701) ### Resolved issues diff --git a/codecov.yml b/codecov.yml index 9f2569b0669..03abe2daeb2 100644 --- a/codecov.yml +++ b/codecov.yml @@ -2,4 +2,3 @@ ignore: - "test/" - "src/driver" - "build/" - - "src/netron_output.cpp" diff --git a/docs/driver/read.rst b/docs/driver/read.rst index db32b11dda7..dfc0d17b0f0 100644 --- a/docs/driver/read.rst +++ b/docs/driver/read.rst @@ -80,7 +80,7 @@ Print out program as json. .. option:: --netron -Print out program as a Netron viewable json file. +Print out program as ONNX protobuf binary viewable in Netron. .. 
option:: --text diff --git a/docs/migraphx-driver.rst b/docs/migraphx-driver.rst index b84edb0f371..26422154c16 100644 --- a/docs/migraphx-driver.rst +++ b/docs/migraphx-driver.rst @@ -86,7 +86,7 @@ To learn which options can be used with which commands, see the :ref:`MIGraphX d * - --binary - Prints the program in binary format * - --netron - - Prints the program in Netron viewable JSON format + - Prints the program as ONNX protobuf binary viewable in Netron * - --output | -o - Writes output in a file * - --fill0 diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt index a91750e00ac..254a3282820 100644 --- a/src/CMakeLists.txt +++ b/src/CMakeLists.txt @@ -97,7 +97,6 @@ add_library(migraphx memory_coloring.cpp module.cpp msgpack.cpp - netron_output.cpp normalize_attributes.cpp normalize_ops.cpp op_enums.cpp diff --git a/src/api/api.cpp b/src/api/api.cpp index f6fac27b50b..ff7a7e8590b 100644 --- a/src/api/api.cpp +++ b/src/api/api.cpp @@ -39,7 +39,9 @@ #include #include #include +#include #include +#include #include #include @@ -337,6 +339,15 @@ static std::vector get_output_shapes(program& p) { return p.get_output_sh static void print_program(const program& p) { std::cout << p << std::endl; } +static void write_netron_output_file(const program& p, const char* filename) +{ + std::ofstream os(filename, std::ios::binary); + if(not os.is_open()) + MIGRAPHX_THROW(migraphx_status_bad_param, + "Failed to open file for writing: " + std::string(filename)); + write_netron_output(p, os); +} + static void print_module(const module& m) { std::cout << m << std::endl; } static migraphx::instruction_ref add_allocation(module& m, const migraphx::shape& s) @@ -1756,6 +1767,17 @@ extern "C" migraphx_status migraphx_program_print(const_migraphx_program_t progr return api_error_result; } +extern "C" migraphx_status migraphx_program_write_netron_output(const_migraphx_program_t program, + const char* filename) +{ + auto api_error_result = migraphx::try_([&] { + if(program == nullptr) + 
MIGRAPHX_THROW(migraphx_status_bad_param, "Bad parameter program: Null pointer"); + migraphx::write_netron_output_file((program->object), (filename)); + }); + return api_error_result; +} + extern "C" migraphx_status migraphx_program_sort(migraphx_program_t program) { auto api_error_result = migraphx::try_([&] { diff --git a/src/api/include/migraphx/migraphx.h b/src/api/include/migraphx/migraphx.h index 699f3eddf95..674d23ff56b 100644 --- a/src/api/include/migraphx/migraphx.h +++ b/src/api/include/migraphx/migraphx.h @@ -464,6 +464,9 @@ MIGRAPHX_C_EXPORT migraphx_status migraphx_program_get_output_shapes(migraphx_sh MIGRAPHX_C_EXPORT migraphx_status migraphx_program_print(const_migraphx_program_t program); +MIGRAPHX_C_EXPORT migraphx_status +migraphx_program_write_netron_output(const_migraphx_program_t program, const char* filename); + MIGRAPHX_C_EXPORT migraphx_status migraphx_program_sort(migraphx_program_t program); MIGRAPHX_C_EXPORT migraphx_status migraphx_program_run(migraphx_arguments_t* out, diff --git a/src/api/include/migraphx/migraphx.hpp b/src/api/include/migraphx/migraphx.hpp index 872fdcde1b9..ffb8bffeb46 100644 --- a/src/api/include/migraphx/migraphx.hpp +++ b/src/api/include/migraphx/migraphx.hpp @@ -1232,6 +1232,11 @@ struct program : MIGRAPHX_HANDLE_BASE(program) void print() const { call(&migraphx_program_print, this->get_handle_ptr()); } + void write_netron_output(const char* filename) const + { + call(&migraphx_program_write_netron_output, this->get_handle_ptr(), filename); + } + program sort() { call(&migraphx_program_sort, this->get_handle_ptr()); diff --git a/src/api/migraphx.py b/src/api/migraphx.py index 18bfb365719..3c92a697539 100644 --- a/src/api/migraphx.py +++ b/src/api/migraphx.py @@ -220,7 +220,10 @@ def shapes(h): @api.handle('migraphx_instruction', 'migraphx::instruction_ref') def instruction(h): - pass + h.method('get_debug_symbols', + fname='get_debug_symbols', + returns='const std::set&', + const=True) 
@api.handle('migraphx_instructions', 'std::vector') @@ -265,6 +268,16 @@ def module(h): api.params(s='const migraphx::shape&'), invoke='migraphx::add_allocation($@)', returns='migraphx::instruction_ref') + h.method('has_debug_symbols', + fname='has_debug_symbols', + returns='bool', + const=True) + h.method('add_debug_symbols', + api.params(ins='migraphx::instruction_ref', symbols='std::set'), + fname='add_debug_symbols') + h.method('remove_debug_symbols', + api.params(ins='migraphx::instruction_ref'), + fname='remove_debug_symbols') @auto_handle() @@ -284,6 +297,10 @@ def program(h): invoke='migraphx::get_output_shapes($@)', returns='std::vector') h.method('print', invoke='migraphx::print_program($@)', const=True) + h.method('write_netron_output', + api.params(filename='const char*'), + invoke='migraphx::write_netron_output_file($@)', + const=True) h.method('sort') h.method('run', api.params( diff --git a/src/driver/main.cpp b/src/driver/main.cpp index 9394236b6fb..1eeb5d67fc9 100644 --- a/src/driver/main.cpp +++ b/src/driver/main.cpp @@ -279,7 +279,7 @@ struct loader ap.set_value("binary")); ap(output_type, {"--netron"}, - ap.help("Print out program as Netron readable json."), + ap.help("Print out program as ONNX protobuf binary viewable in Netron."), ap.set_value("netron")); ap(output, {"--output", "-o"}, ap.help("Output to file.")); } @@ -543,7 +543,7 @@ struct loader else if(type == "binary") write(*os, save_buffer(p)); else if(type == "netron") - *os << make_netron_output(p) << std::endl; + write_netron_output(p, *os); } }; diff --git a/src/include/migraphx/netron_output.hpp b/src/include/migraphx/netron_output.hpp index fb355a2d9f5..de1f3884366 100644 --- a/src/include/migraphx/netron_output.hpp +++ b/src/include/migraphx/netron_output.hpp @@ -1,7 +1,7 @@ /* * The MIT License (MIT) * - * Copyright (c) 2015-2024 Advanced Micro Devices, Inc. All rights reserved. + * Copyright (c) 2015-2026 Advanced Micro Devices, Inc. All rights reserved. 
* * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal @@ -24,14 +24,15 @@ #ifndef MIGRAPHX_GUARD_RTGLIB_NETRON_OUTPUT_HPP #define MIGRAPHX_GUARD_RTGLIB_NETRON_OUTPUT_HPP -#include +#include #include #include +#include namespace migraphx { inline namespace MIGRAPHX_INLINE_NS { -MIGRAPHX_EXPORT std::string make_netron_output(const program& prog); +MIGRAPHX_ONNX_EXPORT void write_netron_output(const program& prog, std::ostream& os); } // namespace MIGRAPHX_INLINE_NS } // namespace migraphx diff --git a/src/include/migraphx/program.hpp b/src/include/migraphx/program.hpp index 8bc7310c2d2..f5d0d372795 100644 --- a/src/include/migraphx/program.hpp +++ b/src/include/migraphx/program.hpp @@ -1,7 +1,7 @@ /* * The MIT License (MIT) * - * Copyright (c) 2015-2025 Advanced Micro Devices, Inc. All rights reserved. + * Copyright (c) 2015-2026 Advanced Micro Devices, Inc. All rights reserved. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal @@ -56,6 +56,7 @@ struct marker; */ struct MIGRAPHX_EXPORT program { + program(); explicit program(module m); @@ -79,6 +80,8 @@ struct MIGRAPHX_EXPORT program std::unordered_map get_parameter_shapes() const; + int get_program_file_version() const; + std::size_t total_instructions() const; std::vector eval(const parameter_map& params, @@ -165,6 +168,9 @@ struct MIGRAPHX_EXPORT program private: void assign(const program& p); std::unique_ptr impl; + // program file version is for the data structure or format of the MXR file. Version should be bumped + // if any changes occur to the format of the MXR file. 
+ static constexpr int program_file_version = 8; }; } // namespace MIGRAPHX_INLINE_NS } // namespace migraphx diff --git a/src/netron_output.cpp b/src/netron_output.cpp deleted file mode 100644 index 8323556f169..00000000000 --- a/src/netron_output.cpp +++ /dev/null @@ -1,283 +0,0 @@ -/* - * The MIT License (MIT) - * - * Copyright (c) 2015-2025 Advanced Micro Devices, Inc. All rights reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a copy - * of this software and associated documentation files (the "Software"), to deal - * in the Software without restriction, including without limitation the rights - * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell - * copies of the Software, and to permit persons to whom the Software is - * furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE - * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER - * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, - * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN - * THE SOFTWARE. 
- */ - -#include -#include -#include -#include -#include -#include -#include - -namespace migraphx { -inline namespace MIGRAPHX_INLINE_NS { -namespace { - -// from https://onnx.ai/onnx/intro/concepts.html -int get_onnx_type(shape::type_t s_type) -{ - switch(s_type) - { - case shape::float_type: return 1; - case shape::uint8_type: return 2; - case shape::int8_type: return 3; - case shape::uint16_type: return 4; - case shape::int16_type: return 5; - case shape::int32_type: return 6; - case shape::int64_type: return 7; - case shape::bool_type: return 9; - case shape::half_type: return 10; - case shape::double_type: return 11; - case shape::uint32_type: return 12; - case shape::uint64_type: return 13; - case shape::bf16_type: return 16; - case shape::fp8e4m3fn_type: return 17; - case shape::fp8e4m3fnuz_type: return 18; - case shape::fp8e5m2_type: return 19; - case shape::fp8e5m2fnuz_type: return 20; - case shape::tuple_type: return 0; - case shape::fp4x2_type: return 21; // TODO update this when the type is added - } - MIGRAPHX_THROW("MIGraphX type " + std::to_string(s_type) + " not supported"); -} - -auto make_attribute(const migraphx::value& val) -{ - value attribute = value(std::unordered_map()); - attribute["name"] = val.get_key(); - auto val_string = val.to(); - std::string sub_str = val.get_key() + ":"; - auto find_key = val_string.find(sub_str); - if(find_key != std::string::npos) - { - val_string = val_string.substr(find_key + sub_str.length() + 1); - } - // TODO: doesn't work for some reason with Netron now - // attribute["s"] = base64_encode(val_string); - // attribute["type"] = "STRING"; - attribute["docString"] = val_string; - return attribute; -} - -/// Returns a value with the JSON structure needed for a node -auto make_onnx_json_node(instruction_ref ins, - std::unordered_map ins_uids) -{ - value node; - // TODO add support for module inputs - value input_arr = value({}); - for(instruction_ref input_ins : ins->inputs()) - { - auto name = 
input_ins->name(); - if(name == "@literal" or name == "@param") - { - input_arr.push_back(ins_uids.at(input_ins)); - } - // TODO make a better process for handling nodes to ignore - else if(name.find("hip::hip_allocate_memory") != std::string::npos) - { - continue; - } - else - { - input_arr.push_back(ins_uids.at(input_ins) + "->" + ins_uids.at(ins)); - } - } - value output_arr = value({}); - for(instruction_ref output_ins : ins->outputs()) - { - if(output_ins->name() == "@return") - { - output_arr.push_back(ins_uids.at(output_ins)); - } - else - { - output_arr.push_back(ins_uids.at(ins) + "->" + ins_uids.at(output_ins)); - } - } - node["input"] = input_arr; - node["output"] = output_arr; - node["name"] = ins_uids.at(ins); - node["opType"] = ins->name(); - value op_attribute_arr = value({}); - auto op_value = ins->get_operator().to_value(); - std::for_each(op_value.begin(), op_value.end(), [&](const auto& v) { - const std::string& attr_key = v.get_key(); - if(v.is_binary() or attr_key == "code_object") - { - return; - } - else if(attr_key == "symbol_name" or attr_key == "name") - { - node["opType"] = migraphx::from_value(v); - } - else - { - op_attribute_arr.push_back(make_attribute(v)); - } - }); - node["attribute"] = op_attribute_arr; - return node; -} - -// ONNX graph constant data called "initializer" -auto make_onnx_json_literal(instruction_ref ins, - std::unordered_map ins_uids) -{ - value lit; - lit["dims"] = ins->get_shape().lens(); - lit["dataType"] = get_onnx_type(ins->get_shape().type()); - lit["name"] = ins_uids.at(ins); - // ignoring literal data, setting to "NULL" in base64 - lit["rawData"] = "TlVMTA=="; - return lit; -} - -// TODO handle dynamic shapes -// TODO handle subshapes -auto make_onnx_json_shape(const shape& s) -{ - value ret; - value dim = value({}); - for(std::size_t len : s.lens()) - { - // cppcheck-suppress useStlAlgorithm - dim.push_back({{"dimValue", len}}); - } - ret["dim"] = dim; - return ret; -} - -// ONNX graph edges called 
"valueType" -auto make_onnx_json_edge(instruction_ref ins, - instruction_ref out_ins, - std::unordered_map ins_uids) -{ - value ret; - shape ins_shape = ins->get_shape(); - ret["name"] = ins_uids.at(ins) + "->" + ins_uids.at(out_ins); - value type = {{"tensorType", - {{"elemType", get_onnx_type(ins_shape.type())}, - {"shape", make_onnx_json_shape(ins_shape)}}}}; - ret["type"] = type; - return ret; -} - -auto make_onnx_json_in_out(instruction_ref ins, - std::unordered_map ins_uids) -{ - value ret; - shape ins_shape = ins->get_shape(); - ret["name"] = ins_uids.at(ins); - value type = {{"tensorType", - {{"elemType", get_onnx_type(ins_shape.type())}, - {"shape", make_onnx_json_shape(ins_shape)}}}}; - ret["type"] = type; - return ret; -} - -std::unordered_map make_ins_uids(const module& mod) -{ - std::unordered_map ret; - int count = 0; - for(auto ins : iterator_for(mod)) - { - std::string var_name; - var_name = mod.name() + ":"; - var_name.append(ins->name() + ":"); - if(ins->name() == "@param") - { - var_name.append(any_cast(ins->get_operator()).parameter + ":"); - } - var_name.append("@" + std::to_string(count)); - count++; - ret.emplace(ins, var_name); - } - return ret; -} - -value make_graph(const module* mod) -{ - value graph = {{"node", value({})}, - {"initializer", value({})}, - {"input", value({})}, - {"output", value({})}, - {"valueInfo", value({})}}; - auto ins_uids = make_ins_uids(*mod); - for(auto ins = mod->begin(); ins != mod->end(); ++ins) - { - const auto& name = ins->name(); - if(name == "@literal") - { - graph["initializer"].push_back(make_onnx_json_literal(ins, ins_uids)); - } - else if(name == "@param") - { - graph["input"].push_back(make_onnx_json_in_out(ins, ins_uids)); - } - else if(name == "@return") - { - graph["output"].push_back(make_onnx_json_in_out(ins, ins_uids)); - } - else if(name.find("hip::hip_allocate_memory") != std::string::npos) - { - continue; - } - else - { - graph["node"].push_back(make_onnx_json_node(ins, ins_uids)); - const 
auto& outputs = ins->outputs(); - for(auto out_ins : outputs) - { - if(out_ins->name() != "@return") - { - graph["valueInfo"].push_back(make_onnx_json_edge(ins, out_ins, ins_uids)); - } - } - } - } - return graph; -} - -} // namespace - -std::string make_netron_output(const program& prog) -{ - value output; - auto prog_value = prog.to_value(); - // ONNX IR version 6 - // TODO: investigate sure how this affects things - output["irVersion"] = 6; - output["producerName"] = "AMDMIGraphX"; - output["producerVersion"] = prog_value.at("migraphx_version").to(); - for(auto& mod : prog.get_modules()) - { - auto graph = make_graph(mod); - output["graph"] = graph; - } - return to_pretty_json_string(output, 4); -} - -} // namespace MIGRAPHX_INLINE_NS -} // namespace migraphx diff --git a/src/onnx/netron_output.cpp b/src/onnx/netron_output.cpp new file mode 100644 index 00000000000..73d9b3a3285 --- /dev/null +++ b/src/onnx/netron_output.cpp @@ -0,0 +1,252 @@ +/* + * The MIT License (MIT) + * + * Copyright (c) 2015-2026 Advanced Micro Devices, Inc. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace migraphx { +inline namespace MIGRAPHX_INLINE_NS { + +namespace onnx = onnx_for_migraphx; + +namespace { + +int get_onnx_type(shape::type_t s_type) +{ + switch(s_type) + { + case shape::float_type: return onnx::TensorProto::FLOAT; + case shape::uint8_type: return onnx::TensorProto::UINT8; + case shape::int8_type: return onnx::TensorProto::INT8; + case shape::uint16_type: return onnx::TensorProto::UINT16; + case shape::int16_type: return onnx::TensorProto::INT16; + case shape::int32_type: return onnx::TensorProto::INT32; + case shape::int64_type: return onnx::TensorProto::INT64; + case shape::bool_type: return onnx::TensorProto::BOOL; + case shape::half_type: return onnx::TensorProto::FLOAT16; + case shape::double_type: return onnx::TensorProto::DOUBLE; + case shape::uint32_type: return onnx::TensorProto::UINT32; + case shape::uint64_type: return onnx::TensorProto::UINT64; + case shape::bf16_type: return onnx::TensorProto::BFLOAT16; + case shape::fp8e4m3fn_type: return onnx::TensorProto::FLOAT8E4M3FN; + case shape::fp8e4m3fnuz_type: return onnx::TensorProto::FLOAT8E4M3FNUZ; + case shape::fp8e5m2_type: return onnx::TensorProto::FLOAT8E5M2; + case shape::fp8e5m2fnuz_type: return onnx::TensorProto::FLOAT8E5M2FNUZ; + case shape::tuple_type: return onnx::TensorProto::UNDEFINED; + case shape::fp4x2_type: return onnx::TensorProto::UINT4; + } + MIGRAPHX_THROW("MIGraphX type " + std::to_string(s_type) + " not supported"); +} + +std::unordered_map make_ins_uids(const module& mod) +{ + std::unordered_map ret; + int count = 0; + for(auto ins : iterator_for(mod)) + { + std::string var_name; + 
var_name = mod.name() + ":"; + var_name.append(ins->name() + ":"); + if(ins->name() == "@param") + { + var_name.append(any_cast(ins->get_operator()).parameter + ":"); + } + var_name.append("@" + std::to_string(count)); + count++; + ret.emplace(ins, var_name); + } + return ret; +} + +void set_shape_proto(onnx::TensorShapeProto* shape_proto, const shape& s) +{ + for(std::size_t len : s.lens()) + { + shape_proto->add_dim()->set_dim_value(len); + } +} + +void set_value_info(onnx::ValueInfoProto* vi, const std::string& name, const shape& s) +{ + vi->set_name(name); + auto* type = vi->mutable_type(); + auto* tensor = type->mutable_tensor_type(); + tensor->set_elem_type(get_onnx_type(s.type())); + set_shape_proto(tensor->mutable_shape(), s); +} + +void add_initializer(onnx::GraphProto* graph, + instruction_ref ins, + const std::unordered_map& ins_uids) +{ + auto* init = graph->add_initializer(); + init->set_name(ins_uids.at(ins)); + init->set_data_type(get_onnx_type(ins->get_shape().type())); + for(std::size_t d : ins->get_shape().lens()) + { + init->add_dims(d); + } +} + +void add_node(onnx::GraphProto* graph, + instruction_ref ins, + const std::unordered_map& ins_uids) +{ + auto* node = graph->add_node(); + + std::string op_type = ins->name(); + auto op_value = ins->get_operator().to_value(); + std::for_each(op_value.begin(), op_value.end(), [&](const auto& v) { + const std::string& attr_key = v.get_key(); + if(v.is_binary() or attr_key == "code_object") + { + return; + } + else if(attr_key == "symbol_name" or attr_key == "name") + { + op_type = migraphx::from_value(v); + } + else + { + auto* attr = node->add_attribute(); + attr->set_name(attr_key); + + auto val_string = v.template to(); + std::string sub_str = attr_key + ":"; + auto find_key = val_string.find(sub_str); + if(find_key != std::string::npos) + { + val_string = val_string.substr(find_key + sub_str.length() + 1); + } + attr->set_type(onnx::AttributeProto::STRING); + attr->set_s(val_string); + } + }); + + 
node->set_op_type(op_type); + node->set_name(ins_uids.at(ins)); + + for(instruction_ref input_ins : ins->inputs()) + { + auto name = input_ins->name(); + if(name == "@literal" or name == "@param") + { + node->add_input(ins_uids.at(input_ins)); + } + else if(name.find("hip::hip_allocate_memory") != std::string::npos) + { + continue; + } + else + { + node->add_input(ins_uids.at(input_ins) + "->" + ins_uids.at(ins)); + } + } + + for(instruction_ref output_ins : ins->outputs()) + { + if(output_ins->name() == "@return") + { + node->add_output(ins_uids.at(output_ins)); + } + else + { + node->add_output(ins_uids.at(ins) + "->" + ins_uids.at(output_ins)); + } + } + + if(not ins->get_debug_symbols().empty()) + { + auto* attr = node->add_attribute(); + attr->set_name("debug symbols"); + attr->set_type(onnx::AttributeProto::STRING); + attr->set_s(join_strings(ins->get_debug_symbols(), ", ")); + } +} + +void build_graph(onnx::GraphProto* graph, const module* mod) +{ + auto ins_uids = make_ins_uids(*mod); + for(auto ins = mod->begin(); ins != mod->end(); ++ins) + { + const auto& name = ins->name(); + if(name == "@literal") + { + add_initializer(graph, ins, ins_uids); + } + else if(name == "@param") + { + set_value_info(graph->add_input(), ins_uids.at(ins), ins->get_shape()); + } + else if(name == "@return") + { + set_value_info(graph->add_output(), ins_uids.at(ins), ins->get_shape()); + } + else if(name.find("hip::hip_allocate_memory") != std::string::npos) + { + continue; + } + else + { + add_node(graph, ins, ins_uids); + for(auto out_ins : ins->outputs()) + { + if(out_ins->name() != "@return") + { + set_value_info(graph->add_value_info(), + ins_uids.at(ins) + "->" + ins_uids.at(out_ins), + ins->get_shape()); + } + } + } + } +} + +} // namespace + +void write_netron_output(const program& prog, std::ostream& os) +{ + onnx::ModelProto model; + auto prog_value = prog.to_value(); + model.set_ir_version(prog.get_program_file_version()); + model.set_producer_name("AMDMIGraphX"); + 
model.set_producer_version(prog_value.at("migraphx_version").to()); + + // only exporting the main module + // TODO handle submodules as ONNX subgraphs + build_graph(model.mutable_graph(), prog.get_main_module()); + + model.SerializeToOstream(&os); +} + +} // namespace MIGRAPHX_INLINE_NS +} // namespace migraphx diff --git a/src/program.cpp b/src/program.cpp index c24be628f36..a571c0fa3ac 100644 --- a/src/program.cpp +++ b/src/program.cpp @@ -161,6 +161,8 @@ std::unordered_map program::get_parameter_shapes() const return mm->get_parameter_shapes(); } +int program::get_program_file_version() const { return program_file_version; } + std::size_t program::size() const { return impl->modules.size(); } std::vector program::get_output_shapes() const @@ -682,16 +684,10 @@ static std::string get_migraphx_version() return ss.str(); } -/* -program file version is for the data structure or format of the MXR file. Version should be bumped -if any changes occur to the format of the MXR file. -*/ -const int program_file_version = 8; - value program::to_value() const { value result; - result["version"] = program_file_version; + result["version"] = get_program_file_version(); result["migraphx_version"] = get_migraphx_version(); result["targets"] = migraphx::to_value(this->impl->targets); result["contexts"] = migraphx::to_value(this->impl->contexts); diff --git a/src/py/migraphx_py.cpp b/src/py/migraphx_py.cpp index ccd4321028a..67869d8529d 100644 --- a/src/py/migraphx_py.cpp +++ b/src/py/migraphx_py.cpp @@ -38,7 +38,9 @@ #include #include #include +#include #include +#include #include #include #include @@ -457,6 +459,8 @@ MIGRAPHX_PYBIND11_MODULE(migraphx, m) .def("name", [](migraphx::instruction_ref i) { return i->name(); }) .def("get_literal", [](migraphx::instruction_ref i) { return i->get_literal().get_argument(); }) + .def("get_debug_symbols", + [](migraphx::instruction_ref i) { return i->get_debug_symbols(); }) .def(py::hash(py::self)) .def(py::self == py::self) .def(py::self 
!= py::self); @@ -532,6 +536,20 @@ MIGRAPHX_PYBIND11_MODULE(migraphx, m) py::arg("macro"), py::arg("args"), py::arg("mod_args") = std::vector{}) + .def("has_debug_symbols", &migraphx::module::has_debug_symbols) + .def( + "add_debug_symbols", + [](migraphx::module& mm, + migraphx::instruction_ref ins, + const std::set& symbols) { mm.add_debug_symbols(ins, symbols); }, + py::arg("ins"), + py::arg("symbols")) + .def( + "remove_debug_symbols", + [](migraphx::module& mm, migraphx::instruction_ref ins) { + mm.remove_debug_symbols(ins); + }, + py::arg("ins")) .def("__repr__", [](const migraphx::module& mm) { return migraphx::to_string(mm); }) .def( "__iter__", @@ -605,6 +623,16 @@ MIGRAPHX_PYBIND11_MODULE(migraphx, m) return ss.str(); }) .def("sort", &migraphx::program::sort) + .def( + "write_netron_output", + [](const migraphx::program& p, const std::string& filename) { + std::ofstream os(filename, std::ios::binary); + if(not os.is_open()) + throw std::runtime_error("Failed to open file for writing: " + filename); + migraphx::write_netron_output(p, os); + }, + "Write program as ONNX protobuf binary viewable in Netron", + py::arg("filename")) .def("print", [](const migraphx::program& p) { std::cout << p << std::endl; }) .def("__eq__", std::equal_to{}) .def("__ne__", std::not_equal_to{}) diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt index 1db4fb29d4f..8df09eb4fbb 100644 --- a/test/CMakeLists.txt +++ b/test/CMakeLists.txt @@ -1,7 +1,7 @@ # #################################################################################### # The MIT License (MIT) # -# Copyright (c) 2015-2025 Advanced Micro Devices, Inc. All rights reserved. +# Copyright (c) 2015-2026 Advanced Micro Devices, Inc. All rights reserved. 
# # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal @@ -45,6 +45,8 @@ foreach(TEST ${TESTS}) rocm_clang_tidy_check(test_${BASE_NAME}) endforeach() +target_link_libraries(test_netron_output_test onnx-proto) + if(MIGRAPHX_ENABLE_GPU) # gpu tests file(GLOB GPU_TESTS CONFIGURE_DEPENDS gpu/*.cpp) diff --git a/test/api/CMakeLists.txt b/test/api/CMakeLists.txt index f4bd1d7140f..0740c840031 100644 --- a/test/api/CMakeLists.txt +++ b/test/api/CMakeLists.txt @@ -1,7 +1,7 @@ ##################################################################################### # The MIT License (MIT) # -# Copyright (c) 2015-2025 Advanced Micro Devices, Inc. All rights reserved. +# Copyright (c) 2015-2026 Advanced Micro Devices, Inc. All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal @@ -60,6 +60,7 @@ add_api_test(lookup test_lookup.cpp ${TEST_ONNX_DIR}) add_api_test(module_construct test_module_construct.cpp ${TEST_ONNX_DIR}) add_api_test(dynamic_shape test_dynamic_shape.cpp ${TEST_ONNX_DIR}) add_api_test(ref test_cpu.cpp ${TEST_ONNX_DIR}) +add_api_test(netron_output test_netron_output.cpp ${TEST_ONNX_DIR}) add_api_test(save_load test_save_load.cpp ${TEST_ONNX_DIR}) add_api_test(op test_op_construct.cpp ${TEST_ONNX_DIR}) add_c_api_test(c_op test_c_op_construct.c ${TEST_ONNX_DIR}) diff --git a/test/api/test_netron_output.cpp b/test/api/test_netron_output.cpp new file mode 100644 index 00000000000..1da537dff27 --- /dev/null +++ b/test/api/test_netron_output.cpp @@ -0,0 +1,88 @@ +/* + * The MIT License (MIT) + * + * Copyright (c) 2015-2026 Advanced Micro Devices, Inc. All rights reserved. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ +#include +#include +#include +#include +#include "test.hpp" + +TEST_CASE(netron_output_cpp_api) +{ + auto p = migraphx::parse_onnx("conv_relu_maxpool_test.onnx"); + std::string filename = "migraphx_api_netron_output_test.onnx"; + p.write_netron_output(filename.c_str()); + + std::ifstream ifs(filename, std::ios::binary | std::ios::ate); + EXPECT(ifs.good()); + auto size = ifs.tellg(); + EXPECT(size > 0); + + std::remove(filename.c_str()); +} + +TEST_CASE(netron_output_c_api) +{ + migraphx_program_t p; + migraphx_onnx_options_t onnx_options; + migraphx_onnx_options_create(&onnx_options); + auto status = migraphx_parse_onnx(&p, "conv_relu_maxpool_test.onnx", onnx_options); + EXPECT(status == migraphx_status_success); + + std::string filename = "migraphx_c_api_netron_output_test.onnx"; + status = migraphx_program_write_netron_output(p, filename.c_str()); + EXPECT(status == migraphx_status_success); + + std::ifstream ifs(filename, std::ios::binary | std::ios::ate); + EXPECT(ifs.good()); + auto size = ifs.tellg(); + EXPECT(size > 0); + + std::remove(filename.c_str()); + migraphx_program_destroy(p); + migraphx_onnx_options_destroy(onnx_options); +} + +TEST_CASE(netron_output_constructed_program) +{ + migraphx::program p; + migraphx::module m = p.get_main_module(); + migraphx::shape s{migraphx_shape_float_type, {2, 3}}; + auto x = m.add_parameter("x", s); + auto y = m.add_parameter("y", s); + auto add_op = migraphx::operation("add"); + auto r = m.add_instruction(add_op, {x, y}); + m.add_return({r}); + + std::string filename = "migraphx_api_netron_constructed_test.onnx"; + p.write_netron_output(filename.c_str()); + + std::ifstream ifs(filename, std::ios::binary | std::ios::ate); + EXPECT(ifs.good()); + auto size = ifs.tellg(); + EXPECT(size > 0); + + std::remove(filename.c_str()); +} + +int main(int argc, const char* argv[]) { test::run(argc, argv); } diff --git a/test/netron_output_test.cpp b/test/netron_output_test.cpp new file mode 100644 index 
00000000000..5b5bdffb120 --- /dev/null +++ b/test/netron_output_test.cpp @@ -0,0 +1,268 @@ +/* + * The MIT License (MIT) + * + * Copyright (c) 2015-2026 Advanced Micro Devices, Inc. All rights reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a copy + * of this software and associated documentation files (the "Software"), to deal + * in the Software without restriction, including without limitation the rights + * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + * copies of the Software, and to permit persons to whom the Software is + * furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + * THE SOFTWARE. 
+ */ +#include +#include +#include +#include +#include +#include +#include + +namespace onnx = onnx_for_migraphx; + +static void set_value_info(onnx::ValueInfoProto* vi, + const std::string& name, + int elem_type, + const std::vector& dims) +{ + vi->set_name(name); + auto* tensor = vi->mutable_type()->mutable_tensor_type(); + tensor->set_elem_type(elem_type); + for(auto d : dims) + tensor->mutable_shape()->add_dim()->set_dim_value(d); +} + +static void add_initializer(onnx::GraphProto* graph, + const std::string& name, + int data_type, + const std::vector& dims) +{ + auto* init = graph->add_initializer(); + init->set_name(name); + init->set_data_type(data_type); + for(auto d : dims) + init->add_dims(d); +} + +static onnx::NodeProto* add_node(onnx::GraphProto* graph, + const std::string& op_type, + const std::string& name, + const std::vector& inputs, + const std::vector& outputs) +{ + auto* node = graph->add_node(); + node->set_op_type(op_type); + node->set_name(name); + for(const auto& in : inputs) + node->add_input(in); + for(const auto& out : outputs) + node->add_output(out); + return node; +} + +static void +add_string_attribute(onnx::NodeProto* node, const std::string& name, const std::string& value) +{ + auto* attr = node->add_attribute(); + attr->set_name(name); + attr->set_type(onnx::AttributeProto::STRING); + attr->set_s(value); +} + +static onnx::GraphProto parse_graph(const std::string& proto_binary) +{ + onnx::ModelProto model; + model.ParseFromString(proto_binary); + return model.graph(); +} + +TEST_CASE(netron_output_basic) +{ + migraphx::program p; + auto* mm = p.get_main_module(); + + auto x = mm->add_parameter("x", {migraphx::shape::float_type, {2, 3}}); + auto y = mm->add_parameter("y", {migraphx::shape::float_type, {2, 3}}); + auto sum = mm->add_instruction(migraphx::make_op("add"), x, y); + mm->add_return({sum}); + + std::ostringstream os; + migraphx::write_netron_output(p, os); + + onnx::GraphProto expected; + add_node(&expected, + "add", + 
"main:add:@2", + {"main:@param:x:@1", "main:@param:y:@0"}, + {"main:@return:@3"}); + set_value_info(expected.add_input(), "main:@param:y:@0", onnx::TensorProto::FLOAT, {2, 3}); + set_value_info(expected.add_input(), "main:@param:x:@1", onnx::TensorProto::FLOAT, {2, 3}); + set_value_info(expected.add_output(), "main:@return:@3", onnx::TensorProto::FLOAT, {2, 3}); + + EXPECT(parse_graph(os.str()).SerializeAsString() == expected.SerializeAsString()); +} + +TEST_CASE(netron_output_with_literal) +{ + migraphx::program p; + auto* mm = p.get_main_module(); + + auto x = mm->add_parameter("x", {migraphx::shape::float_type, {2, 3}}); + auto lit = mm->add_literal(migraphx::literal{{migraphx::shape::float_type, {2, 3}}, + {1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f}}); + auto sum = mm->add_instruction(migraphx::make_op("add"), x, lit); + mm->add_return({sum}); + + std::ostringstream os; + migraphx::write_netron_output(p, os); + + onnx::GraphProto expected; + add_node(&expected, + "add", + "main:add:@2", + {"main:@param:x:@1", "main:@literal:@0"}, + {"main:@return:@3"}); + add_initializer(&expected, "main:@literal:@0", onnx::TensorProto::FLOAT, {2, 3}); + set_value_info(expected.add_input(), "main:@param:x:@1", onnx::TensorProto::FLOAT, {2, 3}); + set_value_info(expected.add_output(), "main:@return:@3", onnx::TensorProto::FLOAT, {2, 3}); + + EXPECT(parse_graph(os.str()).SerializeAsString() == expected.SerializeAsString()); +} + +TEST_CASE(netron_output_multiple_types) +{ + migraphx::program p; + auto* mm = p.get_main_module(); + + auto a = mm->add_parameter("a", {migraphx::shape::int32_type, {2, 3}}); + mm->add_parameter("b", {migraphx::shape::int64_type, {4}}); + mm->add_parameter("c", {migraphx::shape::half_type, {2, 3}}); + mm->add_parameter("d", {migraphx::shape::double_type, {2, 3}}); + mm->add_parameter("e", {migraphx::shape::bool_type, {2}}); + mm->add_parameter("f", {migraphx::shape::uint8_type, {2, 3}}); + mm->add_parameter("g", {migraphx::shape::bf16_type, {2, 3}}); + + auto 
lit = mm->add_literal( + migraphx::literal{{migraphx::shape::int32_type, {2, 3}}, {1, 2, 3, 4, 5, 6}}); + auto sum = mm->add_instruction(migraphx::make_op("add"), a, lit); + mm->add_return({sum}); + + std::ostringstream os; + migraphx::write_netron_output(p, os); + + onnx::GraphProto expected; + add_node(&expected, + "add", + "main:add:@8", + {"main:@param:a:@7", "main:@literal:@0"}, + {"main:@return:@9"}); + add_initializer(&expected, "main:@literal:@0", onnx::TensorProto::INT32, {2, 3}); + set_value_info(expected.add_input(), "main:@param:g:@1", onnx::TensorProto::BFLOAT16, {2, 3}); + set_value_info(expected.add_input(), "main:@param:f:@2", onnx::TensorProto::UINT8, {2, 3}); + set_value_info(expected.add_input(), "main:@param:e:@3", onnx::TensorProto::BOOL, {2}); + set_value_info(expected.add_input(), "main:@param:d:@4", onnx::TensorProto::DOUBLE, {2, 3}); + set_value_info(expected.add_input(), "main:@param:c:@5", onnx::TensorProto::FLOAT16, {2, 3}); + set_value_info(expected.add_input(), "main:@param:b:@6", onnx::TensorProto::INT64, {4}); + set_value_info(expected.add_input(), "main:@param:a:@7", onnx::TensorProto::INT32, {2, 3}); + set_value_info(expected.add_output(), "main:@return:@9", onnx::TensorProto::INT32, {2, 3}); + + EXPECT(parse_graph(os.str()).SerializeAsString() == expected.SerializeAsString()); +} + +TEST_CASE(netron_output_op_attributes_and_chain) +{ + migraphx::program p; + auto* mm = p.get_main_module(); + + auto x = mm->add_parameter("x", {migraphx::shape::float_type, {2, 3}}); + auto y = mm->add_parameter("y", {migraphx::shape::float_type, {2, 3}}); + auto sm = mm->add_instruction(migraphx::make_op("softmax", {{"axis", 1}}), x); + auto sum = mm->add_instruction(migraphx::make_op("add"), sm, y); + mm->add_return({sum}); + + std::ostringstream os; + migraphx::write_netron_output(p, os); + + onnx::GraphProto expected; + auto* sm_node = add_node(&expected, + "softmax", + "main:softmax:@2", + {"main:@param:x:@1"}, + 
{"main:softmax:@2->main:add:@3"}); + add_string_attribute(sm_node, "axis", "1"); + add_node(&expected, + "add", + "main:add:@3", + {"main:softmax:@2->main:add:@3", "main:@param:y:@0"}, + {"main:@return:@4"}); + set_value_info(expected.add_input(), "main:@param:y:@0", onnx::TensorProto::FLOAT, {2, 3}); + set_value_info(expected.add_input(), "main:@param:x:@1", onnx::TensorProto::FLOAT, {2, 3}); + set_value_info(expected.add_output(), "main:@return:@4", onnx::TensorProto::FLOAT, {2, 3}); + set_value_info(expected.add_value_info(), + "main:softmax:@2->main:add:@3", + onnx::TensorProto::FLOAT, + {2, 3}); + + EXPECT(parse_graph(os.str()).SerializeAsString() == expected.SerializeAsString()); +} + +TEST_CASE(netron_output_debug_symbols) +{ + migraphx::program p; + auto* mm = p.get_main_module(); + + auto x = mm->add_parameter("x", {migraphx::shape::float_type, {2, 3}}); + auto y = mm->add_parameter("y", {migraphx::shape::float_type, {2, 3}}); + auto sum = mm->add_instruction(migraphx::make_op("add"), x, y); + mm->add_debug_symbols(sum, {"test_file.onnx:42", "origin_op:Add"}); + mm->add_return({sum}); + + std::ostringstream os; + migraphx::write_netron_output(p, os); + + onnx::GraphProto expected; + auto* node = add_node(&expected, + "add", + "main:add:@2", + {"main:@param:x:@1", "main:@param:y:@0"}, + {"main:@return:@3"}); + add_string_attribute(node, "debug symbols", "origin_op:Add, test_file.onnx:42"); + set_value_info(expected.add_input(), "main:@param:y:@0", onnx::TensorProto::FLOAT, {2, 3}); + set_value_info(expected.add_input(), "main:@param:x:@1", onnx::TensorProto::FLOAT, {2, 3}); + set_value_info(expected.add_output(), "main:@return:@3", onnx::TensorProto::FLOAT, {2, 3}); + + EXPECT(parse_graph(os.str()).SerializeAsString() == expected.SerializeAsString()); +} + +TEST_CASE(netron_output_roundtrip) +{ + migraphx::program p; + auto* mm = p.get_main_module(); + + auto x = mm->add_parameter("x", {migraphx::shape::float_type, {2, 3}}); + auto y = 
mm->add_parameter("y", {migraphx::shape::float_type, {2, 3}}); + auto sum = mm->add_instruction(migraphx::make_op("add"), x, y); + mm->add_return({sum}); + + std::ostringstream os; + migraphx::write_netron_output(p, os); + std::string output = os.str(); + + migraphx::onnx_options options; + options.skip_unknown_operators = true; + auto p2 = migraphx::parse_onnx_buffer(output.data(), output.size(), options); + EXPECT(p2.get_main_module()->size() > 0); +} + +int main(int argc, const char* argv[]) { test::run(argc, argv); } diff --git a/test/py/CMakeLists.txt b/test/py/CMakeLists.txt index 68354f111a4..6db68fa0f93 100644 --- a/test/py/CMakeLists.txt +++ b/test/py/CMakeLists.txt @@ -99,12 +99,14 @@ endif() add_py_test(ref test_cpu.py common ${VENV} WORKING_DIRECTORY ${TEST_ONNX_DIR}) add_py_test(save_load test_save_load.py common ${VENV} WORKING_DIRECTORY ${TEST_ONNX_DIR}) +add_py_test(netron_output test_netron_output.py common ${VENV} WORKING_DIRECTORY ${TEST_ONNX_DIR}) add_py_test(op test_op.py common ${VENV} WORKING_DIRECTORY ${TEST_ONNX_DIR}) add_py_test(shape test_shape.py common ${VENV} WORKING_DIRECTORY ${TEST_ONNX_DIR}) add_py_test(module_construct test_module_construct.py common ${VENV} WORKING_DIRECTORY ${TEST_ONNX_DIR}) add_py_test(macro test_macro.py common ${VENV} WORKING_DIRECTORY ${TEST_ONNX_DIR}) add_py_test(literal test_literal.py common ${VENV} WORKING_DIRECTORY ${TEST_ONNX_DIR}) add_py_test(autocast_fp8 test_autocast_fp8.py common ${VENV} WORKING_DIRECTORY ${TEST_ONNX_DIR}) +add_py_test(debug_symbols test_debug_symbols.py common ${VENV} WORKING_DIRECTORY ${TEST_ONNX_DIR}) if(MIGRAPHX_ENABLE_GPU) add_py_test(gpu_offload test_gpu_offload.py common ${VENV} WORKING_DIRECTORY ${TEST_ONNX_DIR}) add_py_test(gpu test_gpu.py common ${VENV} WORKING_DIRECTORY ${TEST_ONNX_DIR}) diff --git a/test/py/test_debug_symbols.py b/test/py/test_debug_symbols.py new file mode 100644 index 00000000000..253e80cde9d --- /dev/null +++ b/test/py/test_debug_symbols.py @@ -0,0 
+1,217 @@ +##################################################################################### +# The MIT License (MIT) +# +# Copyright (c) 2015-2026 Advanced Micro Devices, Inc. All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +# THE SOFTWARE. 
+##################################################################################### +import migraphx + + +def test_module_has_no_debug_symbols(): + p = migraphx.program() + mm = p.get_main_module() + s = migraphx.shape(lens=[2, 3], type="float") + x = mm.add_parameter("x", s) + y = mm.add_parameter("y", s) + mm.add_instruction(migraphx.op("add"), [x, y]) + assert not mm.has_debug_symbols() + + +def test_module_add_debug_symbols(): + p = migraphx.program() + mm = p.get_main_module() + s = migraphx.shape(lens=[2, 3], type="float") + x = mm.add_parameter("x", s) + y = mm.add_parameter("y", s) + add_ins = mm.add_instruction(migraphx.op("add"), [x, y]) + + assert not mm.has_debug_symbols() + mm.add_debug_symbols(add_ins, {"sym_a", "sym_b"}) + assert mm.has_debug_symbols() + + +def test_module_add_debug_symbols_multiple_instructions(): + p = migraphx.program() + mm = p.get_main_module() + s = migraphx.shape(lens=[2, 3], type="float") + x = mm.add_parameter("x", s) + y = mm.add_parameter("y", s) + add_ins = mm.add_instruction(migraphx.op("add"), [x, y]) + relu_ins = mm.add_instruction(migraphx.op("relu"), [add_ins]) + + mm.add_debug_symbols(add_ins, {"onnx:add"}) + mm.add_debug_symbols(relu_ins, {"onnx:relu"}) + + assert mm.has_debug_symbols() + assert add_ins.get_debug_symbols() == {"onnx:add"} + assert relu_ins.get_debug_symbols() == {"onnx:relu"} + + +def test_module_add_debug_symbols_merge(): + p = migraphx.program() + mm = p.get_main_module() + s = migraphx.shape(lens=[2, 3], type="float") + x = mm.add_parameter("x", s) + y = mm.add_parameter("y", s) + add_ins = mm.add_instruction(migraphx.op("add"), [x, y]) + + mm.add_debug_symbols(add_ins, {"sym_a"}) + mm.add_debug_symbols(add_ins, {"sym_b"}) + assert add_ins.get_debug_symbols() == {"sym_a", "sym_b"} + + +def test_module_remove_debug_symbols(): + p = migraphx.program() + mm = p.get_main_module() + s = migraphx.shape(lens=[2, 3], type="float") + x = mm.add_parameter("x", s) + y = mm.add_parameter("y", s) + 
add_ins = mm.add_instruction(migraphx.op("add"), [x, y]) + + mm.add_debug_symbols(add_ins, {"sym_a", "sym_b"}) + assert mm.has_debug_symbols() + + mm.remove_debug_symbols(add_ins) + assert add_ins.get_debug_symbols() == set() + assert not mm.has_debug_symbols() + + +def test_module_remove_one_of_two(): + p = migraphx.program() + mm = p.get_main_module() + s = migraphx.shape(lens=[2, 3], type="float") + x = mm.add_parameter("x", s) + y = mm.add_parameter("y", s) + add_ins = mm.add_instruction(migraphx.op("add"), [x, y]) + relu_ins = mm.add_instruction(migraphx.op("relu"), [add_ins]) + + mm.add_debug_symbols(add_ins, {"sym_add"}) + mm.add_debug_symbols(relu_ins, {"sym_relu"}) + + mm.remove_debug_symbols(add_ins) + assert add_ins.get_debug_symbols() == set() + assert relu_ins.get_debug_symbols() == {"sym_relu"} + assert mm.has_debug_symbols() + + +def test_module_remove_then_readd(): + p = migraphx.program() + mm = p.get_main_module() + s = migraphx.shape(lens=[2, 3], type="float") + x = mm.add_parameter("x", s) + y = mm.add_parameter("y", s) + add_ins = mm.add_instruction(migraphx.op("add"), [x, y]) + + mm.add_debug_symbols(add_ins, {"old_sym"}) + mm.remove_debug_symbols(add_ins) + assert not mm.has_debug_symbols() + + mm.add_debug_symbols(add_ins, {"new_sym"}) + assert add_ins.get_debug_symbols() == {"new_sym"} + assert mm.has_debug_symbols() + + +def test_instruction_get_debug_symbols(): + p = migraphx.program() + mm = p.get_main_module() + s = migraphx.shape(lens=[2, 3], type="float") + x = mm.add_parameter("x", s) + y = mm.add_parameter("y", s) + add_ins = mm.add_instruction(migraphx.op("add"), [x, y]) + + assert add_ins.get_debug_symbols() == set() + + mm.add_debug_symbols(add_ins, {"sym_a", "sym_b", "sym_c"}) + assert add_ins.get_debug_symbols() == {"sym_a", "sym_b", "sym_c"} + + +def test_iterate_instructions_debug_symbols(): + p = migraphx.program() + mm = p.get_main_module() + s = migraphx.shape(lens=[2, 3], type="float") + x = mm.add_parameter("x", s) + y = 
mm.add_parameter("y", s) + z = mm.add_parameter("z", s) + add_ins = mm.add_instruction(migraphx.op("add"), [x, y]) + mul_ins = mm.add_instruction(migraphx.op("mul"), [add_ins, z]) + relu_ins = mm.add_instruction(migraphx.op("relu"), [mul_ins]) + mm.add_return([relu_ins]) + + mm.add_debug_symbols(add_ins, {"onnx:add"}) + mm.add_debug_symbols(mul_ins, {"onnx:mul"}) + mm.add_debug_symbols(relu_ins, {"onnx:relu"}) + + all_symbols = set() + for ins in mm: + all_symbols.update(ins.get_debug_symbols()) + + assert all_symbols == {"onnx:add", "onnx:mul", "onnx:relu"} + + +def test_iterate_only_symbolized_instructions(): + p = migraphx.program() + mm = p.get_main_module() + s = migraphx.shape(lens=[2, 3], type="float") + x = mm.add_parameter("x", s) + y = mm.add_parameter("y", s) + add_ins = mm.add_instruction(migraphx.op("add"), [x, y]) + relu_ins = mm.add_instruction(migraphx.op("relu"), [add_ins]) + mm.add_return([relu_ins]) + + mm.add_debug_symbols(relu_ins, {"onnx:relu"}) + + symbolized = {ins.name(): ins.get_debug_symbols() + for ins in mm if ins.get_debug_symbols()} + assert len(symbolized) == 1 + assert symbolized["relu"] == {"onnx:relu"} + + +def test_parse_onnx_with_debug_symbols(): + p = migraphx.parse_onnx("conv_relu_maxpool_test.onnx", + use_debug_symbols=True) + mm = p.get_main_module() + assert mm.has_debug_symbols() + + all_symbols = set() + for ins in mm: + all_symbols.update(ins.get_debug_symbols()) + assert len(all_symbols) > 0 + + +def test_parse_onnx_without_debug_symbols(): + p = migraphx.parse_onnx("conv_relu_maxpool_test.onnx", + use_debug_symbols=False) + mm = p.get_main_module() + assert not mm.has_debug_symbols() + + +if __name__ == "__main__": + test_module_has_no_debug_symbols() + test_module_add_debug_symbols() + test_module_add_debug_symbols_multiple_instructions() + test_module_add_debug_symbols_merge() + test_module_remove_debug_symbols() + test_module_remove_one_of_two() + test_module_remove_then_readd() + 
test_instruction_get_debug_symbols() + test_iterate_instructions_debug_symbols() + test_iterate_only_symbolized_instructions() + test_parse_onnx_with_debug_symbols() + test_parse_onnx_without_debug_symbols() diff --git a/test/py/test_netron_output.py b/test/py/test_netron_output.py new file mode 100644 index 00000000000..aec1ecb4f22 --- /dev/null +++ b/test/py/test_netron_output.py @@ -0,0 +1,59 @@ +##################################################################################### +# The MIT License (MIT) +# +# Copyright (c) 2015-2026 Advanced Micro Devices, Inc. All rights reserved. +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to deal +# in the Software without restriction, including without limitation the rights +# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +# copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +# THE SOFTWARE. 
+##################################################################################### +import migraphx, tempfile, os + + +def test_netron_output_parsed_model(): + p = migraphx.parse_onnx("conv_relu_maxpool_test.onnx") + + with tempfile.NamedTemporaryFile(suffix=".onnx", delete=False) as t: + filename = t.name + + p.write_netron_output(filename) + size = os.path.getsize(filename) + assert size > 0, "Netron output file is empty" + os.remove(filename) + + +def test_netron_output_constructed_program(): + p = migraphx.program() + mm = p.get_main_module() + s = migraphx.shape(lens=[2, 3], type="float") + x = mm.add_parameter("x", s) + y = mm.add_parameter("y", s) + add_ins = mm.add_instruction(migraphx.op("add"), [x, y]) + mm.add_return([add_ins]) + + with tempfile.NamedTemporaryFile(suffix=".onnx", delete=False) as t: + filename = t.name + + p.write_netron_output(filename) + size = os.path.getsize(filename) + assert size > 0, "Netron output file is empty" + os.remove(filename) + + +if __name__ == "__main__": + test_netron_output_parsed_model() + test_netron_output_constructed_program() diff --git a/tools/api/api.cpp b/tools/api/api.cpp index 05181653be4..a473705a016 100644 --- a/tools/api/api.cpp +++ b/tools/api/api.cpp @@ -39,7 +39,9 @@ #include #include #include +#include #include +#include #include #include @@ -337,6 +339,15 @@ static std::vector get_output_shapes(program& p) { return p.get_output_sh static void print_program(const program& p) { std::cout << p << std::endl; } +static void write_netron_output_file(const program& p, const char* filename) +{ + std::ofstream os(filename, std::ios::binary); + if(not os.is_open()) + MIGRAPHX_THROW(migraphx_status_bad_param, + "Failed to open file for writing: " + std::string(filename)); + write_netron_output(p, os); +} + static void print_module(const module& m) { std::cout << m << std::endl; } static migraphx::instruction_ref add_allocation(module& m, const migraphx::shape& s)