
Unable to export detection model to ONNX #1995

@AakashKumarNain

Description

🐛 Bug

I was trying to convert the pretrained maskrcnn_resnet50_fpn detection model to ONNX, but I hit a runtime error during export.

To Reproduce

import os
import numpy as np
import matplotlib.pyplot as plt

import torch
import torchvision
from torchvision.models.detection import FasterRCNN
from torchvision.models.detection.rpn import AnchorGenerator
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor
from torchvision.models.detection.mask_rcnn import MaskRCNNPredictor

# load model
model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=True)
model.eval()

# conversion
batch_size=1
x = torch.randn(batch_size, 3, 300, 300, requires_grad=True)
outputs = ['boxes', 'labels', 'scores', 'masks']
torch.onnx.export(model,                     # model being run
                  x,                         # model input (or a tuple for multiple inputs)
                  "resnet50detection.onnx",  # where to save the model (can be a file or file-like object)
                  export_params=True,        # store the trained parameter weights inside the model file
                  opset_version=10,          # the ONNX version to export the model to
                  do_constant_folding=True,  # whether to execute constant folding for optimization
                  input_names = ['input'],   # the model's input names
                  output_names = outputs,    # the model's output names
                  dynamic_axes={'input' : {0 : 'batch_size'},    # variable length axes
                                'output' : {0 : 'batch_size'}})
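
For context, the detection models take a list of 3-channel image tensors in eager mode and return one dict per image; a quick sanity check of that calling convention (continuing from the snippet above, separate from the failing export) would be:

# eager-mode sanity check: the model takes a list of CHW images and
# returns one dict per image with 'boxes', 'labels', 'scores', 'masks'
with torch.no_grad():
    preds = model([torch.rand(3, 300, 300)])
print(preds[0].keys())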

Expected behavior

A successfully exported ONNX model.

Environment

  • PyTorch / torchvision Version (e.g., 1.0 / 0.4.0): 1.4 / 0.5
  • OS (e.g., Linux): Linux
  • How you installed PyTorch / torchvision (conda, pip, source): conda
  • Build command you used (if compiling from source):
  • Python version: 3.7
  • CUDA/cuDNN version: N/A
  • GPU models and configuration: N/A
  • Any other relevant information:

Additional context

Here is the error I am getting:

---------------------------------------------------------------------------
RuntimeError                              Traceback (most recent call last)
<ipython-input-6-ce14949e7fa3> in <module>
     11                   output_names = outputs,    # the model's output names
     12                   dynamic_axes={'input' : {0 : 'batch_size'},    # variable lenght axes
---> 13                                 'output' : {0 : 'batch_size'}})

~/miniconda3/envs/torchenv/lib/python3.7/site-packages/torch/onnx/__init__.py in export(model, args, f, export_params, verbose, training, input_names, output_names, aten, export_raw_ir, operator_export_type, opset_version, _retain_param_name, do_constant_folding, example_outputs, strip_doc_string, dynamic_axes, keep_initializers_as_inputs)
    146                         operator_export_type, opset_version, _retain_param_name,
    147                         do_constant_folding, example_outputs,
--> 148                         strip_doc_string, dynamic_axes, keep_initializers_as_inputs)
    149 
    150 

~/miniconda3/envs/torchenv/lib/python3.7/site-packages/torch/onnx/utils.py in export(model, args, f, export_params, verbose, training, input_names, output_names, aten, export_raw_ir, operator_export_type, opset_version, _retain_param_name, do_constant_folding, example_outputs, strip_doc_string, dynamic_axes, keep_initializers_as_inputs)
     64             _retain_param_name=_retain_param_name, do_constant_folding=do_constant_folding,
     65             example_outputs=example_outputs, strip_doc_string=strip_doc_string,
---> 66             dynamic_axes=dynamic_axes, keep_initializers_as_inputs=keep_initializers_as_inputs)
     67 
     68 

~/miniconda3/envs/torchenv/lib/python3.7/site-packages/torch/onnx/utils.py in _export(model, args, f, export_params, verbose, training, input_names, output_names, operator_export_type, export_type, example_outputs, propagate, opset_version, _retain_param_name, do_constant_folding, strip_doc_string, dynamic_axes, keep_initializers_as_inputs, fixed_batch_size)
    414                                                         example_outputs, propagate,
    415                                                         _retain_param_name, do_constant_folding,
--> 416                                                         fixed_batch_size=fixed_batch_size)
    417 
    418         # TODO: Don't allocate a in-memory string for the protobuf

~/miniconda3/envs/torchenv/lib/python3.7/site-packages/torch/onnx/utils.py in _model_to_graph(model, args, verbose, training, input_names, output_names, operator_export_type, example_outputs, propagate, _retain_param_name, do_constant_folding, _disable_torch_constant_prop, fixed_batch_size)
    294     graph = _optimize_graph(graph, operator_export_type,
    295                             _disable_torch_constant_prop=_disable_torch_constant_prop,
--> 296                             fixed_batch_size=fixed_batch_size, params_dict=params_dict)
    297 
    298     if isinstance(model, torch.jit.ScriptModule) or isinstance(model, torch.jit.ScriptFunction):

~/miniconda3/envs/torchenv/lib/python3.7/site-packages/torch/onnx/utils.py in _optimize_graph(graph, operator_export_type, _disable_torch_constant_prop, fixed_batch_size, params_dict)
    133         torch._C._jit_pass_erase_number_types(graph)
    134 
--> 135         graph = torch._C._jit_pass_onnx(graph, operator_export_type)
    136         torch._C._jit_pass_lint(graph)
    137 

~/miniconda3/envs/torchenv/lib/python3.7/site-packages/torch/onnx/__init__.py in _run_symbolic_function(*args, **kwargs)
    177 def _run_symbolic_function(*args, **kwargs):
    178     from torch.onnx import utils
--> 179     return utils._run_symbolic_function(*args, **kwargs)
    180 
    181 

~/miniconda3/envs/torchenv/lib/python3.7/site-packages/torch/onnx/utils.py in _run_symbolic_function(g, n, inputs, env, operator_export_type)
    655                                   .format(op_name, opset_version, op_name))
    656                 op_fn = sym_registry.get_registered_op(op_name, '', opset_version)
--> 657                 return op_fn(g, *inputs, **attrs)
    658 
    659         elif ns == "prim":

~/miniconda3/envs/torchenv/lib/python3.7/site-packages/torch/onnx/symbolic_helper.py in wrapper(g, *args)
    126             # some args may be optional, so the length may be smaller
    127             assert len(arg_descriptors) >= len(args)
--> 128             args = [_parse_arg(arg, arg_desc) for arg, arg_desc in zip(args, arg_descriptors)]
    129             return fn(g, *args)
    130         # In Python 2 functools.wraps chokes on partially applied functions, so we need this as a workaround

~/miniconda3/envs/torchenv/lib/python3.7/site-packages/torch/onnx/symbolic_helper.py in <listcomp>(.0)
    126             # some args may be optional, so the length may be smaller
    127             assert len(arg_descriptors) >= len(args)
--> 128             args = [_parse_arg(arg, arg_desc) for arg, arg_desc in zip(args, arg_descriptors)]
    129             return fn(g, *args)
    130         # In Python 2 functools.wraps chokes on partially applied functions, so we need this as a workaround

~/miniconda3/envs/torchenv/lib/python3.7/site-packages/torch/onnx/symbolic_helper.py in _parse_arg(value, desc)
     79                 if v.node().kind() != 'onnx::Constant':
     80                     raise RuntimeError("Failed to export an ONNX attribute '" + v.node().kind() +
---> 81                                        "', since it's not constant, please try to make "
     82                                        "things (e.g., kernel size) static if possible")
     83             return [int(v.node()['value']) for v in value.node().inputs()]

RuntimeError: Failed to export an ONNX attribute 'onnx::Sub', since it's not constant, please try to make things (e.g., kernel size) static if possible
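
This may be off base, but the failure looks like an opset-10 symbolic expecting a constant attribute that the traced detection model only produces dynamically. If I'm reading the torchvision 0.5 docs correctly, the detection models are exported there with opset_version=11 and a list of 3-channel images as input, with no dynamic_axes; a minimal sketch of that path (same pretrained weights as above) for comparison:

import torch
import torchvision

model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=True)
model.eval()

# Following the torchvision ONNX example: a list of CHW images, opset 11,
# and no dynamic_axes (the export assumes a fixed single-image input)
x = [torch.rand(3, 300, 300)]
torch.onnx.export(model,
                  x,
                  "maskrcnn_resnet50_fpn.onnx",
                  opset_version=11,
                  do_constant_folding=True,
                  input_names=['input'],
                  output_names=['boxes', 'labels', 'scores', 'masks'])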
