Python torch.onnx Examples

The following are 30 code examples of the torch.onnx module. Each example links to its original project and source file. You may also want to check out the other available functions and classes of the torch module.
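Most of the examples below follow one pattern: put the model in inference mode, build a dummy input of the shape the model expects, and call torch.onnx.export. A minimal, self-contained sketch of that pattern (the model and file name here are placeholders, not taken from any example):

import torch

model = torch.nn.Linear(4, 2)    # stand-in for any torch.nn.Module
model.eval()                     # export in inference mode
dummy_input = torch.randn(1, 4)  # shape must match the model's expected input
torch.onnx.export(model, dummy_input, "model.onnx")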
Example #1
Source File: export.py    From robosat with MIT License
def main(args):
    dataset = load_config(args.dataset)

    num_classes = len(dataset["common"]["classes"])
    net = UNet(num_classes)

    def map_location(storage, _):
        return storage.cpu()

    chkpt = torch.load(args.checkpoint, map_location=map_location)
    net = torch.nn.DataParallel(net)
    net.load_state_dict(chkpt["state_dict"])

    # TODO: make the number of input channels configurable instead of hard-coding three channels for RGB
    batch = torch.autograd.Variable(torch.randn(1, 3, args.image_size, args.image_size))

    torch.onnx.export(net, batch, args.model) 
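Note: torch.autograd.Variable, used here and in several later examples, has been a no-op wrapper since PyTorch 0.4, so with a modern PyTorch the dummy input can be a plain tensor. A one-line sketch of the equivalent call (512 stands in for args.image_size):

batch = torch.randn(1, 3, 512, 512)  # plain tensor; no Variable wrapper needed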
Example #2
Source File: pytorch_to_onnx.py    From open_model_zoo with Apache License 2.0
def convert_to_onnx(model, input_shape, output_file, input_names, output_names):
    """Convert PyTorch model to ONNX and check the resulting onnx model"""

    output_file.parent.mkdir(parents=True, exist_ok=True)
    model.eval()
    dummy_input = torch.randn(input_shape)
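    # dry run: confirm the model executes on the dummy input before exporting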
    model(dummy_input)
    torch.onnx.export(model, dummy_input, str(output_file), verbose=False,
                      input_names=input_names.split(','), output_names=output_names.split(','))

    # Model check after conversion
    model = onnx.load(str(output_file))
    try:
        onnx.checker.check_model(model)
        print('ONNX check passed successfully.')
    except onnx.onnx_cpp2py_export.checker.ValidationError as exc:
        sys.exit('ONNX check failed with error: ' + str(exc)) 
Example #3
Source File: neural_style.py    From PyTorch with MIT License
def stylize_onnx_caffe2(content_image, args):
    """
    Read ONNX model and run it using Caffe2
    """

    assert not args.export_onnx

    import onnx
    import onnx_caffe2.backend

    model = onnx.load(args.model)

    prepared_backend = onnx_caffe2.backend.prepare(model, device='CUDA' if args.cuda else 'CPU')
    inp = {model.graph.input[0].name: content_image.numpy()}
    c2_out = prepared_backend.run(inp)[0]

    return torch.from_numpy(c2_out) 
Example #4
Source File: onnx_converter.py    From OLive with MIT License
def pytorch2onnx(args):
    # PyTorch exports to ONNX without the need for an external converter
    import torch
    from torch.autograd import Variable
    import torch.onnx
    import torchvision
    # Create input with the correct dimensions of the input of your model
    if args.model_input_shapes is None:
        raise ValueError("Please provide --model_input_shapes to convert PyTorch models.")
    dummy_model_input = []
    if len(args.model_input_shapes) == 1:
        dummy_model_input = Variable(torch.randn(*args.model_input_shapes))
    else:
        for shape in args.model_input_shapes:
            dummy_model_input.append(Variable(torch.randn(*shape)))

    # load the PyTorch model
    model = torch.load(args.model, map_location="cpu")

    # export the PyTorch model as an ONNX protobuf
    torch.onnx.export(model, dummy_model_input, args.output_onnx_path) 
Example #5
Source File: onnx_converter.py    From OLive with MIT License
def convert_models(args):
    # Quick format check
    model_extension = get_extension(args.model)
    if (args.model_type == "onnx" or model_extension == "onnx"):
        print("Input model is already ONNX model. Skipping conversion.")
        if args.model != args.output_onnx_path:
            copyfile(args.model, args.output_onnx_path)
        return

    if converters.get(args.model_type) is None:
        raise ValueError('Model type {} is not currently supported. \n\
            Please select one of the following model types -\n\
                cntk, coreml, keras, pytorch, scikit-learn, tensorflow'.format(args.model_type))

    suffix = suffix_format_map.get(model_extension)

    if suffix is not None and suffix != args.model_type:
        raise ValueError('Model with extension {} does not come from {}'.format(model_extension, args.model_type))

    # Find the corresponding converter for current model
    converter = converters.get(args.model_type)
    # Run converter
    converter(args) 
Example #6
Source File: main.py    From examples with BSD 3-Clause "New" or "Revised" License
def export_onnx(path, batch_size, seq_len):
    print('The model is also exported in ONNX format at {}'.
          format(os.path.realpath(args.onnx_export)))
    model.eval()
    dummy_input = torch.LongTensor(seq_len * batch_size).zero_().view(-1, batch_size).to(device)
    hidden = model.init_hidden(batch_size)
    torch.onnx.export(model, (dummy_input, hidden), path)


# Loop over epochs. 
Example #7
Source File: neural_style.py    From examples with BSD 3-Clause "New" or "Revised" License
def stylize_onnx_caffe2(content_image, args):
    """
    Read ONNX model and run it using Caffe2
    """

    assert not args.export_onnx

    import onnx
    import onnx_caffe2.backend

    model = onnx.load(args.model)

    prepared_backend = onnx_caffe2.backend.prepare(model, device='CUDA' if args.cuda else 'CPU')
    inp = {model.graph.input[0].name: content_image.numpy()}
    c2_out = prepared_backend.run(inp)[0]

    return torch.from_numpy(c2_out) 
Example #8
Source File: test_caffe2.py    From onnx-fb-universe with MIT License
def test_dcgan(self):
        # dcgan is flaky on some seeds, see:
        # https://github.com/ProjectToffee/onnx/pull/70
        torch.manual_seed(1)
        if torch.cuda.is_available():
            torch.cuda.manual_seed_all(1)

        netD = dcgan._netD(1)
        netD.apply(dcgan.weights_init)
        input = Variable(torch.randn(BATCH_SIZE, 3, dcgan.imgsz, dcgan.imgsz))
        self.run_model_test(netD, train=False, batch_size=BATCH_SIZE,
                            input=input)

        netG = dcgan._netG(1)
        netG.apply(dcgan.weights_init)
        state_dict = model_zoo.load_url(model_urls['dcgan_b'], progress=False)
        # state_dict = model_zoo.load_url(model_urls['dcgan_f'], progress=False)
        noise = Variable(
            torch.randn(BATCH_SIZE, dcgan.nz, 1, 1).normal_(0, 1))
        self.run_model_test(netG, train=False, batch_size=BATCH_SIZE,
                            input=noise, state_dict=state_dict, rtol=1e-2, atol=1e-6) 
Example #9
Source File: convert_onnx.py    From inference with Apache License 2.0
def onnx_inference(args):
    # Load the ONNX model
    model = onnx.load("models/deepspeech_{}.onnx".format(args.continue_from))

    # Check that the IR is well formed
    onnx.checker.check_model(model)

    onnx.helper.printable_graph(model.graph)

    print("model checked, preparing backend!")
    rep = backend.prepare(model, device="CPU")  # or "CUDA:0" for GPU

    print("running inference!")

    # Hard coded input dim
    inputs = np.random.randn(16, 1, 161, 129).astype(np.float32)

    start = time.time()
    outputs = rep.run(inputs)
    print("time used: {}".format(time.time() - start))
    # To run networks with more than one input, pass a tuple
    # rather than a single numpy ndarray.
    print(outputs[0]) 
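Note: the Caffe2 ONNX backends used in several of these examples are deprecated; a common way to run an exported model today is ONNX Runtime. A minimal sketch under that assumption (the file name and input shape are placeholders):

import numpy as np
import onnxruntime as ort

sess = ort.InferenceSession("model.onnx")         # load the exported model
input_name = sess.get_inputs()[0].name            # name of the first graph input
dummy = np.random.randn(1, 4).astype(np.float32)  # must match the exported input shape
outputs = sess.run(None, {input_name: dummy})     # None = return all outputs
print(outputs[0])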
Example #10
Source File: test_operators.py    From onnx-fb-universe with MIT License
def test_symbolic_override_nested(self):
        def symb(g, x, y):
            assert isinstance(x, torch._C.Value)
            assert isinstance(y[0], torch._C.Value)
            assert isinstance(y[1], torch._C.Value)
            return g.op('Sum', x, y[0], y[1]), (
                g.op('Neg', x), g.op('Neg', y[0]))

        @torch.onnx.symbolic_override_first_arg_based(symb)
        def foo(x, y):
            return x + y[0] + y[1], (-x, -y[0])

        class BigModule(torch.nn.Module):
            def forward(self, x, y):
                return foo(x, y)

        inp = (Variable(torch.FloatTensor([1])),
               (Variable(torch.FloatTensor([2])),
                Variable(torch.FloatTensor([3]))))
        BigModule()(*inp)
        self.assertONNX(BigModule(), inp) 
Example #11
Source File: onnx_export.py    From openprotein with MIT License
def predict():
    list_of_files = glob.glob('output/models/*')  # '*' matches all files; use a pattern like '*.csv' for a specific format
    model_path = max(list_of_files, key=os.path.getctime)

    print("Generating ONNX from model:", model_path)
    model = torch.load(model_path)

    input_sequences = [
        "SRSLVISTINQISEDSKEFYFTLDNGKTMFPSNSQAWGGEKFENGQRAFVIFNELEQPVNGYDYNIQVRDITKVLTKEIVTMDDEE" \
        "NTEEKIGDDKINATYMWISKDKKYLTIEFQYYSTHSEDKKHFLNLVINNKDNTDDEYINLEFRHNSERDSPDHLGEGYVSFKLDKI" \
        "EEQIEGKKGLNIRVRTLYDGIKNYKVQFP"]

    input_sequences_encoded = list(torch.IntTensor(encode_primary_string(aa))
                                   for aa in input_sequences)

    print("Exporting to ONNX...")

    output_path = "./tests/output/openprotein.onnx"
    onnx_from_model(model, input_sequences_encoded, output_path)

    print("Wrote ONNX to", output_path) 
Example #12
Source File: main.py    From temporal-shift-module with Apache License 2.0
def torch2tvm_module(torch_module: torch.nn.Module, torch_inputs: Tuple[torch.Tensor, ...], target):
    torch_module.eval()
    input_names = []
    input_shapes = {}
    with torch.no_grad():
        for index, torch_input in enumerate(torch_inputs):
            name = "i" + str(index)
            input_names.append(name)
            input_shapes[name] = torch_input.shape
        buffer = io.BytesIO()
        torch.onnx.export(torch_module, torch_inputs, buffer,
                          input_names=input_names,
                          output_names=["o" + str(i) for i in range(len(torch_inputs))])
        outs = torch_module(*torch_inputs)
        buffer.seek(0, 0)
        onnx_model = onnx.load_model(buffer)
        relay_module, params = tvm.relay.frontend.from_onnx(onnx_model, shape=input_shapes)
    with tvm.relay.build_config(opt_level=3):
        graph, tvm_module, params = tvm.relay.build(relay_module, target, params=params)
    return graph, tvm_module, params 
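Note that torch.onnx.export accepts a file-like object as well as a path, which is what allows the in-memory io.BytesIO round trip into onnx.load_model and TVM above without writing to disk.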
Example #13
Source File: test_operators.py    From onnx-fb-universe with MIT License
def test_symbolic_override(self):
        """Lifted from fast-neural-style: custom implementation of instance norm
        to be mapped to ONNX operator"""

        class CustomInstanceNorm(torch.nn.Module):
            def __init__(self, dim, eps=1e-9):
                super(CustomInstanceNorm, self).__init__()
                self.scale = nn.Parameter(torch.FloatTensor(dim).uniform_())
                self.shift = nn.Parameter(torch.FloatTensor(dim).zero_())
                self.eps = eps

            def forward(self, x):
                return self._run_forward(x, self.scale, self.shift, eps=self.eps)

            @staticmethod
            @torch.onnx.symbolic_override(
                lambda g, x, scale, shift, eps: g.op(
                    'InstanceNormalization', x, scale, shift, epsilon_f=eps)
            )
            def _run_forward(x, scale, shift, eps):
                # since we hand-roll instance norm, it doesn't perform well entirely in fp16
                n = x.size(2) * x.size(3)
                t = x.view(x.size(0), x.size(1), n)
                mean = torch.mean(t, 2).unsqueeze(2).unsqueeze(3).expand_as(x)
                # Calculate the biased var. torch.var returns unbiased var
                var = torch.var(t, 2).unsqueeze(2).unsqueeze(3).expand_as(x) * ((float(n) - 1) / float(n))
                scale_broadcast = scale.unsqueeze(1).unsqueeze(1).unsqueeze(0)
                scale_broadcast = scale_broadcast.expand_as(x)
                shift_broadcast = shift.unsqueeze(1).unsqueeze(1).unsqueeze(0)
                shift_broadcast = shift_broadcast.expand_as(x)
                out = (x - mean) / torch.sqrt(var + eps)
                out = out * scale_broadcast + shift_broadcast
                return out

        instnorm = CustomInstanceNorm(10)
        x = Variable(torch.randn(2, 10, 32, 32))
        self.assertONNX(instnorm, x) 
Example #14
Source File: test_operators.py    From onnx-fb-universe with MIT License
def assertONNX(self, f, args, params=tuple(), **kwargs):
        if isinstance(f, nn.Module):
            m = f
        else:
            m = FuncModule(f, params)
        onnx_model_pb = export_to_string(m, args, **kwargs)
        model_def = self.assertONNXExpected(onnx_model_pb)
        if _onnx_test:
            test_function = inspect.stack()[1][0].f_code.co_name
            test_name = test_function[0:4] + "_operator" + test_function[4:]
            output_dir = os.path.join(test_onnx_common.pytorch_operator_dir, test_name)
            # Assume:
            #     1) the old test data has been deleted before the test runs.
            #     2) there is only one assertONNX per test; otherwise the data will be overwritten.
            assert not os.path.exists(output_dir), "{} should not exist!".format(output_dir)
            os.makedirs(output_dir)
            with open(os.path.join(output_dir, "model.onnx"), 'wb') as file:
                file.write(model_def.SerializeToString())
            data_dir = os.path.join(output_dir, "test_data_set_0")
            os.makedirs(data_dir)
            if isinstance(args, Variable):
                args = (args,)
            for index, var in enumerate(flatten(args)):
                tensor = numpy_helper.from_array(var.data.numpy())
                with open(os.path.join(data_dir, "input_{}.pb".format(index)), 'wb') as file:
                    file.write(tensor.SerializeToString())
            outputs = m(*args)
            if isinstance(outputs, Variable):
                outputs = (outputs,)
            for index, var in enumerate(flatten(outputs)):
                tensor = numpy_helper.from_array(var.data.numpy())
                with open(os.path.join(data_dir, "output_{}.pb".format(index)), 'wb') as file:
                    file.write(tensor.SerializeToString()) 
Example #15
Source File: test_operators.py    From onnx-fb-universe with MIT License
def assertONNXExpected(self, binary_pb, subname=None):
        model_def = onnx.ModelProto.FromString(binary_pb)
        onnx.checker.check_model(model_def)
        # doc_string contains stack trace in it, strip it
        onnx.helper.strip_doc_string(model_def)
        self.assertExpected(google.protobuf.text_format.MessageToString(model_def, float_format='.15g'), subname)
        return model_def 
Example #16
Source File: test_caffe2.py    From onnx-fb-universe with MIT License
def test_dynamic_sizes(self):
        class MyModel(torch.nn.Module):
            def __init__(self):
                super(MyModel, self).__init__()
            def forward(self, x):
                shape = torch.onnx.operators.shape_as_tensor(x)
                new_shape = torch.cat((torch.LongTensor([-1]), shape[0].view(1)))
                return torch.onnx.operators.reshape_from_tensor_shape(x, new_shape)
        x = Variable(torch.randn(3, 5, 7))
        self.run_model_test(MyModel(), train=False, input=x, batch_size=BATCH_SIZE, use_gpu=False) 
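Reshaping through torch.onnx.operators inside the model, as above, is one way to keep sizes dynamic under tracing; the more common approach is to declare variable dimensions at export time via dynamic_axes (also used in Example #21). A minimal sketch, not taken from the test suite (the model, file name, and axis names are placeholders):

import torch

model = torch.nn.Linear(7, 7)
dummy = torch.randn(3, 5, 7)
torch.onnx.export(
    model, dummy, "dynamic.onnx",
    input_names=["x"], output_names=["y"],
    dynamic_axes={"x": {0: "batch", 1: "length"}},  # dims 0 and 1 may vary at run time
)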
Example #17
Source File: test_models.py    From onnx-fb-universe with MIT License
def exportTest(self, model, inputs, subname=None, rtol=1e-2, atol=1e-7):
        trace = torch.onnx.utils._trace(model, inputs)
        torch._C._jit_pass_lint(trace.graph())
        verify(model, inputs, backend, rtol=rtol, atol=atol) 
Example #18
Source File: test_caffe2.py    From onnx-fb-universe with MIT License
def do_export(model, inputs, *args, **kwargs):
    f = io.BytesIO()
    out = torch.onnx._export(model, inputs, f, *args, **kwargs)
    return f.getvalue(), out 
Example #19
Source File: test_caffe2.py    From onnx-fb-universe with MIT License
def skipIfEmbed(func):
    def wrapper(self):
        if self.embed_params:
            raise unittest.SkipTest("Skip embed_params verify test")
        return func(self)
    return wrapper


Example #20
Source File: verify.py    From onnx-fb-universe with MIT License
def equalAndThen(self, x, y, msg, k):
        """
        Helper for implementing 'requireEqual' and 'checkEqual'.  Upon failure,
        invokes continuation 'k' with the error message.
        """
        if isinstance(x, onnx.TensorProto) and isinstance(y, onnx.TensorProto):
            self.equalAndThen(x.name, y.name, msg, k)
            # Use numpy for the comparison
            t1 = onnx.numpy_helper.to_array(x)
            t2 = onnx.numpy_helper.to_array(y)
            new_msg = "{}In embedded parameter '{}'".format(colonize(msg), x.name)
            self.equalAndThen(t1, t2, new_msg, k)
        elif isinstance(x, np.ndarray) and isinstance(y, np.ndarray):
            try:
                np.testing.assert_equal(x, y)
            except AssertionError as e:
                # invoke the continuation with the error message, per the docstring
                k("{}{}".format(colonize(msg, ": "), str(e).lstrip()))
        else:
            if x != y:
                # TODO: Better algorithm for lists
                sx = str(x)
                sy = str(y)
                if len(sx) > 40 or len(sy) > 40 or '\n' in sx or '\n' in sy:
                    # long form
                    l = "=" * 50
                    k("\n{}The value\n{}\n{}\n{}\n\ndoes not equal\n\n{}\n{}\n{}"
                        .format(colonize(msg, ":\n"), l, sx, l, l, sy, l))
                else:
                    k("{}{} != {}".format(colonize(msg), sx, sy)) 
Example #21
Source File: onnx_export_tmhmm3.py    From openprotein with MIT License
def onnx_from_model(model, input_str, path):
    """Export to onnx"""
    torch.onnx.export(model, input_str, path,
                      enable_onnx_checker=True, opset_version=10, verbose=True,
                      input_names=['embedded_sequences', 'mask'],   # the model's input names
                      output_names=['emissions',
                                    'crf_start_transitions',
                                    'crf_transitions',
                                    'crf_end_transitions'],  # the model's output names
                      dynamic_axes={
                          'mask': {0: 'batch_size'},
                          'embedded_sequences': {0: 'max_seq_length', 1: 'batch_size'},
                          'emissions': {0: 'max_seq_length', 1: 'batch_size'},
                      }
                      ) 
Example #22
Source File: onnx_export.py    From openprotein with MIT License
def onnx_from_model(model, input_str, path):
    """Export to onnx"""
    torch.onnx.export(model, input_str, path, opset_version=10, verbose=True) 
Example #23
Source File: main.py    From PyTorch with MIT License
def export_onnx(path, batch_size, seq_len):
    print('The model is also exported in ONNX format at {}'.
          format(os.path.realpath(args.onnx_export)))
    model.eval()
    dummy_input = torch.LongTensor(seq_len * batch_size).zero_().view(-1, batch_size).to(device)
    hidden = model.init_hidden(batch_size)
    torch.onnx.export(model, (dummy_input, hidden), path)


# Loop over epochs. 
Example #24
Source File: neural_style.py    From PyTorch with MIT License
def stylize(args):
    device = torch.device("cuda" if args.cuda else "cpu")

    content_image = utils.load_image(args.content_image, scale=args.content_scale)
    content_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Lambda(lambda x: x.mul(255))
    ])
    content_image = content_transform(content_image)
    content_image = content_image.unsqueeze(0).to(device)

    if args.model.endswith(".onnx"):
        output = stylize_onnx_caffe2(content_image, args)
    else:
        with torch.no_grad():
            style_model = TransformerNet()
            state_dict = torch.load(args.model)
            # remove saved deprecated running_* keys in InstanceNorm from the checkpoint
            for k in list(state_dict.keys()):
                if re.search(r'in\d+\.running_(mean|var)$', k):
                    del state_dict[k]
            style_model.load_state_dict(state_dict)
            style_model.to(device)
            if args.export_onnx:
                assert args.export_onnx.endswith(".onnx"), "Export model file should end with .onnx"
                output = torch.onnx._export(style_model, content_image, args.export_onnx).cpu()
            else:
                output = style_model(content_image).cpu()
    utils.save_image(args.output_image, output[0]) 
Example #25
Source File: learners.py    From TorchFusion with MIT License
def disc_to_onnx(self, path, input_size, label=None, input_type=torch.FloatTensor, **kwargs):
        input = torch.randn(input_size).type(input_type).unsqueeze(0)
        if label is None:
            inputs = Variable(input.cuda() if self.cuda else input)
        else:
            label = torch.randn(1, 1).fill_(label)
            inputs = [Variable(input.cuda() if self.cuda else input), label.cuda() if self.cuda else label]

        return onnx._export(self.disc_model, inputs, f=path, **kwargs) 
Example #26
Source File: broadcast_mul.py    From onnx2caffe with MIT License
def export(dir):
    dummy_input = Variable(torch.randn(1, 3, 4, 4))
    model = broadcast_mul()
    model.eval()
    torch.save(model.state_dict(),os.path.join(dir,"broadcast_mul.pth"))
    onnx.export(model, dummy_input,os.path.join(dir,"broadcast_mul.onnx"), verbose=True) 
Example #27
Source File: train_rnn.py    From relational-rnn-pytorch with Apache License 2.0
def export_onnx(path, batch_size, seq_len):
    print('The model is also exported in ONNX format at {}'.
          format(os.path.realpath(args.onnx_export)))
    model.eval()
    dummy_input = torch.LongTensor(seq_len * batch_size).zero_().view(-1, batch_size).to(device)
    hidden = model.init_hidden(batch_size)
    torch.onnx.export(model, (dummy_input, hidden), path)


# Loop over epochs. 
Example #28
Source File: resnet.py    From onnx2caffe with MIT License
def export(dir):
    file_path = os.path.realpath(__file__)
    file_dir = os.path.dirname(file_path)
    dummy_input = Variable(torch.randn(1, 3, 32, 32))
    model = ResNet34()
    # model = load_network(model,os.path.join(file_dir,'..','model','pose_v02.pth'))
    model.eval()
    torch.save(model.state_dict(), os.path.join(dir, "resnet.pth"))
    onnx.export(model, dummy_input, os.path.join(dir, "resnet.onnx"), verbose=True)
Example #29
Source File: googlenet.py    From onnx2caffe with MIT License
def export(dir):
    file_path = os.path.realpath(__file__)
    file_dir = os.path.dirname(file_path)
    dummy_input = Variable(torch.randn(1, 3, 32, 32))
    model = GoogLeNet()
    # model = load_network(model,os.path.join(file_dir,'..','model','pose_v02.pth'))
    model.eval()
    torch.save(model.state_dict(), os.path.join(dir, "googlenet.pth"))
    onnx.export(model, dummy_input, os.path.join(dir, "googlenet.onnx"), verbose=True)
Example #30
Source File: MobileNetV2.py    From onnx2caffe with MIT License
def export(dir):
    dummy_input = Variable(torch.randn(1, 3, 224, 224))
    model = MobileNetV2()
    model.eval()
    torch.save(model.state_dict(), os.path.join(dir, "MobileNetV2.pth"))
    onnx.export(model, dummy_input, os.path.join(dir, "MobileNetV2.onnx"), verbose=True)