Solving "RuntimeError: Exporting the operator deform_conv2d to ONNX opset version 14 is not supported. Please feel free to request support or submit a pull request on PyTorch GitHub"
1. Install the required package
pip install deform_conv2d_onnx_exporter
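After installing, a quick import check confirms that the exporter module and the registration hook used in the next step are available (a minimal smoke test of my own; it does nothing beyond verifying the install):

# Smoke test: import the exporter and confirm the registration function exists.
import deform_conv2d_onnx_exporter
print(hasattr(deform_conv2d_onnx_exporter, "register_deform_conv2d_onnx_op"))  # expected: True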
2. Build a simple network model
import torch
import torchvision.ops
from torch import nn
import deform_conv2d_onnx_exporter
deform_conv2d_onnx_exporter.register_deform_conv2d_onnx_op()

class mynet(nn.Module):
    def __init__(self):
        super(mynet, self).__init__()
        self.weight = torch.rand(5, 3, 3, 3)  # (out_channels, in_channels, kernel_height, kernel_width)

    def forward(self, inputs, offset):
        # apply deformable convolution
        output = torchvision.ops.deform_conv2d(inputs, offset, self.weight)
        return output
net = mynet()
net.eval()
inputs = torch.rand(4, 3, 10, 10) # (batch_size, channels, height, width)
offset = torch.rand(4, 2 * 3 * 3, 8, 8) # (batch_size, 2*kernel_height*kernel_width, output_height, output_width)
input_names = ['input_image', "offset"]
output_names = ['preds']
torch.onnx.export(net, (inputs, offset), "test.onnx",
                  input_names=input_names, output_names=output_names,
                  verbose=False, opset_version=12)
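Note the shape relationship behind the dummy tensors: with a 10x10 input, a 3x3 kernel, stride 1 and no padding, the output spatial size is 10 - 3 + 1 = 8, which is why the offset tensor's last two dimensions are 8x8. A minimal sanity check of my own (run before the export) using the same net, inputs and offset:

# Verify the deformable convolution's output shape before exporting.
with torch.no_grad():
    preds = net(inputs, offset)
print(preds.shape)  # expected: torch.Size([4, 5, 8, 8])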
3. Inspect the ONNX model
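The exported graph is usually inspected visually in Netron; it can also be checked programmatically. A minimal sketch, assuming the onnx Python package is installed (the exporter expresses deform_conv2d with standard ONNX operators, which is why plain onnxruntime can run it in the next step, so do not expect a single custom DeformConv node):

import onnx

model = onnx.load("test.onnx")
onnx.checker.check_model(model)                              # structural validity check
print([i.name for i in model.graph.input])                   # expected: ['input_image', 'offset']
print([o.name for o in model.graph.output])                  # expected: ['preds']
print(sorted({node.op_type for node in model.graph.node}))   # operators the exporter emitted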
4. Deformable convolution timing
import os
# os.environ["CUDA_VISIBLE_DEVICES"]='0'
import numpy as np
import onnxruntime
import time
ort_session = onnxruntime.InferenceSession("test.onnx", providers=['CUDAExecutionProvider', 'CPUExecutionProvider'])
input_name0 = ort_session.get_inputs()[0].name
input_name1 = ort_session.get_inputs()[1].name
input_size = (4, 3, 10, 10)
offset = (4, 2 * 3 * 3, 8, 8)
input_feature_map = np.random.random(input_size).astype(np.float32)
offset_feature_map = np.random.random(offset).astype(np.float32)
time_list = []
for i in range(55):
    start_time = time.time()
    out = ort_session.run(None, {input_name0: input_feature_map, input_name1: offset_feature_map})[0]
    end_time = time.time() - start_time
    # skip the first few iterations as warm-up before collecting timings
    if i > 5 and end_time > 0.0:
        time_list.append(end_time)
    print(i, out.shape, end_time)
print(np.mean(time_list))
Comparing against the time of a regular convolution is left to you to test; a rough starting sketch follows.
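The sketch below exports a plain nn.Conv2d with the same weight shape as the deformable version (5 output channels, 3 input channels, 3x3 kernel) and times it with the same onnxruntime loop. The file name conv_test.onnx is just illustrative, and absolute timings will vary by machine and execution provider.

import time

import numpy as np
import onnxruntime
import torch
from torch import nn

# Plain convolution with the same weight shape (5, 3, 3, 3) as the deformable version.
conv = nn.Conv2d(3, 5, kernel_size=3, bias=False).eval()
dummy = torch.rand(4, 3, 10, 10)
torch.onnx.export(conv, (dummy,), "conv_test.onnx",
                  input_names=["input_image"], output_names=["preds"],
                  opset_version=12)

sess = onnxruntime.InferenceSession("conv_test.onnx",
                                    providers=['CUDAExecutionProvider', 'CPUExecutionProvider'])
x = np.random.random((4, 3, 10, 10)).astype(np.float32)
times = []
for i in range(55):
    start = time.time()
    out = sess.run(None, {"input_image": x})[0]
    elapsed = time.time() - start
    if i > 5 and elapsed > 0.0:  # skip warm-up iterations, as in the deformable loop above
        times.append(elapsed)
print(out.shape, np.mean(times))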