[TOC]

Article reposted from: https://leimao.github.io/blog/ONNX-Python-API/

Overview

Open Neural Network Exchange (ONNX) is an open standard format for representing machine learning models. ONNX is one of the most widely used machine learning model formats, supported by a community of partners who have implemented it in many frameworks and tools.

In this blog post, I would like to discuss how to create and modify ONNX models using the ONNX Python API.

ONNX Data Structures

ONNX models are represented using Protocol Buffers. Specifically, the entire model information is encoded using onnx.proto.

The main ONNX protocol buffers that describe a neural network are ModelProto, GraphProto, NodeProto, TensorProto, and ValueInfoProto.

| Key ONNX Protos | Description |
|---|---|
| ModelProto | It contains the model description and the GraphProto. |
| GraphProto | It contains the node information, node initializers, and IO tensors in the model. |
| NodeProto | It represents a node in the model. It contains the input and output tensor names, node initializers, and node attributes. |
| TensorProto | It represents a node initializer (a constant tensor in the node). In addition to the data type and shape, specific values are assigned. |
| ValueInfoProto | It represents an IO tensor in the model, for which only the data type and shape are defined. |
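
To make this hierarchy concrete, here is a small sketch that loads a serialized ONNX file and walks through these protos. It assumes a file named convnet.onnx exists (for example, the model saved by the script later in this post); any ONNX model file would do.

```python
import onnx

# Loading a serialized ONNX file yields a ModelProto.
model = onnx.load("convnet.onnx")

# The ModelProto wraps a single GraphProto.
graph = model.graph

# GraphProto holds repeated NodeProto, TensorProto (initializers),
# and ValueInfoProto (graph inputs and outputs) fields.
for node in graph.node:  # NodeProto
    print(node.name, node.op_type, list(node.input), list(node.output))

for initializer in graph.initializer:  # TensorProto
    print(initializer.name, list(initializer.dims), initializer.data_type)

for value_info in list(graph.input) + list(graph.output):  # ValueInfoProto
    print(value_info.name, value_info.type.tensor_type.elem_type)
```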

Creating an ONNX Model

To better understand the ONNX protocol buffers, let's use the ONNX Python API (the ONNX helper functions in onnx.helper) to create a dummy convolutional classification neural network from scratch, consisting of convolution, batch normalization, ReLU, and global average pooling layers.

```python
import numpy as np
import onnx


def create_initializer_tensor(
        name: str,
        tensor_array: np.ndarray,
        data_type: onnx.TensorProto = onnx.TensorProto.FLOAT) -> onnx.TensorProto:

    # (TensorProto)
    initializer_tensor = onnx.helper.make_tensor(
        name=name,
        data_type=data_type,
        dims=tensor_array.shape,
        vals=tensor_array.flatten().tolist())

    return initializer_tensor


def main() -> None:

    # Create a dummy convolutional neural network.

    # IO tensors (ValueInfoProto).
    model_input_name = "X"
    X = onnx.helper.make_tensor_value_info(model_input_name,
                                           onnx.TensorProto.FLOAT,
                                           [None, 3, 32, 32])
    model_output_name = "Y"
    model_output_channels = 10
    Y = onnx.helper.make_tensor_value_info(model_output_name,
                                           onnx.TensorProto.FLOAT,
                                           [None, model_output_channels, 1, 1])

    # Create a Conv node (NodeProto).
    # https://github.com/onnx/onnx/blob/rel-1.9.0/docs/Operators.md#conv
    conv1_output_node_name = "Conv1_Y"
    # Dummy weights for conv.
    conv1_in_channels = 3
    conv1_out_channels = 32
    conv1_kernel_shape = (3, 3)
    conv1_pads = (1, 1, 1, 1)
    conv1_W = np.ones(shape=(conv1_out_channels, conv1_in_channels,
                             *conv1_kernel_shape)).astype(np.float32)
    conv1_B = np.ones(shape=(conv1_out_channels)).astype(np.float32)

    # Create the initializer tensors for the weights.
    conv1_W_initializer_tensor_name = "Conv1_W"
    conv1_W_initializer_tensor = create_initializer_tensor(
        name=conv1_W_initializer_tensor_name,
        tensor_array=conv1_W,
        data_type=onnx.TensorProto.FLOAT)

    conv1_B_initializer_tensor_name = "Conv1_B"
    conv1_B_initializer_tensor = create_initializer_tensor(
        name=conv1_B_initializer_tensor_name,
        tensor_array=conv1_B,
        data_type=onnx.TensorProto.FLOAT)

    conv1_node = onnx.helper.make_node(
        name="Conv1",  # Name is optional.
        op_type="Conv",
        # Must follow the order of input and output definitions.
        # https://github.com/onnx/onnx/blob/rel-1.9.0/docs/Operators.md#inputs-2---3
        inputs=[
            model_input_name, conv1_W_initializer_tensor_name,
            conv1_B_initializer_tensor_name
        ],
        outputs=[conv1_output_node_name],
        # The following arguments are attributes.
        kernel_shape=conv1_kernel_shape,
        # Default values for other attributes: strides=[1, 1], dilations=[1, 1], groups=1
        pads=conv1_pads,
    )

    # Create a BatchNormalization node (NodeProto).
    bn1_output_node_name = "BN1_Y"
    # Dummy parameters for batchnorm.
    bn1_scale = np.random.randn(conv1_out_channels).astype(np.float32)
    bn1_bias = np.random.randn(conv1_out_channels).astype(np.float32)
    bn1_mean = np.random.randn(conv1_out_channels).astype(np.float32)
    bn1_var = np.random.rand(conv1_out_channels).astype(np.float32)
    # Create the initializer tensors.
    bn1_scale_initializer_tensor_name = "BN1_Scale"
    bn1_bias_initializer_tensor_name = "BN1_Bias"
    bn1_mean_initializer_tensor_name = "BN1_Mean"
    bn1_var_initializer_tensor_name = "BN1_Var"
    bn1_scale_initializer_tensor = create_initializer_tensor(
        name=bn1_scale_initializer_tensor_name,
        tensor_array=bn1_scale,
        data_type=onnx.TensorProto.FLOAT)
    bn1_bias_initializer_tensor = create_initializer_tensor(
        name=bn1_bias_initializer_tensor_name,
        tensor_array=bn1_bias,
        data_type=onnx.TensorProto.FLOAT)
    bn1_mean_initializer_tensor = create_initializer_tensor(
        name=bn1_mean_initializer_tensor_name,
        tensor_array=bn1_mean,
        data_type=onnx.TensorProto.FLOAT)
    bn1_var_initializer_tensor = create_initializer_tensor(
        name=bn1_var_initializer_tensor_name,
        tensor_array=bn1_var,
        data_type=onnx.TensorProto.FLOAT)

    bn1_node = onnx.helper.make_node(
        name="BN1",  # Name is optional.
        op_type="BatchNormalization",
        inputs=[
            conv1_output_node_name, bn1_scale_initializer_tensor_name,
            bn1_bias_initializer_tensor_name, bn1_mean_initializer_tensor_name,
            bn1_var_initializer_tensor_name
        ],
        outputs=[bn1_output_node_name],
    )

    # Create a ReLU node (NodeProto).
    relu1_output_node_name = "ReLU1_Y"

    relu1_node = onnx.helper.make_node(
        name="ReLU1",  # Name is optional.
        op_type="Relu",
        inputs=[bn1_output_node_name],
        outputs=[relu1_output_node_name],
    )

    # Create a GlobalAveragePool node (NodeProto).
    avg_pool1_output_node_name = "Avg_Pool1_Y"

    avg_pool1_node = onnx.helper.make_node(
        name="Avg_Pool1",  # Name is optional.
        op_type="GlobalAveragePool",
        inputs=[relu1_output_node_name],
        outputs=[avg_pool1_output_node_name],
    )

    # Create a Conv node (NodeProto).
    # https://github.com/onnx/onnx/blob/rel-1.9.0/docs/Operators.md#conv
    # Dummy weights for conv.
    conv2_in_channels = conv1_out_channels
    conv2_out_channels = model_output_channels
    conv2_kernel_shape = (1, 1)
    conv2_pads = (0, 0, 0, 0)
    conv2_W = np.ones(shape=(conv2_out_channels, conv2_in_channels,
                             *conv2_kernel_shape)).astype(np.float32)
    conv2_B = np.ones(shape=(conv2_out_channels)).astype(np.float32)
    # Create the initializer tensors for the weights.
    conv2_W_initializer_tensor_name = "Conv2_W"
    conv2_W_initializer_tensor = create_initializer_tensor(
        name=conv2_W_initializer_tensor_name,
        tensor_array=conv2_W,
        data_type=onnx.TensorProto.FLOAT)
    conv2_B_initializer_tensor_name = "Conv2_B"
    conv2_B_initializer_tensor = create_initializer_tensor(
        name=conv2_B_initializer_tensor_name,
        tensor_array=conv2_B,
        data_type=onnx.TensorProto.FLOAT)

    conv2_node = onnx.helper.make_node(
        name="Conv2",
        op_type="Conv",
        inputs=[
            avg_pool1_output_node_name, conv2_W_initializer_tensor_name,
            conv2_B_initializer_tensor_name
        ],
        outputs=[model_output_name],
        kernel_shape=conv2_kernel_shape,
        pads=conv2_pads,
    )

    # Create the graph (GraphProto).
    graph_def = onnx.helper.make_graph(
        nodes=[conv1_node, bn1_node, relu1_node, avg_pool1_node, conv2_node],
        name="ConvNet",
        inputs=[X],  # Graph input
        outputs=[Y],  # Graph output
        initializer=[
            conv1_W_initializer_tensor, conv1_B_initializer_tensor,
            bn1_scale_initializer_tensor, bn1_bias_initializer_tensor,
            bn1_mean_initializer_tensor, bn1_var_initializer_tensor,
            conv2_W_initializer_tensor, conv2_B_initializer_tensor
        ],
    )

    # Create the model (ModelProto).
    model_def = onnx.helper.make_model(graph_def, producer_name="onnx-example")
    model_def.opset_import[0].version = 13

    model_def = onnx.shape_inference.infer_shapes(model_def)

    onnx.checker.check_model(model_def)

    onnx.save(model_def, "convnet.onnx")


if __name__ == "__main__":
    main()
```

Once the ONNX model has been created, we can further verify it using ONNX Runtime.
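
As a quick sanity check, a minimal sketch of such a verification might look like the following; it assumes the onnxruntime package is installed and that the model was saved as convnet.onnx by the script above.

```python
import numpy as np
import onnxruntime as ort

# Run the freshly created model on a random input and check the output shape.
session = ort.InferenceSession("convnet.onnx", providers=["CPUExecutionProvider"])

input_name = session.get_inputs()[0].name    # "X" in the script above
output_name = session.get_outputs()[0].name  # "Y"

x = np.random.randn(1, 3, 32, 32).astype(np.float32)
y = session.run([output_name], {input_name: x})[0]

# The graph output was declared as [None, 10, 1, 1].
assert y.shape == (1, 10, 1, 1)
print(y.shape)
```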

Modifying an ONNX Model

Modifying an ONNX model is a bit more complicated, because all the information is encoded as protocol buffers and there are no ONNX helper functions for modifying them.

Fortunately, we can assign values directly to the non-repeated fields of the onnx.proto messages. For repeated fields, we cannot assign a new value to the field as a whole, but we can modify the existing items in place, or use the Python binding interfaces such as extend and pop to add and remove items.
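
As an illustration of both cases, here is a small sketch against the convnet.onnx model created above; the modified values and names used here (the producer name, the node name, the extra opset entry) are arbitrary examples rather than anything required by the model.

```python
import onnx

model = onnx.load("convnet.onnx")

# Non-repeated fields can be assigned to directly.
model.producer_name = "onnx-example-modified"
model.graph.node[0].name = "Conv1_Renamed"

# Repeated fields cannot be replaced wholesale
# (e.g. `model.graph.initializer[0].float_data = [...]` raises an error),
# but individual items can be modified in place ...
model.graph.initializer[0].float_data[0] = 0.5  # first weight value of Conv1_W

# ... and items can be added or removed via the Python binding interfaces.
model.opset_import.extend([onnx.helper.make_opsetid("ai.onnx.ml", 2)])
model.opset_import.pop()  # remove the entry we just appended

onnx.checker.check_model(model)
onnx.save(model, "convnet_modified.onnx")
```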