forked from zzzDavid/ultranet_hls_ref
-
Notifications
You must be signed in to change notification settings - Fork 0
/
Copy pathconv2d.h
253 lines (215 loc) · 7.83 KB
/
conv2d.h
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
#pragma once
#include <hls_stream.h>
#include <ap_int.h>
using namespace hls;
#include "sliding_window_unit.h"
#include "matrix_vector_unit.h"
#include "function.h"
#include "stream_tools.h"
#ifndef __SYNTHESIS__
#include <iostream>
#include <iomanip>
#include <fstream>
/**
 * Debug-only variant of conv3x3_bn_act that additionally writes an
 * operation trace to `ofs` via matrix_vector_act_unit_optrace.
 * Only compiled for C simulation (enclosed in #ifndef __SYNTHESIS__),
 * hence no HLS DATAFLOW pragma is required here.
 *
 * Same-padded 3x3 convolution (K = 3, P = 1, S = 1) fused with
 * batch-norm and quantized activation.
 */
template <
    unsigned IN_ROW,
    unsigned IN_COL,
    unsigned IN_CH,
    unsigned IN_BIT,
    unsigned OUT_CH,
    unsigned OUT_BIT,       // bit width of the quantized activation output
    unsigned W_BIT,
    unsigned M_BIT,
    unsigned INC_BIT,
    unsigned BIAS_BIT,
    unsigned SIMD,
    unsigned PE,
    unsigned L_SHIFT>
void conv3x3_bn_act_optrace(
    stream<ap_uint<IN_BIT * IN_CH> >& in,
    const ap_uint<SIMD*W_BIT> weights[PE][((IN_CH*9)/SIMD)*(OUT_CH/PE)],
    const ap_int<INC_BIT> inc[PE][OUT_CH/PE],
    const ap_int<BIAS_BIT> bias[PE][OUT_CH/PE],
    stream<ap_uint<OUT_BIT*OUT_CH> >& out,
    const unsigned reps,
    std::ofstream &ofs
){
    const unsigned INTER_ROW = IN_ROW + 2;   // padded height
    const unsigned INTER_COL = IN_COL + 2;   // padded width
    const unsigned OUT_ROW = IN_ROW;         // same padding keeps the spatial size
    const unsigned OUT_COL = IN_COL;

    // Zero-pad one pixel on every border.
    stream<ap_uint<IN_CH*IN_BIT> > pad_s("samepad_out");
    padding<IN_ROW, IN_COL, IN_CH, IN_BIT, 1>(in, pad_s, reps);

    // Slide a 3x3, stride-1 window over the padded feature map.
    stream<ap_uint<IN_CH*IN_BIT> > win_s("swu_out");
    SWU<3, 1, INTER_ROW, INTER_COL, IN_CH, IN_BIT>(pad_s, win_s, reps);

    // Narrow each full-channel window word down to the SIMD lane width;
    // 9 window words are produced per output pixel.
    stream<ap_uint<SIMD*IN_BIT> > narrow_s("adj_out");
    StreamingDataWidthConverter_Batch<IN_CH*IN_BIT, SIMD*IN_BIT, 9*OUT_ROW*OUT_COL>(win_s, narrow_s, reps);

    // Matrix-vector product fused with BN + activation, tracing to ofs.
    stream<ap_uint<PE*OUT_BIT> > mv_s("mvau_out");
    matrix_vector_act_unit_optrace<IN_CH*3*3, OUT_CH, IN_BIT, OUT_BIT, W_BIT, M_BIT, INC_BIT, BIAS_BIT, SIMD, PE, L_SHIFT, OUT_ROW*OUT_COL>
        (narrow_s, weights, inc, bias, mv_s, reps, ofs);

    // Widen PE-wide results back to one full-channel word per pixel;
    // the MVAU emits OUT_CH/PE words per pixel.
    StreamingDataWidthConverter_Batch<PE*OUT_BIT, OUT_CH*OUT_BIT, OUT_ROW * OUT_COL * OUT_CH / PE>(mv_s, out, reps);
}
#endif
/**
* 卷积计算单元 同时计算bn_层与激活层
* 在矩阵向量计算后立即计算得到激活输出值
* 只计算 3x3 的卷积 K = 3, P = 1 S = 1
* 输入数据宽度 为 IN_STREAM_BIT
* 输出数据宽度为 PE * OUT_BIT
*/
/**
 * Convolution unit that also computes the batch-norm layer and the
 * activation layer: the activation output is produced immediately after
 * the matrix-vector computation.
 *
 * Only handles 3x3 convolution: K = 3, P = 1, S = 1 (same padding).
 * Input stream width:  IN_CH * IN_BIT per word.
 * Output stream width: OUT_CH * OUT_BIT per word.
 */
template <
    unsigned IN_ROW,
    unsigned IN_COL,
    unsigned IN_CH,
    unsigned IN_BIT,
    unsigned OUT_CH,
    unsigned OUT_BIT,       // bit width of the quantized activation output
    unsigned W_BIT,
    unsigned M_BIT,
    unsigned INC_BIT,
    unsigned BIAS_BIT,
    unsigned SIMD,
    unsigned PE,
    unsigned L_SHIFT>
void conv3x3_bn_act(
    stream<ap_uint<IN_BIT * IN_CH> >& in,
    const ap_uint<SIMD*W_BIT> weights[PE][((IN_CH*9)/SIMD)*(OUT_CH/PE)],
    const ap_int<INC_BIT> inc[PE][OUT_CH/PE],
    const ap_int<BIAS_BIT> bias[PE][OUT_CH/PE],
    stream<ap_uint<OUT_BIT*OUT_CH> >& out,
    const unsigned reps = 1)
{
#pragma HLS DATAFLOW
    const unsigned INTER_ROW = IN_ROW + 2;   // padded height
    const unsigned INTER_COL = IN_COL + 2;   // padded width
    // Same padding: output spatial size equals input spatial size.
    const unsigned OUT_ROW = IN_ROW;
    const unsigned OUT_COL = IN_COL;

    // Stage 1: zero-pad one pixel on every border.
    stream<ap_uint<IN_CH*IN_BIT> > pad_s("samepad_out");
    padding<IN_ROW, IN_COL, IN_CH, IN_BIT, 1>(in, pad_s, reps);

    // Stage 2: 3x3 stride-1 sliding window over the padded map.
    stream<ap_uint<IN_CH*IN_BIT> > win_s("swu_out");
    SWU<3, 1, INTER_ROW, INTER_COL, IN_CH, IN_BIT>(pad_s, win_s, reps);

    // Stage 3: width adjustment — narrow full-channel words to SIMD lanes
    // (9 window words per output pixel).
    stream<ap_uint<SIMD*IN_BIT> > narrow_s("adj_out");
    StreamingDataWidthConverter_Batch<IN_CH*IN_BIT, SIMD*IN_BIT, 9*OUT_ROW*OUT_COL>(win_s, narrow_s, reps);

    // Stage 4: matrix-vector computation fused with BN + activation.
    stream<ap_uint<PE*OUT_BIT> > mv_s("mvau_out");
    matrix_vector_act_unit<IN_CH*3*3, OUT_CH, IN_BIT, OUT_BIT, W_BIT, M_BIT, INC_BIT, BIAS_BIT, SIMD, PE, L_SHIFT, OUT_ROW*OUT_COL>
        (narrow_s, weights, inc, bias, mv_s, reps);

    // Stage 5: widen PE-wide results to one full-channel word per pixel;
    // the MVAU emits OUT_CH/PE words per pixel.
    StreamingDataWidthConverter_Batch<PE*OUT_BIT, OUT_CH*OUT_BIT, OUT_ROW * OUT_COL * OUT_CH / PE>(mv_s, out, reps);
}
/**
* 卷积计算单元 同时计算bn_层与激活层
* 在矩阵向量计算后立即计算得到激活输出值
* 只计算 3x3 的卷积 K = 3, P = 1 S = 1
* 输入数据宽度 为 IN_STREAM_BIT
* 输出数据宽度为 PE * OUT_BIT
* 使用 lut 计算乘法
*/
/**
 * Convolution unit that also computes the batch-norm layer and the
 * activation layer; multiplications are implemented with LUTs
 * (matrix_vector_act_unit_lut) instead of DSPs.
 *
 * Only handles 3x3 convolution: K = 3, P = 1, S = 1 (same padding).
 * Input stream width:  IN_CH * IN_BIT per word.
 * Output stream width: OUT_CH * OUT_BIT per word.
 *
 * NOTE(review): `inc` is ap_uint here but ap_int in conv3x3_bn_act —
 * kept as-is since changing the parameter type would affect callers;
 * confirm whether this asymmetry is intentional.
 */
template <
    unsigned IN_ROW,
    unsigned IN_COL,
    unsigned IN_CH,
    unsigned IN_BIT,
    unsigned OUT_CH,
    unsigned OUT_BIT,       // bit width of the quantized activation output
    unsigned W_BIT,
    unsigned M_BIT,
    unsigned INC_BIT,
    unsigned BIAS_BIT,
    unsigned SIMD,
    unsigned PE,
    unsigned L_SHIFT>
void conv3x3_bn_act_lut(
    stream<ap_uint<IN_BIT * IN_CH> >& in,
    const ap_uint<SIMD*W_BIT> weights[PE][((IN_CH*9)/SIMD)*(OUT_CH/PE)],
    const ap_uint<INC_BIT> inc[PE][OUT_CH/PE],
    const ap_int<BIAS_BIT> bias[PE][OUT_CH/PE],
    stream<ap_uint<OUT_BIT*OUT_CH> >& out,
    const unsigned reps = 1)
{
#pragma HLS DATAFLOW
    const unsigned INTER_ROW = IN_ROW + 2;   // padded height
    const unsigned INTER_COL = IN_COL + 2;   // padded width
    // Same padding: output spatial size equals input spatial size.
    const unsigned OUT_ROW = IN_ROW;
    const unsigned OUT_COL = IN_COL;

    // Zero-pad one pixel on every border.
    stream<ap_uint<IN_CH*IN_BIT> > padding_out("samepad_out");
    padding<IN_ROW, IN_COL, IN_CH, IN_BIT, 1>(in, padding_out, reps);

    // 3x3 stride-1 sliding window over the padded map.
    stream<ap_uint<IN_CH*IN_BIT> > swu_out("swu_out");
    SWU<3, 1, INTER_ROW, INTER_COL, IN_CH, IN_BIT> (padding_out, swu_out, reps);

    // Width adjustment: narrow full-channel words to SIMD lanes
    // (9 window words per output pixel).
    stream<ap_uint<SIMD*IN_BIT> > adj_out("adj_out");
    StreamingDataWidthConverter_Batch<IN_CH*IN_BIT, SIMD*IN_BIT, 9*OUT_ROW*OUT_COL>(swu_out, adj_out, reps);

    // Matrix-vector computation (LUT multipliers) fused with BN + activation.
    stream<ap_uint<PE*OUT_BIT> > mvau_out("mvau_out");
    matrix_vector_act_unit_lut<IN_CH*3*3, OUT_CH, IN_BIT, OUT_BIT, W_BIT, M_BIT, INC_BIT, BIAS_BIT, SIMD, PE, L_SHIFT, OUT_ROW*OUT_COL>
        (adj_out, weights, inc, bias, mvau_out, reps);

    // BUG FIX: the MVAU emits OUT_CH/PE words of PE*OUT_BIT per output pixel,
    // so the converter must consume OUT_ROW*OUT_COL*OUT_CH/PE input words per
    // rep (as in conv3x3_bn_act / conv1x1_bn_act), not OUT_ROW*OUT_COL —
    // the old count drained only a fraction of mvau_out and stalled the
    // dataflow whenever OUT_CH > PE.
    StreamingDataWidthConverter_Batch<PE*OUT_BIT, OUT_CH*OUT_BIT, OUT_ROW * OUT_COL * OUT_CH / PE>(mvau_out, out, reps);
}
/**
* 卷积计算单元 同时计算bn_层与激活层
* 在矩阵向量计算后立即计算得到激活输出值
* 只计算 1x1 的卷积 K = 1, P = 1 S = 1
*/
/**
 * Convolution unit that also computes the batch-norm layer and the
 * activation layer: the activation output is produced immediately after
 * the matrix-vector computation.
 *
 * Only handles 1x1 convolution (K = 1), so no padding or sliding-window
 * stage is needed — each input pixel maps directly to one output pixel.
 */
template <
    unsigned IN_ROW,
    unsigned IN_COL,
    unsigned IN_CH,
    unsigned IN_BIT,
    unsigned OUT_CH,
    unsigned OUT_BIT,       // bit width of the quantized activation output
    unsigned W_BIT,
    unsigned M_BIT,
    unsigned INC_BIT,
    unsigned BIAS_BIT,
    unsigned SIMD,
    unsigned PE,
    unsigned L_SHIFT>
void conv1x1_bn_act(
    stream<ap_uint<IN_BIT * IN_CH> >& in,
    const ap_uint<SIMD*W_BIT> weights[PE][((IN_CH)/SIMD)*(OUT_CH/PE)],
    const ap_uint<INC_BIT> inc[PE][OUT_CH/PE],
    const ap_int<BIAS_BIT> bias[PE][OUT_CH/PE],
    stream<ap_uint<OUT_BIT*OUT_CH> >& out,
    const unsigned reps = 1)
{
#pragma HLS DATAFLOW
    // 1x1 kernel: output spatial size equals input spatial size.
    const unsigned OUT_ROW = IN_ROW;
    const unsigned OUT_COL = IN_COL;

    // Width adjustment: narrow each full-channel pixel word to SIMD lanes
    // (one input word per pixel).
    stream<ap_uint<SIMD*IN_BIT> > narrow_s("adj_out");
    StreamingDataWidthConverter_Batch<IN_CH*IN_BIT, SIMD*IN_BIT, OUT_ROW*OUT_COL>(in, narrow_s, reps);

    // Matrix-vector computation fused with BN + activation.
    stream<ap_uint<PE*OUT_BIT> > mv_s("mvau_out");
    matrix_vector_act_unit<IN_CH, OUT_CH, IN_BIT, OUT_BIT, W_BIT, M_BIT, INC_BIT, BIAS_BIT, SIMD, PE, L_SHIFT, OUT_ROW*OUT_COL>
        (narrow_s, weights, inc, bias, mv_s, reps);

    // Widen PE-wide results back to one full-channel word per pixel;
    // the MVAU emits OUT_CH/PE words per pixel.
    StreamingDataWidthConverter_Batch<PE*OUT_BIT, OUT_CH*OUT_BIT, OUT_ROW * OUT_COL * OUT_CH / PE>(mv_s, out, reps);
}
/**
 * Plain 1x1 convolution: a thin wrapper that forwards the input stream
 * straight into the matrix-vector unit. No padding, sliding window,
 * batch-norm or activation — raw M_BIT accumulator results are emitted,
 * PE values per output word.
 *
 * The caller is expected to deliver the input already narrowed to
 * SIMD*IN_BIT words.
 */
template <
    unsigned IN_ROW,
    unsigned IN_COL,
    unsigned IN_CH,
    unsigned IN_BIT,
    unsigned OUT_CH,
    unsigned W_BIT,
    unsigned M_BIT,
    unsigned SIMD,
    unsigned PE>
void conv1x1(
    stream<ap_uint<IN_BIT * SIMD> >& in,
    const ap_uint<SIMD*W_BIT> weights[PE][((IN_CH*1)/SIMD)*(OUT_CH/PE)],
    stream<ap_uint<PE*M_BIT> >& out,
    const unsigned reps = 1)
{
    // 1x1 kernel keeps the spatial size, so one MVAU invocation covers
    // IN_ROW*IN_COL output pixels per rep.
    matrix_vector_unit<IN_CH, OUT_CH, IN_BIT, W_BIT, M_BIT, SIMD, PE, IN_ROW*IN_COL>
        (in, weights, out, reps);
}