Commit 1bb98fe

added necessary weight sources and non-default precision conversion for GarNet

1 parent 6ea168d · commit 1bb98fe

File tree

.gitignore
hls4ml/backends/vivado/passes/garnet_templates.py
hls4ml/converters/keras/graph.py
hls4ml/model/layers.py

4 files changed: +13 -10 lines

.gitignore

Lines changed: 1 addition & 0 deletions

@@ -13,3 +13,4 @@ docs/_build
 docs/autodoc/*
 hls4mlprj_*
 *~
+*.ipynb_checkpoints/

hls4ml/backends/vivado/passes/garnet_templates.py

Lines changed: 2 additions & 1 deletion

@@ -114,7 +114,8 @@ def format(self, node):
             params[f'{vname}_t'], type_name = node.model.config.get_precision(node, var=vname)
             if type_name.endswith('default_t'):
                 params[f'{vname}_t'] = precision_converter.convert(default_precision).definition_cpp()
-
+            else:
+                params[f'{vname}_t'] = precision_converter.convert(params[f'{vname}_t']).definition_cpp()
             params['output_t'] = node.get_output_variable().type.name
 
             if node.attributes['collapse'] in ['mean', 'max']:
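The added else branch means that a non-default precision configured for a GarNet variable is now also run through precision_converter before being emitted as a C++ type definition, instead of only the backend default. A minimal sketch of the selection logic, using a hypothetical convert_to_cpp helper in place of hls4ml's precision_converter (only the branching mirrors the diff; the helper itself is made up):

# Hypothetical stand-in for precision_converter.convert(...).definition_cpp();
# an illustration of the branch added here, not the actual hls4ml API.
def convert_to_cpp(precision: str) -> str:
    return f'typedef {precision} layer_t;'

def resolve_precision(configured_type: str, type_name: str, default_precision: str) -> str:
    if type_name.endswith('default_t'):
        # No explicit precision configured: fall back to the backend default.
        return convert_to_cpp(default_precision)
    else:
        # Behaviour added in this commit: the user-configured precision is converted too.
        return convert_to_cpp(configured_type)

# Example: a variable with its own precision no longer bypasses the converter.
print(resolve_precision('ap_fixed<18,8>', 'edge_weight_t', 'ap_fixed<16,6>'))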

hls4ml/converters/keras/graph.py

Lines changed: 9 additions & 5 deletions

@@ -46,13 +46,17 @@ def parse_garnet_layer(keras_layer, input_names, input_shapes, data_reader):
     layer['n_sublayers'] = keras_layer['config']['n_sublayers']
     layer['n_in_features'] = [input_shapes[0][2]]
 
-    for il in range(1, layer['n_sublayers']):
-        layer['n_in_features'].append(layer['n_out_features'][il - 1])
+    for il in range(layer['n_sublayers']):
+        if il > 0:
+            layer['n_in_features'].append(layer['n_out_features'][il - 1])
 
         weights_source = [
-            f'S{il}_kernel',
-            f'S{il}_bias',
-            f'Fout{il}_bias',
+            f'FLR{il}_kernel',
+            f'FLR{il}_bias',
+            f'S{il}_kernel',
+            f'S{il}_bias',
+            f'Fout{il}_kernel',
+            f'Fout{il}_bias',
         ]
         for weight in weights_source:
            layer[weight + '_data'] = get_weights_data(data_reader, layer['name'], weight)
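With the loop now starting at sublayer 0 and the kernel entries added, the parser requests a full kernel/bias pair for each of the three per-sublayer weight groups (FLR, S, Fout) from the weight reader. A small sketch of the names it now expects, assuming FLR/S/Fout denote the input-feature, aggregator-distance and output transforms of GarNet (our reading of the prefixes, not stated in the diff):

# Weight names requested per sublayer after this commit (example with 2 sublayers).
n_sublayers = 2

expected_weights = [
    f'{prefix}{il}_{part}'
    for il in range(n_sublayers)
    for prefix in ('FLR', 'S', 'Fout')
    for part in ('kernel', 'bias')
]
print(expected_weights)
# ['FLR0_kernel', 'FLR0_bias', 'S0_kernel', 'S0_bias', 'Fout0_kernel', 'Fout0_bias',
#  'FLR1_kernel', 'FLR1_bias', 'S1_kernel', 'S1_bias', 'Fout1_kernel', 'Fout1_bias']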

hls4ml/model/layers.py

Lines changed: 1 addition & 4 deletions

@@ -1182,10 +1182,7 @@ def _initialize_transforms(self):
 
     def _make_input_transform_weights(self, n_propagate, n_aggregators, n_out_features, quantize=False, sublayer=''):
         # Due to linearity of the input transform, input weights and biases can be contracted away at conversion time
-
-        output_transform_kernel = self.get_attr(
-            f'Fout{sublayer}_kernel_data'
-        ) # [(n_aggregators, n_propagate), n_out_features]
+        output_transform_kernel = self.get_attr(f'Fout{sublayer}_kernel_data') # [(n_aggregators, n_propagate), n_out_features]
         output_transform_kernel = output_transform_kernel.reshape((n_aggregators, n_propagate, n_out_features))
         if quantize:
             output_transform_kernel = self.get_attr('quantizer')(output_transform_kernel)
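The reflowed line keeps the shape comment: the Fout kernel is stored flattened over the (aggregator, propagate) axes and is unpacked into three dimensions before the input transform is contracted into it. A self-contained NumPy illustration of that reshape, with made-up sizes (the real values come from the layer attributes):

import numpy as np

# Made-up sizes for illustration only.
n_aggregators, n_propagate, n_out_features = 4, 8, 16

# As read from the Keras weights: shape (n_aggregators * n_propagate, n_out_features).
flat_kernel = np.random.default_rng(0).normal(size=(n_aggregators * n_propagate, n_out_features))

# The layer unpacks it into the three-dimensional form used when contracting
# the (linear) input transform into the output transform.
kernel = flat_kernel.reshape((n_aggregators, n_propagate, n_out_features))
print(kernel.shape)  # (4, 8, 16)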
