
Commit 425a116

[pre-commit.ci] auto fixes from pre-commit hooks
Parent commit: 14f7dac


4 files changed (+15, -17 lines)


hls4ml/converters/pytorch/convolution.py

Lines changed: 6 additions & 6 deletions
@@ -17,7 +17,7 @@ def parse_conv1d_layer(operation, layer_name, input_names, input_shapes, node, c
     layer['class_name'] = 'Conv1D'
     layer['data_format'] = 'channels_first'  # Pytorch default (can't change)

-    if "Quant" in operation:
+    if 'Quant' in operation:
         if class_object.weight_quant.is_quant_enabled:
             width = int(class_object.quant_weight().bit_width)
             scale = class_object.quant_weight().scale.detach().numpy()
@@ -32,8 +32,8 @@ def parse_conv1d_layer(operation, layer_name, input_names, input_shapes, node, c
                 )
             else:
                 raise Exception(
-                    '''Non-power of 2 quantization of weights not supported when injecting brevitas models.
-                    Please used QONNX instead.'''
+                    """Non-power of 2 quantization of weights not supported when injecting brevitas models.
+                    Please used QONNX instead."""
                 )
         else:
             layer['weight_data'] = class_object.weight.data.numpy()
@@ -100,7 +100,7 @@ def parse_conv2d_layer(operation, layer_name, input_names, input_shapes, node, c
     layer['class_name'] = 'Conv2D'
     layer['data_format'] = 'channels_first'  # Pytorch default (can't change)

-    if "Quant" in operation:
+    if 'Quant' in operation:
         if class_object.weight_quant.is_quant_enabled:
             width = int(class_object.quant_weight().bit_width)
             scale = class_object.quant_weight().scale.detach().numpy()
@@ -115,8 +115,8 @@ def parse_conv2d_layer(operation, layer_name, input_names, input_shapes, node, c
                 )
             else:
                 raise Exception(
-                    '''Non-power of 2 quantization of weights not supported when injecting brevitas models.
-                    Please used QONNX instead.'''
+                    """Non-power of 2 quantization of weights not supported when injecting brevitas models.
+                    Please used QONNX instead."""
                 )
         # layer = addQuantizationParameters(layer, class_object.quant_weight(), 'weight')
         # layer['weight_data'] = class_object.quant_weight().detach().value.numpy()
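
The `raise Exception` branches above fire when the Brevitas weight scale is not a power of two. For context, a minimal sketch of such a check, assuming `scale` is the numpy array obtained from `class_object.quant_weight().scale.detach().numpy()`; the helper name `scale_is_power_of_two` is hypothetical and not part of the hls4ml parser.

import numpy as np


def scale_is_power_of_two(scale):
    # True only when every scale factor is an exact power of two,
    # i.e. log2(scale) is an integer for every element.
    log2_scale = np.log2(np.asarray(scale, dtype=np.float64))
    return bool(np.all(log2_scale == np.round(log2_scale)))


# Example: a per-channel scale of 0.25 (= 2**-2) passes, 0.3 does not.
assert scale_is_power_of_two(np.array([0.25, 0.5]))
assert not scale_is_power_of_two(np.array([0.3]))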

hls4ml/model/optimizer/passes/brevitas_optimizer.py

Lines changed: 1 addition & 3 deletions
@@ -5,7 +5,7 @@


 class BrevitasInputOutputOptimizer(OptimizerPass):
-    '''Takes nodes parsed from brevitas and inserts Quant nodes into the model if necessary'''
+    """Takes nodes parsed from brevitas and inserts Quant nodes into the model if necessary"""

     def match(self, node):
         if ('output_quantization' in node.attributes.keys() and not len(node.attributes['output_quantization']) == 0) or (
@@ -16,7 +16,6 @@ def match(self, node):
         return False

     def transform(self, model, node):
-
         # See if Quant layer needs to be added for the output
         if 'output_quantization' in node.attributes.keys() and not len(node.attributes['output_quantization']) == 0:
             print(node.attributes['output_quantization'])
@@ -39,7 +38,6 @@ def transform(self, model, node):
             node.attributes['output_quantization'] = {}

         elif 'input_quantization' in node.attributes.keys() and not len(node.attributes['input_quantization']) == 0:
-
             attributes = {}

             # Other attributes
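
For context, hls4ml optimizer passes follow the `match`/`transform` pair seen in `BrevitasInputOutputOptimizer` above. A minimal sketch of that pattern, with a hypothetical pass name and a deliberately simplified transform (the real pass constructs a Quant node and inserts it into the model graph):

from hls4ml.model.optimizer import OptimizerPass


class ExampleOutputQuantPass(OptimizerPass):
    # Hypothetical pass, for illustration only; not part of hls4ml.

    def match(self, node):
        # Fire only for nodes that still carry a non-empty 'output_quantization' attribute.
        return len(node.attributes.get('output_quantization', {})) > 0

    def transform(self, model, node):
        # A real transform would build a Quant node from the stored parameters and
        # insert it after `node`; this sketch only clears the attribute so the pass
        # stops matching. Returning True signals that the model graph was modified.
        node.attributes['output_quantization'] = {}
        return True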

test/pytest/test_brevitas_parsing.py

Lines changed: 7 additions & 7 deletions
@@ -95,11 +95,11 @@ def test_quantconv1d(backend, io_type):
     if io_type == 'io_stream':
         x = np.ascontiguousarray(x.permute(0, 2, 1))
         config = config_from_pytorch_model(
-            model, (None, n_in, size_in), channels_last_conversion="internal", transpose_outputs=False
+            model, (None, n_in, size_in), channels_last_conversion='internal', transpose_outputs=False
         )
     else:
         config = config_from_pytorch_model(
-            model, (None, n_in, size_in), channels_last_conversion="full", transpose_outputs=True
+            model, (None, n_in, size_in), channels_last_conversion='full', transpose_outputs=True
         )

     output_dir = str(test_root_path / f'hls4mlprj_brevitas_conv1d_{backend}_{io_type}')
@@ -156,11 +156,11 @@ def test_quantconv2d(backend, io_type):
     if io_type == 'io_stream':
         x = np.ascontiguousarray(x.permute(0, 2, 3, 1))
         config = config_from_pytorch_model(
-            model, (None, n_in, size_in_height, size_in_width), channels_last_conversion="internal", transpose_outputs=False
+            model, (None, n_in, size_in_height, size_in_width), channels_last_conversion='internal', transpose_outputs=False
         )
     else:
         config = config_from_pytorch_model(
-            model, (None, n_in, size_in_height, size_in_width), channels_last_conversion="full", transpose_outputs=True
+            model, (None, n_in, size_in_height, size_in_width), channels_last_conversion='full', transpose_outputs=True
         )

     output_dir = str(test_root_path / f'hls4mlprj_brevitas_conv2d_{backend}_{io_type}')
@@ -270,7 +270,7 @@ def test_pytorch_upsampling1d(data_1d, io_type, backend):
         model,
         (None, in_feat, in_width),
         default_precision='ap_fixed<16,6>',
-        channels_last_conversion="internal",
+        channels_last_conversion='internal',
         transpose_outputs=False,
     )
     odir = str(test_root_path / f'hls4mlprj_pytorch_upsampling_1d_{backend}_{io_type}')
@@ -300,7 +300,7 @@ def test_pytorch_upsampling2d(data_2d, io_type, backend):
         model,
         (in_feat, in_height, in_width),
         default_precision='ap_fixed<16,6>',
-        channels_last_conversion="full",  # With conversion to channels_last
+        channels_last_conversion='full',  # With conversion to channels_last
         transpose_outputs=True,
     )
     odir = str(test_root_path / f'hls4mlprj_pytorch_upsampling_2d_{backend}_{io_type}')
@@ -338,7 +338,7 @@ def test_brevitas_quanteltwiseadd(io_type, backend):
         model,
         [(None, 4, 4), (None, 4, 4)],
         default_precision='ap_fixed<16,6>',
-        channels_last_conversion="off",
+        channels_last_conversion='off',
         transpose_outputs=False,
     )
     odir = str(test_root_path / f'hls4mlprj_brevitas_quanteltwiseadd_{backend}_{io_type}')
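
The tests above exercise `config_from_pytorch_model` with the three `channels_last_conversion` modes ('full', 'internal', 'off'). A minimal sketch of that call pattern, assuming a plain (non-Brevitas) PyTorch module; the model, shapes, backend, and output directory are illustrative only:

import torch.nn as nn

from hls4ml.converters import convert_from_pytorch_model
from hls4ml.utils.config import config_from_pytorch_model

# Illustrative model; the tests above use Brevitas quantized layers instead.
model = nn.Sequential(nn.Conv1d(3, 8, kernel_size=3), nn.ReLU())

config = config_from_pytorch_model(
    model,
    (None, 3, 16),  # (batch, channels, width), channels_first as in PyTorch
    default_precision='ap_fixed<16,6>',
    channels_last_conversion='internal',  # or 'full' / 'off', as in the tests above
    transpose_outputs=False,
)

hls_model = convert_from_pytorch_model(
    model,
    hls_config=config,
    output_dir='hls4mlprj_example',  # illustrative path
    backend='Vivado',
    io_type='io_stream',
)
# hls_model.compile() / hls_model.build() would follow in a real test.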

test/pytest/test_recurrent_brevitas.py

Lines changed: 1 addition & 1 deletion
@@ -55,7 +55,7 @@ def test_rnn(backend, io_type):
     config = config_from_pytorch_model(
         model,
         [(None, 1, 10), (None, 1, 20)],
-        channels_last_conversion="off",
+        channels_last_conversion='off',
         transpose_outputs=False,
         default_precision='fixed<32,16>',
     )
