Skip to content

Commit 9c94c09

Browse files
authored
put code_gen.h in custom namespace (#1104)
* isolate code_gen in namespace
* fix silly typo
* apply changes to template from PR #1171
1 parent 723195e commit 9c94c09

File tree

7 files changed

+61
-41
lines changed

7 files changed

+61
-41
lines changed

hls4ml/backends/fpga/fpga_backend.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -755,7 +755,7 @@ def generate_conv1d_line_buffer_fn(self, layer_idx, n_partitions, in_W, in_C, ke
755755

756756
generated_code = (
757757
"template<class data_T, typename CONFIG_T>\n"
758-
"class fill_buffer_{index} : public FillConv1DBuffer<data_T, CONFIG_T> {{\n"
758+
"class fill_buffer_{index} : public nnet::FillConv1DBuffer<data_T, CONFIG_T> {{\n"
759759
" public:\n"
760760
" static void fill_buffer(\n"
761761
" data_T data[CONFIG_T::in_width * CONFIG_T::n_chan],\n"
@@ -885,7 +885,7 @@ def generate_conv2d_line_buffer_fn(
885885

886886
generated_code = (
887887
"template<class data_T, typename CONFIG_T>\n"
888-
"class fill_buffer_{index} : public FillConv2DBuffer<data_T, CONFIG_T> {{\n"
888+
"class fill_buffer_{index} : public nnet::FillConv2DBuffer<data_T, CONFIG_T> {{\n"
889889
" public:\n"
890890
" static void fill_buffer(\n"
891891
" data_T data[CONFIG_T::in_height * CONFIG_T::in_width * CONFIG_T::n_chan],\n"

hls4ml/backends/fpga/passes/hgq_proxy_model.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -75,10 +75,12 @@ def transform(self, model, node: FixedPointQuantizer):
7575
class ProcessFixedPointQuantizerCall(FunctionCallTemplate):
7676
def __init__(self):
7777
super().__init__(FixedPointQuantizer, include_header=[])
78-
self.template = 'nnet::{name}<{input_t}, {output_t}>({input}, {output});'
78+
self.template = '{namespace}::{name}<{input_t}, {output_t}>({input}, {output});'
7979

8080
def format(self, node):
8181
params = self._default_function_params(node)
82+
namespace = node.model.config.writer_config.get('Namespace', None) or 'nnet'
83+
params['namespace'] = namespace
8284

8385
return self.template.format(**params)
8486

hls4ml/backends/template.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -62,6 +62,7 @@ def _default_config_params(self, layer):
6262
params = self._default_params(layer)
6363
params['iotype'] = layer.model.config.get_config_value('IOType')
6464
params['reuse'] = layer.get_attr('reuse_factor')
65+
params['namespace'] = layer.model.config.get_writer_config().get('Namespace', None) or 'nnet'
6566

6667
return params
6768

hls4ml/backends/vivado/passes/convolution_templates.py

Lines changed: 32 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@
2323
typedef {bias_t.name} bias_t;
2424
typedef {weight_t.name} weight_t;
2525
template<class data_T, class res_T, class CONFIG_T>
26-
using kernel = nnet::{dense_function}<data_T, res_T, CONFIG_T>;
26+
using kernel = {dense_function}<data_T, res_T, CONFIG_T>;
2727
template<class x_T, class y_T>
2828
using product = nnet::product::{product_type}<x_T, y_T>;
2929
}};\n"""
@@ -53,7 +53,7 @@
5353
static const unsigned n_partitions = {n_partitions};
5454
static const unsigned n_pixels = out_width / n_partitions;
5555
template<class data_T, class CONFIG_T>
56-
using fill_buffer = nnet::{fill_fn}<data_T, CONFIG_T>;
56+
using fill_buffer = {fill_fn}<data_T, CONFIG_T>;
5757
typedef {accum_t.name} accum_t;
5858
typedef {bias_t.name} bias_t;
5959
typedef {weight_t.name} weight_t;
@@ -91,9 +91,10 @@ def format(self, node):
9191
params['scale_index_type'] = 'scale_index_regular'
9292

9393
if node.model.config.get_config_value('IOType') == 'io_parallel':
94-
params['fill_fn'] = f'fill_buffer_{node.index}'
94+
namespace = params['namespace']
95+
params['fill_fn'] = f'{namespace}::fill_buffer_{node.index}'
9596
else:
96-
params['fill_fn'] = 'FillConv1DBuffer'
97+
params['fill_fn'] = 'nnet::FillConv1DBuffer'
9798

9899
is_pointwise_parallel_latency = (
99100
node.get_attr('filt_width') == 1
@@ -127,16 +128,18 @@ def format(self, node):
127128
node.get_input_variable().type.precision, node.get_weights('weight').type.precision
128129
)
129130

131+
namespace = params['namespace']
132+
130133
if node.get_attr('strategy').lower() == 'latency':
131-
mult_params['dense_function'] = 'DenseLatency'
134+
mult_params['dense_function'] = 'nnet::DenseLatency'
132135
elif node.get_attr('strategy').lower() == 'resource':
133136
if int(mult_params['reuse_factor']) <= int(mult_params['n_in']):
134-
mult_params['dense_function'] = 'DenseResource_rf_leq_nin'
137+
mult_params['dense_function'] = 'nnet::DenseResource_rf_leq_nin'
135138
else:
136-
mult_params['dense_function'] = 'DenseResource_rf_gt_nin_rem0'
139+
mult_params['dense_function'] = 'nnet::DenseResource_rf_gt_nin_rem0'
137140
# The 3rd case is never used
138141
elif node.get_attr('strategy').lower() == 'resource_unrolled':
139-
mult_params['dense_function'] = f'dense_resource_unrolled_{node.index}'
142+
mult_params['dense_function'] = f'{namespace}::dense_resource_unrolled_{node.index}'
140143

141144
mult_config = self.mult_template.format(**mult_params)
142145

@@ -194,7 +197,7 @@ def __init__(self):
194197
static const unsigned n_partitions = {n_partitions};
195198
static const unsigned n_pixels = out_height * out_width / n_partitions;
196199
template<class data_T, class CONFIG_T>
197-
using fill_buffer = nnet::{fill_fn}<data_T, CONFIG_T>;
200+
using fill_buffer = {fill_fn}<data_T, CONFIG_T>;
198201
typedef {accum_t.name} accum_t;
199202
typedef {bias_t.name} bias_t;
200203
typedef {weight_t.name} weight_t;
@@ -238,9 +241,10 @@ def format(self, node):
238241
params['scale_index_width_type'] = 'scale_index_regular'
239242

240243
if node.model.config.get_config_value('IOType') == 'io_parallel':
241-
params['fill_fn'] = f'fill_buffer_{node.index}'
244+
namespace = params['namespace']
245+
params['fill_fn'] = f'{namespace}::fill_buffer_{node.index}'
242246
else:
243-
params['fill_fn'] = 'FillConv2DBuffer'
247+
params['fill_fn'] = 'nnet::FillConv2DBuffer'
244248

245249
params['min_height'] = node.get_attr('min_height', node.get_attr('in_height'))
246250
params['min_width'] = node.get_attr('min_width', node.get_attr('in_width'))
@@ -256,16 +260,17 @@ def format(self, node):
256260
node.get_input_variable().type.precision, node.get_weights('weight').type.precision
257261
)
258262

263+
namespace = params['namespace']
259264
if node.get_attr('strategy').lower() == 'latency':
260-
mult_params['dense_function'] = 'DenseLatency'
265+
mult_params['dense_function'] = 'nnet::DenseLatency'
261266
elif node.get_attr('strategy').lower() == 'resource':
262267
if int(mult_params['reuse_factor']) <= int(mult_params['n_in']):
263-
mult_params['dense_function'] = 'DenseResource_rf_leq_nin'
268+
mult_params['dense_function'] = 'nnet::DenseResource_rf_leq_nin'
264269
else:
265-
mult_params['dense_function'] = 'DenseResource_rf_gt_nin_rem0'
270+
mult_params['dense_function'] = 'nnet::DenseResource_rf_gt_nin_rem0'
266271
# The 3rd case is never used
267272
elif node.get_attr('strategy').lower() == 'resource_unrolled':
268-
mult_params['dense_function'] = f'dense_resource_unrolled_{node.index}'
273+
mult_params['dense_function'] = f'{namespace}::dense_resource_unrolled_{node.index}'
269274

270275
mult_config = self.mult_template.format(**mult_params)
271276

@@ -341,9 +346,10 @@ def format(self, node):
341346
params['weight_t'] = node.get_weights('depthwise').type
342347
params['bias_t'] = node.get_weights('zero_bias').type
343348
if node.model.config.get_config_value('IOType') == 'io_parallel':
344-
params['fill_fn'] = f'fill_buffer_{node.index}_dw'
349+
namespace = params['namespace']
350+
params['fill_fn'] = f'{namespace}::fill_buffer_{node.index}_dw'
345351
else:
346-
params['fill_fn'] = 'FillConv1DBuffer'
352+
params['fill_fn'] = 'nnet::FillConv1DBuffer'
347353

348354
if node.get_attr('unscaled'):
349355
params['scale_index_type'] = 'scale_index_unscaled'
@@ -387,9 +393,10 @@ def format(self, node):
387393
params['min_width'] = params['in_width']
388394
params['instructions'] = '0'
389395
if node.model.config.get_config_value('IOType') == 'io_parallel':
390-
params['fill_fn'] = f'fill_buffer_{node.index}_pw'
396+
namespace = params['namespace']
397+
params['fill_fn'] = f'{namespace}::fill_buffer_{node.index}_pw'
391398
else:
392-
params['fill_fn'] = 'FillConv1DBuffer'
399+
params['fill_fn'] = 'nnet::FillConv1DBuffer'
393400

394401
if node.get_attr('unscaled'):
395402
params['scale_index_type'] = 'scale_index_unscaled'
@@ -474,9 +481,10 @@ def format(self, node):
474481
params['index'] = str(node.index) + '_depthwise'
475482
params['weight_t'] = node.get_weights('depthwise').type
476483
if node.model.config.get_config_value('IOType') == 'io_parallel':
477-
params['fill_fn'] = f'fill_buffer_{node.index}_dw'
484+
namespace = params['namespace']
485+
params['fill_fn'] = f'{namespace}::fill_buffer_{node.index}_dw'
478486
else:
479-
params['fill_fn'] = 'FillConv2DBuffer'
487+
params['fill_fn'] = 'nnet::FillConv2DBuffer'
480488

481489
if node.get_attr('unscaled_h'):
482490
params['scale_index_height_type'] = 'scale_index_unscaled'
@@ -528,9 +536,10 @@ def format(self, node):
528536
params['min_width'] = params['in_width']
529537
params['instructions'] = '0'
530538
if node.model.config.get_config_value('IOType') == 'io_parallel':
531-
params['fill_fn'] = f'fill_buffer_{node.index}_pw'
539+
namespace = params['namespace']
540+
params['fill_fn'] = f'{namespace}::fill_buffer_{node.index}_pw'
532541
else:
533-
params['fill_fn'] = 'FillConv2DBuffer'
542+
params['fill_fn'] = 'nnet::FillConv2DBuffer'
534543

535544
if node.get_attr('unscaled_h'):
536545
params['scale_index_height_type'] = 'scale_index_unscaled'

hls4ml/backends/vivado/passes/core_templates.py

Lines changed: 7 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,7 @@
2020
typedef {weight_t.name} weight_t;
2121
typedef {index_t.name} index_t;
2222
template<class data_T, class res_T, class CONFIG_T>
23-
using kernel = nnet::{dense_function}<data_T, res_T, CONFIG_T>;
23+
using kernel = {dense_function}<data_T, res_T, CONFIG_T>;
2424
template<class x_T, class y_T>
2525
using product = nnet::product::{product_type}<x_T, y_T>;
2626
}};\n"""
@@ -43,16 +43,18 @@ def format(self, node):
4343
node.get_input_variable().type.precision, node.get_weights('weight').type.precision
4444
)
4545

46+
namespace = params['namespace']
47+
4648
if node.get_attr('strategy').lower() == 'latency':
47-
params['dense_function'] = 'DenseLatency'
49+
params['dense_function'] = 'nnet::DenseLatency'
4850
elif node.get_attr('strategy').lower() == 'resource':
4951
if int(params['reuse_factor']) <= int(params['n_in']):
50-
params['dense_function'] = 'DenseResource_rf_leq_nin'
52+
params['dense_function'] = 'nnet::DenseResource_rf_leq_nin'
5153
else:
52-
params['dense_function'] = 'DenseResource_rf_gt_nin_rem0'
54+
params['dense_function'] = 'nnet::DenseResource_rf_gt_nin_rem0'
5355
# The 3rd case is never used
5456
elif node.get_attr('strategy').lower() == 'resource_unrolled':
55-
params['dense_function'] = f'dense_resource_unrolled_{node.index}'
57+
params['dense_function'] = f'{namespace}::dense_resource_unrolled_{node.index}'
5658

5759
return self.template.format(**params)
5860

hls4ml/backends/vivado/passes/recurrent_templates.py

Lines changed: 12 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -17,7 +17,7 @@
1717
typedef {bias_t.name} bias_t;
1818
typedef {weight_t.name} weight_t;
1919
template<class data_T, class res_T, class CONFIG_T>
20-
using kernel = nnet::{dense_function}<data_T, res_T, CONFIG_T>;
20+
using kernel = {dense_function}<data_T, res_T, CONFIG_T>;
2121
template<class x_T, class y_T>
2222
using product = nnet::product::{product_type}<x_T, y_T>;
2323
}};\n"""
@@ -35,7 +35,7 @@
3535
typedef {recurrent_bias_t.name} bias_t;
3636
typedef {recurrent_weight_t.name} weight_t;
3737
template<class data_T, class res_T, class CONFIG_T>
38-
using kernel = nnet::{dense_function}<data_T, res_T, CONFIG_T>;
38+
using kernel = {dense_function}<data_T, res_T, CONFIG_T>;
3939
template<class x_T, class y_T>
4040
using product = nnet::product::{product_type}<x_T, y_T>;
4141
}};\n"""
@@ -161,16 +161,18 @@ def format(self, node):
161161
mult_params1['nzeros'] = node.get_weights('weight').nzeros
162162
mult_params1['nonzeros'] = node.get_weights('weight').nonzeros
163163

164+
namespace = params['namespace']
165+
164166
if node.get_attr('strategy').lower() == 'latency':
165-
mult_params1['dense_function'] = 'DenseLatency'
167+
mult_params1['dense_function'] = 'nnet::DenseLatency'
166168
elif node.get_attr('strategy').lower() == 'resource':
167169
if int(mult_params1['reuse_factor']) <= int(mult_params1['n_in']):
168-
mult_params1['dense_function'] = 'DenseResource_rf_leq_nin'
170+
mult_params1['dense_function'] = 'nnet::DenseResource_rf_leq_nin'
169171
else:
170-
mult_params1['dense_function'] = 'DenseResource_rf_gt_nin_rem0'
172+
mult_params1['dense_function'] = 'nnet::DenseResource_rf_gt_nin_rem0'
171173
# The 3rd case is never used
172174
elif node.get_attr('strategy').lower() == 'resource_unrolled':
173-
mult_params1['dense_function'] = f'dense_resource_unrolled_{node.index}_1'
175+
mult_params1['dense_function'] = f'{namespace}::dense_resource_unrolled_{node.index}_1'
174176

175177
if node.get_attr('return_sequences'):
176178
mult_params2['n_in'] = node.get_output_variable().shape[1]
@@ -187,15 +189,15 @@ def format(self, node):
187189
mult_params2['nonzeros'] = node.get_weights('recurrent_weight').nonzeros
188190

189191
if node.get_attr('strategy').lower() == 'latency':
190-
mult_params2['dense_function'] = 'DenseLatency'
192+
mult_params2['dense_function'] = 'nnet::DenseLatency'
191193
elif node.get_attr('strategy').lower() == 'resource':
192194
if int(mult_params2['reuse_factor']) <= int(mult_params2['n_in']):
193-
mult_params2['dense_function'] = 'DenseResource_rf_leq_nin'
195+
mult_params2['dense_function'] = 'nnet::DenseResource_rf_leq_nin'
194196
else:
195-
mult_params2['dense_function'] = 'DenseResource_rf_gt_nin_rem0'
197+
mult_params2['dense_function'] = 'nnet::DenseResource_rf_gt_nin_rem0'
196198
# The 3rd case is never used
197199
elif node.get_attr('strategy').lower() == 'resource_unrolled':
198-
mult_params2['dense_function'] = f'dense_resource_unrolled_{node.index}_2'
200+
mult_params2['dense_function'] = f'{namespace}::dense_resource_unrolled_{node.index}_2'
199201

200202
mult_config1 = self.mult1_template.format(**mult_params1)
201203
mult_config2 = self.mult2_template.format(**mult_params2)

hls4ml/writer/vivado_writer.py

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -792,6 +792,7 @@ def write_generated_code(self, model):
792792
contents = f.readlines()
793793
f.close()
794794
f = open(path, 'w')
795+
namespace = model.config.get_writer_config().get('Namespace', None)
795796

796797
for line in contents:
797798
if '// hls4ml insert code' in line:
@@ -801,6 +802,9 @@ def write_generated_code(self, model):
801802
newline += str(generated_code)
802803
else:
803804
newline = line
805+
if namespace is not None:
806+
if 'namespace nnet' in newline:
807+
newline = newline.replace('namespace nnet', f'namespace {namespace}')
804808
f.write(newline)
805809
f.close()
806810

0 commit comments

Comments (0)