Skip to content

Commit

Permalink
Merge remote-tracking branch 'hls4ml-central/master' into bnn
Browse files Browse the repository at this point in the history
Conflicts:
	hls-writer/hls_writer.py
	keras-to-hls/keras-to-hls.py
  • Loading branch information
jngadiub committed Jan 14, 2019
2 parents e81e98b + f0d92f8 commit abdb2b6
Show file tree
Hide file tree
Showing 18 changed files with 1,326 additions and 47 deletions.
26 changes: 23 additions & 3 deletions hls-template/build_prj.tcl
Original file line number Diff line number Diff line change
Expand Up @@ -10,14 +10,22 @@ array set opt {

# Parse command-line overrides for the option array: each entry of $::argv of
# the form "<name>=<value>" updates opt(<name>) in place.
foreach arg $::argv {
foreach o [lsort [array names opt]] {
# NOTE(review): the next two lines look like the before/after pair of the
# same statement from a rendered diff ("$o +(...)" vs "$o=+(...)"); only the
# "=" form should exist in the real script -- confirm against the repository.
regexp "$o +(\\w+)" $arg unused opt($o)
regexp "$o=+(\\w+)" $arg unused opt($o)
}
}

# Report the wall-clock duration of a build step.
#
# op_name    -- label printed in the report line (e.g. "C SIMULATION")
# time_start -- start timestamp in milliseconds (from [clock clicks -milliseconds])
# time_end   -- end timestamp in milliseconds
#
# Prints "***** <op_name> COMPLETED IN <h>h<m>m<s>s *****" to stdout.
# Hours wrap at 24 because of the % 24 -- acceptable for build timing.
proc report_time { op_name time_start time_end } {
    # Brace the expr expressions: prevents double substitution of the
    # variables and lets Tcl byte-compile the arithmetic (standard Tcl
    # best practice; behavior is unchanged for these numeric inputs).
    set time_taken [expr {$time_end - $time_start}]
    set time_s [expr {($time_taken / 1000) % 60}]
    set time_m [expr {($time_taken / (1000*60)) % 60}]
    set time_h [expr {($time_taken / (1000*60*60)) % 24}]
    puts "***** ${op_name} COMPLETED IN ${time_h}h${time_m}m${time_s}s *****"
}

# Create a fresh HLS project and register the design and testbench sources.
open_project -reset myproject_prj
set_top myproject
# NOTE(review): the next four add_files lines appear to be the before/after
# pair from a rendered diff (without vs. with -std=c++0x); only the
# -std=c++0x variants should remain in the actual script -- confirm.
add_files firmware/myproject.cpp -cflags "-I[file normalize nnet_utils]"
add_files -tb myproject_test.cpp -cflags "-I[file normalize nnet_utils]"
add_files firmware/myproject.cpp -cflags "-I[file normalize nnet_utils] -std=c++0x"
add_files -tb myproject_test.cpp -cflags "-I[file normalize nnet_utils] -std=c++0x"
# Weight header files are needed by the testbench at run time.
add_files -tb firmware/weights
#add_files -tb tb_data
open_solution -reset "solution1"
Expand All @@ -27,19 +35,31 @@ create_clock -period 5 -name default

# Optionally run C simulation, timing the step with report_time.
if {$opt(csim)} {
puts "***** C SIMULATION *****"
set time_start [clock clicks -milliseconds]
csim_design
set time_end [clock clicks -milliseconds]
report_time "C SIMULATION" $time_start $time_end
}

# Optionally run C/RTL synthesis; co-simulation and IP export are only
# meaningful after synthesis, so both are nested under this branch.
if {$opt(synth)} {
puts "***** C/RTL SYNTHESIS *****"
set time_start [clock clicks -milliseconds]
csynth_design
set time_end [clock clicks -milliseconds]
report_time "C/RTL SYNTHESIS" $time_start $time_end
# RTL co-simulation against the C testbench, with full waveform tracing.
if {$opt(cosim)} {
puts "***** C/RTL SIMULATION *****"
set time_start [clock clicks -milliseconds]
cosim_design -trace_level all
set time_end [clock clicks -milliseconds]
report_time "C/RTL SIMULATION" $time_start $time_end
}
# Package the synthesized design as an IP catalog entry.
if {$opt(export)} {
puts "***** EXPORT IP *****"
set time_start [clock clicks -milliseconds]
export_design -format ip_catalog
set time_end [clock clicks -milliseconds]
report_time "EXPORT IP" $time_start $time_end
}
}

Expand Down
1 change: 1 addition & 0 deletions hls-template/firmware/myproject.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,7 @@
#include "nnet_conv2d.h"
#include "nnet_batchnorm.h"
#include "nnet_activation.h"
#include "nnet_pooling.h"

//hls-fpga-machine-learning insert weights

Expand Down
1 change: 1 addition & 0 deletions hls-template/firmware/parameters.h
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@
#include "nnet_activation.h"
#include "nnet_common.h"
#include "nnet_batchnorm.h"
#include "nnet_pooling.h"

//hls-fpga-machine-learning insert numbers

Expand Down
140 changes: 135 additions & 5 deletions hls-writer/hls_writer.py
Original file line number Diff line number Diff line change
Expand Up @@ -62,6 +62,8 @@ def hls_writer(layer_list, yamlConfig):
newline += '#include "weights/beta{}.h"\n'.format(i)
newline += '#include "weights/scale{}.h"\n'.format(i)
newline += '#include "weights/mean{}.h"\n'.format(i)
elif 'Pooling' in layer_list[i-1]['class_name']:
pass # No weights for pooling
else:
if layer_list[i-1]['n_part']>1:
for i_part in range(layer_list[i-1]['n_part']):
Expand Down Expand Up @@ -156,6 +158,16 @@ def hls_writer(layer_list, yamlConfig):
in_height = 'IN_HEIGHT_{}'.format(i)
in_width = 'IN_WIDTH_{}'.format(i)
n_chan = 'N_CHAN_{}'.format(i)
#Pooling layer
elif 'Pooling' in layer_list[i-1]['class_name']:
input_type = 'layer{}_t'.format(i-1)
input_object = 'layer{}_out'.format(i-1)
output_object = 'layer{}_out'.format(i)
in_height = 'IN_HEIGHT_{}'.format(i)
in_width = 'IN_WIDTH_{}'.format(i)
out_height = 'OUT_HEIGHT_{}'.format(i)
out_width = 'OUT_WIDTH_{}'.format(i)
n_filt = 'N_FILT_{}'.format(i)
#Currently doesn't allow all combinations


Expand Down Expand Up @@ -204,9 +216,9 @@ def hls_writer(layer_list, yamlConfig):
if( i!=len(layer_list) ):
if layer_list[i-1]['class_name']=='Dense' or layer_list[i-1]['class_name']=='BinaryDense' or layer_list[i-1]['class_name']=='TernaryDense' or (layer_list[i-1]['class_name']=='BatchNormalization' and is_dense) or (layer_list[i-1]['class_name'] in activation_layers and is_dense):
newline += ' {} layer{}_out[{}];\n'.format(output_type,i,n_out)
elif layer_list[i-1]['class_name']=='Conv1D':
elif layer_list[i-1]['class_name']=='Conv1D' or 'Pooling1D' in layer_list[i-1]['class_name']:
newline += ' {} layer{}_out[{}*{}];\n'.format(output_type,i,y_out,n_filt)
elif layer_list[i-1]['class_name']=='Conv2D':
elif layer_list[i-1]['class_name']=='Conv2D' or 'Pooling2D' in layer_list[i-1]['class_name']:
newline += ' {} layer{}_out[{}*{}*{}];\n'.format(output_type,i,out_height,out_width,n_filt)
elif layer_list[i-1]['class_name']=='BatchNormalization' and is_conv2d:
if i!= 1: newline += ' {} layer{}_out[{}*{}*{}];\n'.format(output_type,i,out_height,out_width,n_filt)
Expand Down Expand Up @@ -312,7 +324,34 @@ def hls_writer(layer_list, yamlConfig):
newline += ' nnet::normalize<{}, {}, config{}>(logits{}, {}, scale{}, beta{}, mean{});\n'.format(output_type, output_type, i, i, output_object, i, i, i)
elif layer_list[i-1]['class_name'] == 'BatchNormalization' and is_conv2d:
newline += ' nnet::normalize<{}, {}, config{}>({}, {}, scale{}, beta{}, mean{});\n'.format(input_type, output_type, i, input_object, output_object, i, i, i)

elif 'Pooling' in layer_list[i-1]['class_name']:
info = layer_list[i-1]['class_name'].split('Pooling')
d = int(info[1].split('D')[0]) # n dimensions
if d == 1:
newline += ' nnet::pooling1d<{}, config{}>({}, {});\n'.format(input_type, i, input_object, output_object)
elif d == 2:
# Unflatten if needed: if the last layer is activation or batchnorm
unflatten = layer_list[i-2]['class_name'] in activation_layers
unflatten |= 'activation' in layer_list[i-2].keys()
unflatten |= layer_list[i-2]['class_name'] == 'BatchNormalization'
if unflatten:
# Add the unflatten layer
inshape = ''.join('[{0}]'.format(dim) for dim in [in_height, in_width, n_filt])
newline += ' {} pool2d_layer{}_in{};\n'.format(input_type, i, inshape)
if yamlConfig["IOType"] == "io_parallel": newline += ' #pragma HLS ARRAY_PARTITION variable=pool2d_layer{}_in complete dim=0\n'.format(i)
if yamlConfig["IOType"] == "io_serial": newline += ' #pragma HLS STREAM variable=pool2d_layer{}_in depth=1\n'.format(i)
newline += ' nnet::unflatten<{}, {}, {}, {}>({}, pool2d_layer{}_in);\n'.format(input_type, in_height, in_width, n_filt, input_object, i)
outshape = ''.join('[{0}]'.format(dim) for dim in [out_height, out_width, n_filt])
newline += ' {} pool2d_layer{}_out{};\n'.format(input_type, i, outshape)
if yamlConfig["IOType"] == "io_parallel": newline += ' #pragma HLS ARRAY_PARTITION variable=pool2d_layer{}_out complete dim=0\n'.format(i)
if yamlConfig["IOType"] == "io_serial": newline += ' #pragma HLS STREAM variable=pool2d_layer{}_out depth=1\n'.format(i)
# Do the pooling layer
newline += ' nnet::pooling2d<{}, config{i}>(pool2d_layer{i}_in, pool2d_layer{i}_out);\n'.format(input_type, i=i)
else:
newline += ' nnet::pooling2d<{}, config{i}>({}, {});\n'.format(input_type, i, input_object, output_object)
# Flatten the pooling output
newline += ' nnet::flatten<{}, {}, {}, {}>(pool2d_layer{}_out, layer{}_out);\n'.format(input_type, out_height, out_width, n_filt, i, i)

#Activations
if layer_list[i-1]['class_name'] in activation_layers or 'activation' in layer_list[i-1].keys():
if layer_list[i-1]['class_name'] not in activation_layers:
Expand Down Expand Up @@ -464,6 +503,34 @@ def hls_writer(layer_list, yamlConfig):
static const unsigned io_type = nnet::{iotype};
}};\n"""

pooling1d_config_template = """struct config{index} : nnet::pooling1d_config {{
static const unsigned n_in = {n_in};
static const unsigned pool_size = {pool_size};
static const unsigned n_out = {n_out};
static const unsigned pad_left = {pad_left};
static const unsigned pad_right = {pad_right};
static const unsigned stride = {stride};
static const nnet::Pool_Op pool_op = nnet::{Op};
}};\n"""

pooling2d_config_template = """struct config{index} : nnet::pooling2d_config {{
static const unsigned in_height = {in_height};
static const unsigned in_width = {in_width};
static const unsigned n_filt = {n_filt};
static const unsigned stride_height = {stride_height};
static const unsigned stride_width = {stride_width};
static const unsigned pool_height = {pool_height};
static const unsigned pool_width = {pool_width};
static const unsigned out_height = {out_height};
static const unsigned out_width = {out_width};
static const unsigned pad_top = {pad_top};
static const unsigned pad_bottom = {pad_bottom};
static const unsigned pad_left = {pad_left};
static const unsigned pad_right = {pad_right};
static const nnet::Pool_Op pool_op = nnet::{Op};
static const unsigned reuse = {reuse};
}};\n
"""

for line in f.readlines():

Expand Down Expand Up @@ -524,7 +591,26 @@ def hls_writer(layer_list, yamlConfig):
newline += '#define N_LAYER_{} {}\n'.format(i, layer_list[i-1]['n_out'])
newline += '#define OUT_HEIGHT_{} {}\n'.format(i, layer_list[i-1]['in_height'])
newline += '#define OUT_WIDTH_{} {}\n'.format(i, layer_list[i-1]['in_width'])
newline += '#define N_FILT_{} {}\n'.format(i, layer_list[i-1]['n_filt'])
newline += '#define N_FILT_{} {}\n'.format(i, layer_list[i-1]['n_filt'])
elif 'Pooling' in layer_list[i-1]['class_name']:
info = layer_list[i-1]['class_name'].split('Pooling')
d = int(info[1].split('D')[0])
op = info[0]
if d == 1:
newline += '#define Y_INPUTS_{} {}\n'.format(i, layer_list[i-1]['y_in'])
newline += '#define Y_OUTPUTS_{} {}\n'.format(i, layer_list[i-1]['y_out'])
newline += '#define POOL_SIZE_{} {}\n'.format(i, layer_list[i-1]['pool_size'])
elif d == 2:
newline += '#define IN_HEIGHT_{} {}\n'.format(i, layer_list[i-1]['in_height'])
newline += '#define IN_WIDTH_{} {}\n'.format(i, layer_list[i-1]['in_width'])
newline += '#define OUT_HEIGHT_{} {}\n'.format(i, layer_list[i-1]['out_height'])
newline += '#define OUT_WIDTH_{} {}\n'.format(i, layer_list[i-1]['out_width'])
newline += '#define POOL_HEIGHT_{} {}\n'.format(i, layer_list[i-1]['pool_height'])
newline += '#define POOL_WIDTH_{} {}\n'.format(i, layer_list[i-1]['pool_width'])
newline += '#define N_FILT_{} {}\n'.format(i, layer_list[i-1]['n_filt'])
newline += '#define N_LAYER_{} {}\n'.format(i, layer_list[i-1]['n_out'])


elif '//hls-fpga-machine-learning insert layer-precision' in line:
newline = line
for i in range(1,len(layer_list)):
Expand Down Expand Up @@ -586,6 +672,21 @@ def hls_writer(layer_list, yamlConfig):
layer_in_name = "OUT_HEIGHT_{}*OUT_WIDTH_{}*N_FILT_{}".format(i-1, i-1, i-1)
layer_out_name = "N_LAYER_{}".format(i)
layer_n_filt_name = "N_FILT_{}".format(i-1)
elif 'Pooling' in layer_list[i-1]['class_name']:
info = layer_list[i-1]['class_name'].split('Pooling')
d = int(info[1].split('D')[0])
op = info[0]
if d == 1:
layer_y_in_name = "Y_INPUTS_{}".format(i)
layer_y_out_name = "Y_OUTPUTS_{}".format(i)
layer_n_filt_name = "N_FILT_{}".format(i)
elif d == 2:
layer_in_height_name = "IN_HEIGHT_{}".format(i)
layer_in_width_name = "IN_WIDTH_{}".format(i)
layer_out_height_name = "OUT_HEIGHT_{}".format(i)
layer_out_width_name = "OUT_WIDTH_{}".format(i)
layer_n_filt_name = "N_FILT_{}".format(i)
layer_in_name = "N_LAYER_{}".format(i-1)
if layer_list[i-1]['class_name']=='Dense' or layer_list[i-1]['class_name']=='BinaryDense' or layer_list[i-1]['class_name']=='TernaryDense':
if layer_list[i-1]['n_part']==1:
newline += dense_config_template.format(index=str(i),
Expand Down Expand Up @@ -664,14 +765,43 @@ def hls_writer(layer_list, yamlConfig):
index=str(i),
n_in='{}*{}*{}'.format(layer_out_height_name,layer_out_width_name,layer_n_filt_name),
iotype=yamlConfig["IOType"])
elif 'Pooling' in layer_list[i-1]['class_name']:
info = layer_list[i-1]['class_name'].split('Pooling')
d = int(info[1].split('D')[0])
op = info[0]
if d == 1:
newline += pooling1d_config_template.format(index=str(i),
n_in=layer_n_in,
n_out=layer_n_out,
stride=layer_list[i-1]['stride'],
pool_size=layer_list[i-1]['pool_size'],
pad_left=layer_list[i-1]['pad_left'],
pad_right=layer_list[i-1]['pad_right'],
Op=op)
elif d == 2:
newline += pooling2d_config_template.format(index=str(i),
in_height=layer_in_height_name,
in_width=layer_in_width_name,
out_height=layer_out_height_name,
out_width=layer_out_width_name,
n_filt=layer_n_filt_name,
stride_height=layer_list[i-1]['stride_height'],
stride_width=layer_list[i-1]['stride_width'],
pool_height=layer_list[i-1]['pool_height'],
pool_width=layer_list[i-1]['pool_width'],
pad_left=layer_list[i-1]['pad_left'],
pad_right=layer_list[i-1]['pad_right'],
pad_top=layer_list[i-1]['pad_top'],
pad_bottom=layer_list[i-1]['pad_bottom'],
Op=op,
reuse=yamlConfig["ReuseFactor"])

else:
newline = line
fout.write(newline)
f.close()
fout.close()


###################
## test bench
###################
Expand Down
Loading

0 comments on commit abdb2b6

Please sign in to comment.