Skip to content

Commit 85bedda

Browse files
committed
hps_accel: Add standalone pool test
Adds standalone test for data pooling. Signed-off-by: Alan Green <[email protected]>
1 parent 21a4efb commit 85bedda

File tree

8 files changed

+4510
-38
lines changed

8 files changed

+4510
-38
lines changed

proj/hps_accel/src/conv2d_call.cc

+2-1
Original file line numberDiff line numberDiff line change
@@ -36,11 +36,12 @@ void test_conv2d(const Conv2DData* data) {
3636

3737
const tflite::RuntimeShape& output_shape =
3838
*(reinterpret_cast<const tflite::RuntimeShape*>(data->output_shape));
39+
3940
tflite::reference_integer_ops::ConvPerChannel(
4041
*(reinterpret_cast<const tflite::ConvParams*>(data->params)),
4142
reinterpret_cast<const int32_t*>(data->output_multiplier),
4243
reinterpret_cast<const int32_t*>(data->output_shift), input_shape,
43-
reinterpret_cast<const int8_t*>(arena_input),
44+
arena_input,
4445
*(reinterpret_cast<const tflite::RuntimeShape*>(data->filter_shape)),
4546
reinterpret_cast<const int8_t*>(data->filter_data),
4647
*(reinterpret_cast<const tflite::RuntimeShape*>(data->bias_shape)),

proj/hps_accel/src/pool_03.cc

+4,313
Large diffs are not rendered by default.

proj/hps_accel/src/pool_03.h

+26
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,26 @@
1+
/*
2+
* Copyright 2021 The CFU-Playground Authors
3+
*
4+
* Licensed under the Apache License, Version 2.0 (the "License");
5+
* you may not use this file except in compliance with the License.
6+
* You may obtain a copy of the License at
7+
*
8+
* http://www.apache.org/licenses/LICENSE-2.0
9+
*
10+
* Unless required by applicable law or agreed to in writing, software
11+
* distributed under the License is distributed on an "AS IS" BASIS,
12+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13+
* See the License for the specific language governing permissions and
14+
* limitations under the License.
15+
*/
16+
17+
#ifndef _POOL_03_H
18+
#define _POOL_03_H
19+
20+
#include <cstdint>
21+
22+
#include "pool_call.h"
23+
24+
extern PoolData pool_03_data;
25+
26+
#endif // _POOL_03_H

proj/hps_accel/src/pool_call.cc

+69
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,69 @@
1+
/*
2+
* Copyright 2022 The CFU-Playground Authors
3+
*
4+
* Licensed under the Apache License, Version 2.0 (the "License");
5+
* you may not use this file except in compliance with the License.
6+
* You may obtain a copy of the License at
7+
*
8+
* http://www.apache.org/licenses/LICENSE-2.0
9+
*
10+
* Unless required by applicable law or agreed to in writing, software
11+
* distributed under the License is distributed on an "AS IS" BASIS,
12+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13+
* See the License for the specific language governing permissions and
14+
* limitations under the License.
15+
*/
16+
17+
#include "pool_call.h"
18+
19+
#include <cstdio>
20+
21+
#include "playground_util/dump.h"
22+
#include "tensorflow/lite/kernels/internal/reference/integer_ops/pooling.h"
23+
#include "tflite.h"
24+
25+
void test_pool(const PoolData* data) {
26+
printf("Testing Pool %s\n", data->name);
27+
// Copy input arena
28+
int8_t* arena_input = reinterpret_cast<int8_t*>(tflite_tensor_arena);
29+
auto input_shape =
30+
*(reinterpret_cast<const tflite::RuntimeShape*>(data->input_shape));
31+
for (int i = 0; i < input_shape.FlatSize(); i++) {
32+
arena_input[i] = data->input_data[i];
33+
}
34+
// Set up output arena
35+
int8_t* arena_output =
36+
reinterpret_cast<int8_t*>(tflite_tensor_arena) + 128 * 1024;
37+
const tflite::RuntimeShape& output_shape =
38+
*(reinterpret_cast<const tflite::RuntimeShape*>(data->output_shape));
39+
40+
tflite::reference_integer_ops::MaxPool(
41+
*(reinterpret_cast<const tflite::PoolParams*>(data->params)), input_shape,
42+
arena_input, output_shape, arena_output);
43+
44+
// Check for differences with output
45+
int diff_count = 0;
46+
int first_diff = 0;
47+
int num_words = output_shape.FlatSize() / 4;
48+
const int32_t* arena_words = reinterpret_cast<const int32_t*>(arena_output);
49+
const int32_t* expected_words =
50+
reinterpret_cast<const int32_t*>(data->output_data);
51+
for (int i = 0; i < num_words; i++) {
52+
if (arena_words[i] != expected_words[i]) {
53+
diff_count++;
54+
if (diff_count == 1) {
55+
first_diff = i;
56+
}
57+
}
58+
}
59+
60+
if (diff_count == 0) {
61+
printf("OK - output identical to golden output\n");
62+
} else {
63+
printf("FAIL - %d differences, first at word %d\n", diff_count, first_diff);
64+
printf("actual:\n");
65+
dump_hex(arena_words + first_diff, 16);
66+
printf("expected:\n");
67+
dump_hex(expected_words + first_diff, 16);
68+
}
69+
}

proj/hps_accel/src/pool_call.h

+36
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,36 @@
1+
/*
2+
* Copyright 2022 The CFU-Playground Authors
3+
*
4+
* Licensed under the Apache License, Version 2.0 (the "License");
5+
* you may not use this file except in compliance with the License.
6+
* You may obtain a copy of the License at
7+
*
8+
* http://www.apache.org/licenses/LICENSE-2.0
9+
*
10+
* Unless required by applicable law or agreed to in writing, software
11+
* distributed under the License is distributed on an "AS IS" BASIS,
12+
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13+
* See the License for the specific language governing permissions and
14+
* limitations under the License.
15+
*/
16+
17+
#ifndef _POOL_CALL_H
18+
#define _POOL_CALL_H
19+
20+
#include <cstdint>
21+
22+
// Functionality for calling MaxPool from a test harness
23+
24+
struct PoolData {
25+
const char* name;
26+
const uint8_t* params;
27+
const uint8_t* input_shape;
28+
const uint8_t* input_data;
29+
const uint8_t* output_shape;
30+
const uint8_t* output_data;
31+
};
32+
33+
// Tests MaxPool with the data in the given structure
34+
void test_pool(const PoolData* data);
35+
36+
#endif // _POOL_CALL_H

proj/hps_accel/src/proj_menu.cc

+5-2
Original file line numberDiff line numberDiff line change
@@ -28,10 +28,11 @@
2828
#include "conv2d_23.h"
2929
#include "conv2d_23p.h"
3030
#include "conv2d_call.h"
31-
#include "fixedpoint/fixedpoint.h"
3231
#include "hps_cfu.h"
3332
#include "menu.h"
3433
#include "playground_util/random.h"
34+
#include "pool_03.h"
35+
#include "pool_call.h"
3536

3637
namespace {
3738

@@ -78,6 +79,7 @@ void do_test_layer_04(void) { test_conv2d(&conv2d_layer_04_data); }
7879
void do_test_layer_05(void) { test_conv2d(&conv2d_layer_05_data); }
7980
void do_test_layer_06(void) { test_conv2d(&conv2d_layer_06_data); }
8081
void do_test_layer_23p(void) { test_conv2d(&conv2d_layer_23p_data); }
82+
void do_test_pool_03(void) { test_pool(&pool_03_data); }
8183

8284
struct Menu MENU = {
8385
"Project Menu",
@@ -97,7 +99,8 @@ struct Menu MENU = {
9799
MENU_ITEM('4', "test layer 04", do_test_layer_04),
98100
MENU_ITEM('5', "test layer 05", do_test_layer_05),
99101
MENU_ITEM('6', "test layer 06", do_test_layer_06),
100-
MENU_ITEM('P', "test layer 23p", do_test_layer_23p),
102+
MENU_ITEM('7', "test layer 23p", do_test_layer_23p),
103+
MENU_ITEM('a', "test pool 03", do_test_pool_03),
101104
MENU_END,
102105
},
103106
};

proj/hps_accel/src/tensorflow/lite/kernels/internal/reference/integer_ops/conv.h

+1-1
Original file line numberDiff line numberDiff line change
@@ -226,7 +226,7 @@ inline void ConvPerChannel(
226226
output_shape, output_data);
227227
}
228228

229-
#ifdef SHOW_OUTPUT_HASHES
229+
#ifdef SHOW_CONV_HASHES
230230
static int hash_layer = 0;
231231
int32_t hash = murmurhash3_32(reinterpret_cast<uint8_t*>(output_data),
232232
output_shape.FlatSize());

proj/hps_accel/src/tensorflow/lite/kernels/internal/reference/integer_ops/pooling.h

+58-34
Original file line numberDiff line numberDiff line change
@@ -18,6 +18,7 @@ limitations under the License.
1818
#include <cstdio>
1919
#include <limits>
2020

21+
#include "playground_util/murmurhash.h"
2122
#include "tensorflow/lite/kernels/internal/common.h"
2223
#include "tensorflow/lite/kernels/internal/reference/integer_ops/pooling_accel.h"
2324

@@ -84,40 +85,11 @@ inline bool AveragePool(const PoolParams& params,
8485
return true;
8586
}
8687

87-
inline void MaxPool(const PoolParams& params, const RuntimeShape& input_shape,
88-
const int8_t* input_data, const RuntimeShape& output_shape,
89-
int8_t* output_data) {
90-
#if SHOW_MAX_POOL_PARAMS
91-
// padding_width, padding_height,
92-
// stride_width, stride_height, filter_height, filter_width,
93-
// quantized_activation_min, quantized_activation_max,
94-
// input_shape[0], input_shape[1], input_shape[2], input_shape[3],
95-
// output_shape[0], output_shape[1], output_shape[2], output_shape[3]
96-
printf("\n");
97-
const auto& padding = params.padding_values;
98-
printf("%d, %d, ", padding.width, padding.height);
99-
printf("%d, %d, %d, %d, ", params.stride_height, params.stride_width,
100-
params.filter_height, params.filter_width);
101-
printf("%ld, %ld, ", params.quantized_activation_min,
102-
params.quantized_activation_max);
103-
printf("%ld, %ld, %ld, %ld, ", input_shape.Dims(0), input_shape.Dims(1),
104-
input_shape.Dims(2), input_shape.Dims(3));
105-
printf("%ld, %ld, %ld, %ld, ", output_shape.Dims(0), output_shape.Dims(1),
106-
output_shape.Dims(2), output_shape.Dims(3));
107-
108-
printf("\n");
109-
#endif
110-
111-
#ifdef ACCEL_MAX_POOL
112-
#if GATEWARE_GEN != 2
113-
#error MAX_POOL op requires gateware gen 2
114-
#endif
115-
if (CanAccelerateMaxPool(params, input_shape, output_shape)) {
116-
return AccelerateMaxPool(params, input_shape, input_data, output_shape,
117-
output_data);
118-
}
119-
#endif
120-
88+
inline void UnacceleratedMaxPool(const PoolParams& params,
89+
const RuntimeShape& input_shape,
90+
const int8_t* input_data,
91+
const RuntimeShape& output_shape,
92+
int8_t* output_data) {
12193
TFLITE_DCHECK_LE(params.quantized_activation_min,
12294
params.quantized_activation_max);
12395
TFLITE_DCHECK_GE(params.quantized_activation_min,
@@ -172,6 +144,58 @@ inline void MaxPool(const PoolParams& params, const RuntimeShape& input_shape,
172144
}
173145
}
174146

147+
inline void MaxPool(const PoolParams& params, const RuntimeShape& input_shape,
148+
const int8_t* input_data, const RuntimeShape& output_shape,
149+
int8_t* output_data) {
150+
#if SHOW_MAX_POOL_PARAMS
151+
// padding_width, padding_height,
152+
// stride_width, stride_height, filter_height, filter_width,
153+
// quantized_activation_min, quantized_activation_max,
154+
// input_shape[0], input_shape[1], input_shape[2], input_shape[3],
155+
// output_shape[0], output_shape[1], output_shape[2], output_shape[3]
156+
printf("\n");
157+
const auto& padding = params.padding_values;
158+
printf("%d, %d, ", padding.width, padding.height);
159+
printf("%d, %d, %d, %d, ", params.stride_height, params.stride_width,
160+
params.filter_height, params.filter_width);
161+
printf("%ld, %ld, ", params.quantized_activation_min,
162+
params.quantized_activation_max);
163+
printf("%ld, %ld, %ld, %ld, ", input_shape.Dims(0), input_shape.Dims(1),
164+
input_shape.Dims(2), input_shape.Dims(3));
165+
printf("%ld, %ld, %ld, %ld, ", output_shape.Dims(0), output_shape.Dims(1),
166+
output_shape.Dims(2), output_shape.Dims(3));
167+
168+
printf("\n");
169+
#endif
170+
171+
bool accelerated = false;
172+
173+
#ifdef ACCEL_MAX_POOL
174+
#if GATEWARE_GEN != 2
175+
#error MAX_POOL op requires gateware gen 2
176+
#endif
177+
accelerated = CanAccelerateMaxPool(params, input_shape, output_shape);
178+
if (accelerated) {
179+
AccelerateMaxPool(params, input_shape, input_data, output_shape,
180+
output_data);
181+
}
182+
#endif
183+
if (!accelerated) {
184+
UnacceleratedMaxPool(params, input_shape, input_data, output_shape,
185+
output_data);
186+
}
187+
188+
#ifdef SHOW_POOL_HASHES
189+
static int hash_layer = 0;
190+
int32_t input_hash = murmurhash3_32(reinterpret_cast<const uint8_t*>(input_data),
191+
input_shape.FlatSize());
192+
int32_t output_hash = murmurhash3_32(reinterpret_cast<const uint8_t*>(output_data),
193+
output_shape.FlatSize());
194+
printf("%3d, %08lx, %08lx\n", hash_layer, input_hash, output_hash);
195+
hash_layer++;
196+
#endif
197+
}
198+
175199
inline bool AveragePool(const PoolParams& params,
176200
const RuntimeShape& input_shape,
177201
const int16_t* input_data,

0 commit comments

Comments
 (0)