"""
Preparing model:
- Install bazel ( check tensorflow's github for more info )
Ubuntu 14.04:
- Requirements:
sudo add-apt-repository ppa:webupd8team/java
sudo apt-get update
sudo apt-get install oracle-java8-installer
- Download bazel, ( https://github.com/bazelbuild/bazel/releases )
tested on: https://github.com/bazelbuild/bazel/releases/download/0.2.0/bazel-0.2.0-jdk7-installer-linux-x86_64.sh
- chmod +x PATH_TO_INSTALL.SH
- ./PATH_TO_INSTALL.SH --user
- Place bazel onto path ( exact path to store shown in the output)
- For retraining, prepare folder structure as
- root_folder_name
- class 1
- file1
- file2
- class 2
- file1
- file2
- Clone tensorflow
- Go to root of tensorflow
- bazel build tensorflow/examples/image_retraining:retrain
- bazel-bin/tensorflow/examples/image_retraining/retrain --image_dir /path/to/root_folder_name --output_graph /path/output_graph.pb --output_labels /path/output_labels.txt --bottleneck_dir /path/bottleneck
** Training done. **
For testing through bazel,
bazel build tensorflow/examples/label_image:label_image && \
bazel-bin/tensorflow/examples/label_image/label_image \
--graph=/path/output_graph.pb --labels=/path/output_labels.txt \
--output_layer=final_result \
--image=/path/to/test/image
For testing through python, change and run this code.
"""
import numpy as np
import tensorflow as tf

imagePath = '/tmp/imagenet/flower.jpg'
modelFullPath = '/tmp/output_graph.pb'
labelsFullPath = '/tmp/output_labels.txt'


def create_graph():
    """Creates a graph from the saved GraphDef file."""
    # Creates graph from saved graph_def.pb.
    with tf.gfile.FastGFile(modelFullPath, 'rb') as f:
        graph_def = tf.GraphDef()
        graph_def.ParseFromString(f.read())
        _ = tf.import_graph_def(graph_def, name='')


def run_inference_on_image():
    answer = None

    if not tf.gfile.Exists(imagePath):
        tf.logging.fatal('File does not exist %s', imagePath)
        return answer

    image_data = tf.gfile.FastGFile(imagePath, 'rb').read()

    # Creates graph from saved GraphDef.
    create_graph()

    with tf.Session() as sess:
        # Feed the raw JPEG bytes to the 'DecodeJpeg/contents' input and read
        # the retrained softmax output ('final_result').
        softmax_tensor = sess.graph.get_tensor_by_name('final_result:0')
        predictions = sess.run(softmax_tensor,
                               {'DecodeJpeg/contents:0': image_data})
        predictions = np.squeeze(predictions)

        top_k = predictions.argsort()[-5:][::-1]  # Getting top 5 predictions

        with open(labelsFullPath, 'r') as f:
            labels = [line.strip() for line in f.readlines()]

        for node_id in top_k:
            human_string = labels[node_id]
            score = predictions[node_id]
            print('%s (score = %.5f)' % (human_string, score))

        answer = labels[top_k[0]]
        return answer


if __name__ == '__main__':
    run_inference_on_image()
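# Hypothetical usage sketch: run_inference_on_image() rebuilds the graph and
# opens a new Session on every call. For classifying many images, one option
# is to import the graph once and reuse a single Session, roughly like this
# (the image paths here are placeholders):
#
#   create_graph()
#   with tf.Session() as sess:
#       softmax_tensor = sess.graph.get_tensor_by_name('final_result:0')
#       for path in ['/tmp/img1.jpg', '/tmp/img2.jpg']:
#           image_data = tf.gfile.FastGFile(path, 'rb').read()
#           preds = np.squeeze(sess.run(softmax_tensor,
#                                       {'DecodeJpeg/contents:0': image_data}))
#           print(path, preds.argmax())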