
Commit d92e584

committed: Created with Colaboratory
1 parent 57280cb commit d92e584

File tree

1 file changed (+381, -0 lines)


MNIST_Android.ipynb

Lines changed: 381 additions & 0 deletions

Notebook metadata: nbformat 4, Python 3 (3.8.2) kernel, Colab notebook "MNIST-Android.ipynb", GPU accelerator.
[Open In Colab](https://colab.research.google.com/github/FelosRG/MNIST-Android/blob/main/MNIST_Android.ipynb)

# MNIST KERAS
In [1]:

```python
# CHECK WHICH DEVICE WE ARE WORKING ON

from tensorflow.python.client import device_lib
print(device_lib.list_local_devices())
```

Output:

```
[name: "/device:CPU:0"
device_type: "CPU"
memory_limit: 268435456
locality {
}
incarnation: 2726610591092874326
, name: "/device:XLA_CPU:0"
device_type: "XLA_CPU"
memory_limit: 17179869184
locality {
}
incarnation: 10041975974662050738
physical_device_desc: "device: XLA_CPU device"
, name: "/device:XLA_GPU:0"
device_type: "XLA_GPU"
memory_limit: 17179869184
locality {
}
incarnation: 3570962965847598056
physical_device_desc: "device: XLA_GPU device"
, name: "/device:GPU:0"
device_type: "GPU"
memory_limit: 14640891840
locality {
  bus_id: 1
  links {
  }
}
incarnation: 12700630950342469278
physical_device_desc: "device: 0, name: Tesla T4, pci bus id: 0000:00:04.0, compute capability: 7.5"
]
```
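As an aside (not part of the original notebook), the same check can be done with the public `tf.config` API in TensorFlow 2.x, which is less verbose than `device_lib`; a minimal sketch:

```python
import tensorflow as tf

# List the physical GPUs visible to TensorFlow (an empty list means CPU-only).
gpus = tf.config.list_physical_devices("GPU")
print("GPUs visible:", gpus)

# Optionally ask TensorFlow to grow GPU memory on demand instead of reserving it all upfront.
for gpu in gpus:
    tf.config.experimental.set_memory_growth(gpu, True)
```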
In [2]:

```python
# SETUP

import numpy as np
from tensorflow import keras
from tensorflow.keras import layers
import tensorflow as tf
```
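Not in the original notebook, but if you want runs to be easier to reproduce, seeding the random number generators right after the imports is a common addition (exact results can still vary on GPU); a small sketch:

```python
import numpy as np
import tensorflow as tf

SEED = 42  # arbitrary value, chosen here only for illustration

np.random.seed(SEED)      # NumPy RNG
tf.random.set_seed(SEED)  # TensorFlow ops such as weight initialization and dropout
```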
In [3]:

```python
# DATA PREPARATION

num_classes = 10
input_shape = (28, 28, 1)

# Split into the train and test sets
(x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()

# Normalization
x_train = x_train.astype("float32") / 255
x_test = x_test.astype("float32") / 255

# Reshape the tensors to the expected shape (add the channel axis).
x_train = np.expand_dims(x_train, -1)
x_test = np.expand_dims(x_test, -1)

print("x_train shape:", x_train.shape)
print(x_train.shape[0], "train samples")
print(x_test.shape[0], "test samples")

# Convert the labels to one-hot encoding
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
```

Output:

```
Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist.npz
11493376/11490434 [==============================] - 0s 0us/step
x_train shape: (60000, 28, 28, 1)
60000 train samples
10000 test samples
```
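A quick sanity check (not in the original notebook) that the preprocessing did what we expect, assuming the variables from the cell above are in scope:

```python
# Pixels are now float32 in [0, 1] and labels are one-hot vectors of length 10.
assert x_train.dtype == "float32"
assert 0.0 <= x_train.min() and x_train.max() <= 1.0
assert y_train.shape == (60000, 10)

# Every one-hot row has exactly one active class.
assert (y_train.sum(axis=1) == 1).all()

print("first training label:", y_train[0].argmax())
```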
In [4]:

```python
# MODEL

model = keras.Sequential(
    [
        keras.Input(shape=input_shape),
        layers.Conv2D(32, kernel_size=(3, 3), activation="relu"),
        layers.MaxPooling2D(pool_size=(2, 2)),
        layers.Conv2D(64, kernel_size=(3, 3), activation="relu"),
        layers.MaxPooling2D(pool_size=(2, 2)),
        layers.Flatten(),
        layers.Dropout(0.5),
        layers.Dense(num_classes, activation="softmax"),
    ]
)

model.summary()
```

Output:

```
Model: "sequential"
_________________________________________________________________
Layer (type)                 Output Shape              Param #
=================================================================
conv2d (Conv2D)              (None, 26, 26, 32)        320
_________________________________________________________________
max_pooling2d (MaxPooling2D) (None, 13, 13, 32)        0
_________________________________________________________________
conv2d_1 (Conv2D)            (None, 11, 11, 64)        18496
_________________________________________________________________
max_pooling2d_1 (MaxPooling2 (None, 5, 5, 64)          0
_________________________________________________________________
flatten (Flatten)            (None, 1600)              0
_________________________________________________________________
dropout (Dropout)            (None, 1600)              0
_________________________________________________________________
dense (Dense)                (None, 10)                16010
=================================================================
Total params: 34,826
Trainable params: 34,826
Non-trainable params: 0
_________________________________________________________________
```
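The parameter counts in the summary can be checked by hand, which is a useful sanity check on the architecture; a short sketch (not in the original notebook) that recomputes them:

```python
# Conv2D parameters = (kernel_h * kernel_w * in_channels) * filters + filters (biases)
conv1 = (3 * 3 * 1) * 32 + 32    # 320
conv2 = (3 * 3 * 32) * 64 + 64   # 18496

# Spatial sizes: 28 -> 26 (conv) -> 13 (pool) -> 11 (conv) -> 5 (pool),
# so Flatten produces 5 * 5 * 64 = 1600 features.
dense = 1600 * 10 + 10           # 16010

print(conv1, conv2, dense, conv1 + conv2 + dense)  # 320 18496 16010 34826
```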
In [5]:

```python
# TRAINING

batch_size = 128
epochs = 15

model.compile(loss="categorical_crossentropy", optimizer="adam", metrics=["accuracy"])

model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, validation_split=0.1)
```

Output:

```
Epoch 1/15
422/422 [==============================] - 2s 5ms/step - loss: 0.3643 - accuracy: 0.8892 - val_loss: 0.0829 - val_accuracy: 0.9785
Epoch 2/15
422/422 [==============================] - 2s 5ms/step - loss: 0.1144 - accuracy: 0.9656 - val_loss: 0.0626 - val_accuracy: 0.9828
Epoch 3/15
422/422 [==============================] - 2s 4ms/step - loss: 0.0865 - accuracy: 0.9737 - val_loss: 0.0466 - val_accuracy: 0.9858
Epoch 4/15
422/422 [==============================] - 2s 4ms/step - loss: 0.0725 - accuracy: 0.9775 - val_loss: 0.0422 - val_accuracy: 0.9878
Epoch 5/15
422/422 [==============================] - 2s 4ms/step - loss: 0.0632 - accuracy: 0.9804 - val_loss: 0.0376 - val_accuracy: 0.9902
Epoch 6/15
422/422 [==============================] - 2s 4ms/step - loss: 0.0556 - accuracy: 0.9827 - val_loss: 0.0363 - val_accuracy: 0.9898
Epoch 7/15
422/422 [==============================] - 2s 4ms/step - loss: 0.0520 - accuracy: 0.9840 - val_loss: 0.0365 - val_accuracy: 0.9905
Epoch 8/15
422/422 [==============================] - 2s 4ms/step - loss: 0.0475 - accuracy: 0.9854 - val_loss: 0.0327 - val_accuracy: 0.9903
Epoch 9/15
422/422 [==============================] - 2s 4ms/step - loss: 0.0451 - accuracy: 0.9860 - val_loss: 0.0325 - val_accuracy: 0.9910
Epoch 10/15
422/422 [==============================] - 2s 4ms/step - loss: 0.0408 - accuracy: 0.9872 - val_loss: 0.0339 - val_accuracy: 0.9912
Epoch 11/15
422/422 [==============================] - 2s 4ms/step - loss: 0.0393 - accuracy: 0.9873 - val_loss: 0.0299 - val_accuracy: 0.9922
Epoch 12/15
422/422 [==============================] - 2s 4ms/step - loss: 0.0389 - accuracy: 0.9878 - val_loss: 0.0290 - val_accuracy: 0.9922
Epoch 13/15
422/422 [==============================] - 2s 4ms/step - loss: 0.0344 - accuracy: 0.9888 - val_loss: 0.0299 - val_accuracy: 0.9908
Epoch 14/15
422/422 [==============================] - 2s 4ms/step - loss: 0.0356 - accuracy: 0.9885 - val_loss: 0.0302 - val_accuracy: 0.9917
Epoch 15/15
422/422 [==============================] - 2s 4ms/step - loss: 0.0327 - accuracy: 0.9896 - val_loss: 0.0299 - val_accuracy: 0.9927
```

Out[5]:

```
<tensorflow.python.keras.callbacks.History at 0x7fa19c577f98>
```
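The notebook trains for a fixed 15 epochs. Not part of the original notebook, but if you prefer training to stop automatically once the validation loss stops improving, Keras callbacks can be passed to the same `fit` call; a minimal sketch (the checkpoint path is illustrative):

```python
from tensorflow import keras

callbacks = [
    # Stop when val_loss has not improved for 3 epochs and roll back to the best weights.
    keras.callbacks.EarlyStopping(monitor="val_loss", patience=3, restore_best_weights=True),
    # Keep the best model seen so far on disk.
    keras.callbacks.ModelCheckpoint("best_mnist.h5", monitor="val_loss", save_best_only=True),
]

history = model.fit(
    x_train, y_train,
    batch_size=batch_size,
    epochs=epochs,
    validation_split=0.1,
    callbacks=callbacks,
)
```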
In [6]:

```python
# MODEL EVALUATION

score = model.evaluate(x_test, y_test, verbose=0)
print("Test loss:", score[0])
print("Test accuracy:", score[1])
```

Output:

```
Test loss: 0.027707671746611595
Test accuracy: 0.9901999831199646
```
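Overall accuracy hides which digits get confused with each other. A short sketch (not part of the original notebook, assuming `model`, `x_test`, and `y_test` from the cells above are in scope) that builds a confusion matrix with plain NumPy:

```python
import numpy as np

# Predicted and true class indices for the test set.
pred = model.predict(x_test).argmax(axis=1)
true = y_test.argmax(axis=1)

# confusion[i, j] = number of samples with true class i predicted as class j.
confusion = np.zeros((10, 10), dtype=int)
for t, p in zip(true, pred):
    confusion[t, p] += 1

print(confusion)
print("per-class accuracy:", confusion.diagonal() / confusion.sum(axis=1))
```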
In [10]:

```python
# Save the TensorFlow model.
# Note: despite the ".HDF5" suffix this path is written as a TensorFlow SavedModel
# directory (see the "Assets written to" log below), which is exactly what the
# TFLite converter in the next cell expects.
name = "/home/MNIST_andoid_model.HDF5"
model.save(name)
```

Output (the same message is logged to both stdout and stderr):

```
INFO:tensorflow:Assets written to: /home/MNIST_andoid_model.HDF5/assets
```
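Not in the original notebook, but it is cheap to confirm that the saved copy behaves like the in-memory model before converting it; a minimal sketch:

```python
import numpy as np
from tensorflow import keras

# Reload the SavedModel directory written above.
reloaded = keras.models.load_model(name)

# Predictions from the reloaded model should match the in-memory model (up to float noise).
original_pred = model.predict(x_test[:100])
reloaded_pred = reloaded.predict(x_test[:100])
print("max abs difference:", np.abs(original_pred - reloaded_pred).max())
```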
In [11]:

```python
# Convert the model

converter = tf.lite.TFLiteConverter.from_saved_model(name)  # path to the SavedModel directory
tflite_model = converter.convert()

# Save the model.
with open('model.tflite', 'wb') as f:
    f.write(tflite_model)
```
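Before shipping `model.tflite` to the Android app, it is worth running it once through the TFLite interpreter on the host; a minimal sketch (not part of the original notebook, assuming `x_test` and `y_test` are still in scope):

```python
import numpy as np
import tensorflow as tf

# Load the flatbuffer written above and allocate its tensors.
interpreter = tf.lite.Interpreter(model_path="model.tflite")
interpreter.allocate_tensors()

input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()

# Run a single test image (batch of 1, float32, shape (1, 28, 28, 1)).
sample = x_test[:1].astype(np.float32)
interpreter.set_tensor(input_details[0]["index"], sample)
interpreter.invoke()
probs = interpreter.get_tensor(output_details[0]["index"])

print("predicted digit:", int(probs.argmax()), "true digit:", int(y_test[0].argmax()))
```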
