From 39dc329808c454f39415a5de69d5dcfef7029484 Mon Sep 17 00:00:00 2001 From: Patrice Ferlet Date: Fri, 23 Jul 2021 16:36:13 +0200 Subject: [PATCH] Use Tensorflow 2.5 now, go to 1.1 version 1.1 releases supports now tensorflow >=2.5 see #46 --- setup.py | 29 +++---- src/keras_video/flow.py | 64 +++++++++------ src/keras_video/generator.py | 155 ++++++++++++++++++----------------- tests/basic.py | 30 ++++--- tests/opticalflow.py | 25 +++--- tests/slinding.py | 31 +++---- 6 files changed, 176 insertions(+), 158 deletions(-) diff --git a/setup.py b/setup.py index 93583c0..05e68f8 100644 --- a/setup.py +++ b/setup.py @@ -1,27 +1,22 @@ #!/usr/bin/env python -from setuptools import setup, find_packages +from setuptools import find_packages, setup with open("README.md", "r") as fh: long_description = fh.read() setup( - name='keras-video-generators', - version='1.0.13', - description='Keras sequence generators for video data', + name="keras-video-generators", + version="1.1.0", + description="Keras sequence generators for video data", long_description=long_description, long_description_content_type="text/markdown", - license='MIT', - licence_file='LICENSE', - author='Patrice Ferlet', - author_email='metal3d@gmail.com', - url='https://github.com/metal3d/keras-video-generators', - package_dir={'': 'src'}, - packages=find_packages('src'), - install_requires=[ - 'keras>=2', - 'numpy', - 'opencv-python', - 'matplotlib' - ] + license="MIT", + licence_file="LICENSE", + author="Patrice Ferlet", + author_email="metal3d@gmail.com", + url="https://github.com/metal3d/keras-video-generators", + package_dir={"": "src"}, + packages=find_packages("src"), + install_requires=["tensorflow>=2.5", "numpy", "opencv-python", "matplotlib"], ) diff --git a/src/keras_video/flow.py b/src/keras_video/flow.py index a253069..e8d1242 100644 --- a/src/keras_video/flow.py +++ b/src/keras_video/flow.py @@ -16,10 +16,11 @@ """ -import numpy as np import cv2 as cv +import numpy as np +import tensorflow.keras.preprocessing.image as kimage + from .generator import VideoFrameGenerator -import keras.preprocessing.image as kimage METHOD_OPTICAL_FLOW = 1 METHOD_FLOW_MASK = 2 @@ -57,15 +58,16 @@ class OpticalFlowGenerator(VideoFrameGenerator): """ def __init__( - self, - *args, - nb_frames=5, - method=METHOD_OPTICAL_FLOW, - flowlevel=3, - iterations=3, - winsize=15, - **kwargs): - super().__init__(nb_frames=nb_frames+1, *args, **kwargs) + self, + *args, + nb_frames=5, + method=METHOD_OPTICAL_FLOW, + flowlevel=3, + iterations=3, + winsize=15, + **kwargs + ): + super().__init__(nb_frames=nb_frames + 1, *args, **kwargs) self.flowlevel = flowlevel self.iterations = iterations self.winsize = winsize @@ -107,13 +109,20 @@ def make_optical_flow(self, images): images[i] = cv.cvtColor(image, cv.COLOR_RGB2GRAY) flow = cv.calcOpticalFlowFarneback( - images[0], images[1], # image prev and next - None, 0.5, self.flowlevel, - self.winsize, self.iterations, - 5, 1.1, 0) + images[0], + images[1], # image prev and next + None, + 0.5, + self.flowlevel, + self.winsize, + self.iterations, + 5, + 1.1, + 0, + ) mag, ang = cv.cartToPolar(flow[..., 0], flow[..., 1]) - hsv[..., 0] = ang*180/np.pi/2 + hsv[..., 0] = ang * 180 / np.pi / 2 hsv[..., 2] = cv.normalize(mag, None, 0, 255, cv.NORM_MINMAX) rgb = cv.cvtColor(hsv, cv.COLOR_HSV2BGR) @@ -123,12 +132,11 @@ def make_optical_flow(self, images): return rgb def diff_mask(self, images): - """ Get absolute diff mask, then merge frames and apply the mask - """ + """Get absolute diff mask, then merge frames and apply the 
mask""" mask = self.absdiff(images) mask = cv.GaussianBlur(mask, (15, 15), 0) - image = cv.addWeighted(images[0], .5, images[1], .5, 0) + image = cv.addWeighted(images[0], 0.5, images[1], 0.5, 0) return cv.multiply(image, mask) @@ -136,10 +144,10 @@ def flow_mask(self, images): """ Get optical flow on images, then merge images and apply the mask """ - mask = self.make_optical_flow(images) / 255. + mask = self.make_optical_flow(images) / 255.0 mask = cv.GaussianBlur(mask, (15, 15), 0) - image = cv.addWeighted(images[0], .5, images[1], .5, 0) + image = cv.addWeighted(images[0], 0.5, images[1], 0.5, 0) return cv.multiply(image, mask) @@ -147,7 +155,7 @@ def get_validation_generator(self): """ Return the validation generator if you've provided split factor """ return self.__class__( method=self.method, - nb_frames=self.nbframe-1, + nb_frames=self.nbframe - 1, nb_channel=self.nb_channel, target_shape=self.target_shape, classes=self.classes, @@ -155,13 +163,14 @@ def get_validation_generator(self): shuffle=self.shuffle, rescale=self.rescale, glob_pattern=self.glob_pattern, - _validation_data=self.validation) + _validation_data=self.validation, + ) def get_test_generator(self): """ Return the validation generator if you've provided split factor """ return self.__class__( method=self.method, - nb_frames=self.nbframe-1, + nb_frames=self.nbframe - 1, nb_channel=self.nb_channel, target_shape=self.target_shape, classes=self.classes, @@ -169,7 +178,8 @@ def get_test_generator(self): shuffle=self.shuffle, rescale=self.rescale, glob_pattern=self.glob_pattern, - _test_data=self.test) + _test_data=self.test, + ) def __getitem__(self, idx): batch = super().__getitem__(idx) @@ -179,9 +189,9 @@ def __getitem__(self, idx): imgs = item batch_len = len(imgs) frames = [] - for i in range(batch_len-1): + for i in range(batch_len - 1): im1 = imgs[i] - im2 = imgs[i+1] + im2 = imgs[i + 1] if self.method == METHOD_OPTICAL_FLOW: image = self.make_optical_flow((im1, im2)) elif self.method == METHOD_ABS_DIFF: diff --git a/src/keras_video/generator.py b/src/keras_video/generator.py index 24237f9..4329dcf 100644 --- a/src/keras_video/generator.py +++ b/src/keras_video/generator.py @@ -6,18 +6,19 @@ and that have no noise frames. 
""" -import os import glob -import numpy as np -import cv2 as cv -from math import floor import logging +import os import re -log = logging.getLogger() +from math import floor + +import cv2 as cv +import numpy as np +from tensorflow.keras.preprocessing.image import (ImageDataGenerator, + img_to_array) +from tensorflow.keras.utils import Sequence -from keras.utils import Sequence -from keras.preprocessing.image import \ - ImageDataGenerator, img_to_array +log = logging.getLogger() class VideoFrameGenerator(Sequence): @@ -50,30 +51,33 @@ class VideoFrameGenerator(Sequence): """ def __init__( - self, - rescale=1/255., - nb_frames: int = 5, - classes: list = None, - batch_size: int = 16, - use_frame_cache: bool = False, - target_shape: tuple = (224, 224), - shuffle: bool = True, - transformation: ImageDataGenerator = None, - split_test: float = None, - split_val: float = None, - nb_channel: int = 3, - glob_pattern: str = './videos/{classname}/*.avi', - use_headers: bool = True, - *args, - **kwargs): + self, + rescale=1 / 255.0, + nb_frames: int = 5, + classes: list = None, + batch_size: int = 16, + use_frame_cache: bool = False, + target_shape: tuple = (224, 224), + shuffle: bool = True, + transformation: ImageDataGenerator = None, + split_test: float = None, + split_val: float = None, + nb_channel: int = 3, + glob_pattern: str = "./videos/{classname}/*.avi", + use_headers: bool = True, + *args, + **kwargs + ): # deprecation - if 'split' in kwargs: - log.warn("Warning, `split` argument is replaced by `split_val`, " - "please condider to change your source code." - "The `split` argument will be removed " - "in future releases.") - split_val = float(kwargs.get('split')) + if "split" in kwargs: + log.warn( + "Warning, `split` argument is replaced by `split_val`, " + "please condider to change your source code." + "The `split` argument will be removed " + "in future releases." 
+ ) + split_val = float(kwargs.get("split")) self.glob_pattern = glob_pattern @@ -85,12 +89,14 @@ def __init__( # we should have classes if len(classes) == 0: - log.warn("You didn't provide classes list or " - "we were not able to discover them from " - "your pattern.\n" - "Please check if the path is OK, and if the glob " - "pattern is correct.\n" - "See https://docs.python.org/3/library/glob.html") + log.warn( + "You didn't provide classes list or " + "we were not able to discover them from " + "your pattern.\n" + "Please check if the path is OK, and if the glob " + "pattern is correct.\n" + "See https://docs.python.org/3/library/glob.html" + ) # shape size should be 2 assert len(target_shape) == 2 @@ -127,8 +133,8 @@ def __init__( self.validation = [] self.test = [] - _validation_data = kwargs.get('_validation_data', None) - _test_data = kwargs.get('_test_data', None) + _validation_data = kwargs.get("_validation_data", None) + _test_data = kwargs.get("_test_data", None) if _validation_data is not None: # we only need to set files here @@ -160,8 +166,7 @@ def __init__( val = np.random.permutation(indexes)[:nbval] # remove validation from train - indexes = np.array( - [i for i in indexes if i not in val]) + indexes = np.array([i for i in indexes if i not in val]) self.validation += [files[i] for i in val] info.append("validation count: %d" % nbval) @@ -173,15 +178,16 @@ def __init__( val_test = np.random.permutation(indexes)[:nbtest] # remove test from train - indexes = np.array( - [i for i in indexes if i not in val_test]) + indexes = np.array([i for i in indexes if i not in val_test]) self.test += [files[i] for i in val_test] info.append("test count: %d" % nbtest) # and now, make the file list self.files += [files[i] for i in indexes] - print("class %s, %s, train count: %d" % - (cls, ", ".join(info), nbtrain)) + print( + "class %s, %s, train count: %d" + % (cls, ", ".join(info), nbtrain) + ) else: for cls in classes: @@ -193,7 +199,7 @@ def __init__( self.classes_count = len(classes) # to initialize transformations and shuffle indices - if 'no_epoch_at_init' not in kwargs: + if "no_epoch_at_init" not in kwargs: self.on_epoch_end() kind = "train" @@ -204,14 +210,14 @@ def __init__( self._current = 0 self._framecounters = {} - print("Total data: %d classes for %d files for %s" % ( - self.classes_count, - self.files_count, - kind)) + print( + "Total data: %d classes for %d files for %s" + % (self.classes_count, self.files_count, kind) + ) def count_frames(self, cap, name, force_no_headers=False): - """ Count number of frame for video - if it's not possible with headers """ + """Count number of frame for video + if it's not possible with headers""" if not force_no_headers and name in self._framecounters: return self._framecounters[name] @@ -239,10 +245,10 @@ def count_frames(self, cap, name, force_no_headers=False): def _discover_classes(self): pattern = os.path.realpath(self.glob_pattern) pattern = re.escape(pattern) - pattern = pattern.replace('\\{classname\\}', '(.*?)') - pattern = pattern.replace('\\*', '.*') + pattern = pattern.replace("\\{classname\\}", "(.*?)") + pattern = pattern.replace("\\*", ".*") - files = glob.glob(self.glob_pattern.replace('{classname}', '*')) + files = glob.glob(self.glob_pattern.replace("{classname}", "*")) classes = set() for f in files: f = os.path.realpath(f) @@ -273,7 +279,8 @@ def get_validation_generator(self): rescale=self.rescale, glob_pattern=self.glob_pattern, use_headers=self.use_video_header, - _validation_data=self.validation) + 
_validation_data=self.validation, + ) def get_test_generator(self): """ Return the validation generator if you've provided split factor """ @@ -287,7 +294,8 @@ def get_test_generator(self): rescale=self.rescale, glob_pattern=self.glob_pattern, use_headers=self.use_video_header, - _test_data=self.test) + _test_data=self.test, + ) def on_epoch_end(self): """ Called by Keras after each epoch """ @@ -319,7 +327,7 @@ def __getitem__(self, index): labels = [] images = [] - indexes = self.indexes[index*self.batch_size:(index+1)*self.batch_size] + indexes = self.indexes[index * self.batch_size : (index + 1) * self.batch_size] transformation = None @@ -334,14 +342,12 @@ def __getitem__(self, index): # create a label array and set 1 to the right column label = np.zeros(len(classes)) col = classes.index(classname) - label[col] = 1. + label[col] = 1.0 if video not in self.__frame_cache: frames = self._get_frames( - video, - nbframe, - shape, - force_no_headers=not self.use_video_header) + video, nbframe, shape, force_no_headers=not self.use_video_header + ) if frames is None: # avoid failure, nevermind that video... continue @@ -355,8 +361,10 @@ def __getitem__(self, index): # apply transformation if transformation is not None: - frames = [self.transformation.apply_transform( - frame, transformation) for frame in frames] + frames = [ + self.transformation.apply_transform(frame, transformation) + for frame in frames + ] # add the sequence in batch images.append(frames) @@ -375,10 +383,10 @@ def _get_classname(self, video: str) -> str: pattern = re.escape(pattern) # get back "*" to make it ".*" in regexp - pattern = pattern.replace('\\*', '.*') + pattern = pattern.replace("\\*", ".*") # use {classname} as a capture - pattern = pattern.replace('\\{classname\\}', '(.*?)') + pattern = pattern.replace("\\{classname\\}", "(.*?)") # and find all occurence classname = re.findall(pattern, video)[0] @@ -390,7 +398,7 @@ def _get_frames(self, video, nbframe, shape, force_no_headers=False): orig_total = total_frames if total_frames % 2 != 0: total_frames += 1 - frame_step = floor(total_frames/(nbframe-1)) + frame_step = floor(total_frames / (nbframe - 1)) # TODO: fix that, a tiny video can have a frame_step that is # under 1 frame_step = max(1, frame_step) @@ -429,18 +437,15 @@ def _get_frames(self, video, nbframe, shape, force_no_headers=False): # That means that frame count in header is wrong or broken, # so we need to force the full read of video to get the right # frame counter - return self._get_frames( - video, - nbframe, - shape, - force_no_headers=True) + return self._get_frames(video, nbframe, shape, force_no_headers=True) if force_no_headers and len(frames) != nbframe: # and if we really couldn't find the real frame counter # so we return None. Sorry, nothing can be done... 
- log.error("Frame count is not OK for video %s, " - "%d total, %d extracted" % ( - video, total_frames, len(frames))) + log.error( + "Frame count is not OK for video %s, " + "%d total, %d extracted" % (video, total_frames, len(frames)) + ) return None return np.array(frames) diff --git a/tests/basic.py b/tests/basic.py index c241339..5205900 100644 --- a/tests/basic.py +++ b/tests/basic.py @@ -1,14 +1,17 @@ -import keras_video -import unittest import os -import sys import shutil -sys.path.insert(0, './src') +import sys +import unittest + +import keras_video +from tensorflow import keras + +sys.path.insert(0, "./src") class TestDiscovery(unittest.TestCase): - testdir = 'test_vids' + testdir = "test_vids" def setUp(self): dirname = self.testdir @@ -16,12 +19,11 @@ def setUp(self): def _write_zero(cl, i): shutil.copy( - 'tests/vidtest.ogv', - os.path.join(self.testdir, '%s_%d.ogv' % (cl, i)) + "tests/vidtest.ogv", os.path.join(self.testdir, "%s_%d.ogv" % (cl, i)) ) for i in range(10): - for cl in ['A', 'B', 'C']: + for cl in ["A", "B", "C"]: _write_zero(cl, i) def tearDown(self): @@ -31,10 +33,11 @@ def test_find_classes(self): """ Check classe auto discovery """ g = keras_video.VideoFrameGenerator( - glob_pattern=os.path.join(self.testdir, '{classname}_*.ogv')) - assert 'A' in g.classes - assert 'B' in g.classes - assert 'C' in g.classes + glob_pattern=os.path.join(self.testdir, "{classname}_*.ogv") + ) + assert "A" in g.classes + assert "B" in g.classes + assert "C" in g.classes assert g.files_count == 30 @@ -44,7 +47,8 @@ def test_iterator(self): batch_size=4, nb_frames=6, target_shape=(64, 64), - glob_pattern=os.path.join(self.testdir, '{classname}_*.ogv')) + glob_pattern=os.path.join(self.testdir, "{classname}_*.ogv"), + ) # iterator object should be able to # use "next()" function diff --git a/tests/opticalflow.py b/tests/opticalflow.py index 8166c84..4455cc2 100644 --- a/tests/opticalflow.py +++ b/tests/opticalflow.py @@ -1,15 +1,17 @@ -import keras_video -import keras -import unittest import os -import sys import shutil -sys.path.insert(0, './src') +import sys +import unittest + +import keras_video +from tensorflow import keras + +sys.path.insert(0, "./src") class TestOpticalFlow(unittest.TestCase): - testdir = 'test_vids' + testdir = "test_vids" def setUp(self): dirname = self.testdir @@ -17,12 +19,11 @@ def setUp(self): def _write_zero(cl, i): shutil.copy( - 'tests/vidtest.ogv', - os.path.join(self.testdir, '%s_%d.ogv' % (cl, i)) + "tests/vidtest.ogv", os.path.join(self.testdir, "%s_%d.ogv" % (cl, i)) ) for i in range(10): - for cl in ['A', 'B', 'C']: + for cl in ["A", "B", "C"]: _write_zero(cl, i) def tearDown(self): @@ -31,7 +32,7 @@ def tearDown(self): def test_init(self): """ Check opticalflow init """ gen = keras_video.OpticalFlowGenerator( - glob_pattern=os.path.join(self.testdir, '{classname}_*.ogv') + glob_pattern=os.path.join(self.testdir, "{classname}_*.ogv") ) assert len(gen.classes) == 3 assert gen.files_count == 30 @@ -41,8 +42,8 @@ def __get_with_method(self, method=keras_video.METHOD_ABS_DIFF): gen = keras_video.OpticalFlowGenerator( method=method, - glob_pattern=os.path.join(self.testdir, '{classname}_*.ogv'), - transformation=tr + glob_pattern=os.path.join(self.testdir, "{classname}_*.ogv"), + transformation=tr, ) seq, labels = next(gen) diff --git a/tests/slinding.py b/tests/slinding.py index ec944e0..7225540 100644 --- a/tests/slinding.py +++ b/tests/slinding.py @@ -1,15 +1,17 @@ -import keras_video -import keras -import unittest import os -import sys import 
shutil -sys.path.insert(0, './src') +import sys +import unittest + +import keras_video +from tensorflow import keras + +sys.path.insert(0, "./src") class TestSlinding(unittest.TestCase): - testdir = 'test_vids' + testdir = "test_vids" def setUp(self): dirname = self.testdir @@ -17,12 +19,11 @@ def setUp(self): def _write_zero(cl, i): shutil.copy( - 'tests/vidtest.ogv', - os.path.join(self.testdir, '%s_%d.ogv' % (cl, i)) + "tests/vidtest.ogv", os.path.join(self.testdir, "%s_%d.ogv" % (cl, i)) ) for i in range(10): - for cl in ['A', 'B', 'C']: + for cl in ["A", "B", "C"]: _write_zero(cl, i) def tearDown(self): @@ -31,10 +32,11 @@ def tearDown(self): def test_init(self): """ Check if slinding generator init """ g = keras_video.SlidingFrameGenerator( - glob_pattern=os.path.join(self.testdir, '{classname}_*.ogv')) - assert 'A' in g.classes - assert 'B' in g.classes - assert 'C' in g.classes + glob_pattern=os.path.join(self.testdir, "{classname}_*.ogv") + ) + assert "A" in g.classes + assert "B" in g.classes + assert "C" in g.classes assert g.files_count == 30 @@ -48,7 +50,8 @@ def test_with_transformation(self): tr = keras.preprocessing.image.ImageDataGenerator(rotation_range=10) g = keras_video.SlidingFrameGenerator( transformation=tr, - glob_pattern=os.path.join(self.testdir, '{classname}_*.ogv')) + glob_pattern=os.path.join(self.testdir, "{classname}_*.ogv"), + ) # check get item seq, labels = next(g)
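
Note (not part of the patch): a minimal usage sketch of the generators after this change, for illustration only. It uses nothing beyond `tensorflow.keras`, exactly as the updated tests do (`from tensorflow import keras`); the "videos/{classname}/*.avi" layout, the parameter values and the variable names are assumptions made for the example, not files or settings shipped with the repository.

# Illustrative only: assumes tensorflow>=2.5 is installed and that videos are
# organised as videos/<classname>/*.avi (hypothetical dataset layout).
import keras_video
from tensorflow import keras

# Augmentation now comes from tensorflow.keras rather than the standalone keras package,
# mirroring the imports used in the updated tests.
augmenter = keras.preprocessing.image.ImageDataGenerator(rotation_range=10)

train_gen = keras_video.VideoFrameGenerator(
    glob_pattern="videos/{classname}/*.avi",  # assumed path pattern
    nb_frames=5,
    batch_size=8,
    target_shape=(224, 224),
    transformation=augmenter,
    split_val=0.2,  # keeps 20% of each class for validation
)
val_gen = train_gen.get_validation_generator()

# As in the test suite, a batch can be pulled with next(); images are shaped
# (batch_size, nb_frames, height, width, nb_channel) and labels are one-hot,
# so the generator can be passed straight to model.fit().
frames, labels = next(train_gen)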