import cv2
import numpy
import os
import json

from tqdm import tqdm


def get_image_paths( directory ):
    # Collect every .jpg/.png file in the directory (non-recursive).
    return [ x.path for x in os.scandir( directory ) if x.name.endswith(".jpg") or x.name.endswith(".png") ]


# 68 evenly spaced grey levels, presumably one shade per landmark (unused in this module).
guideShades = numpy.linspace(20,250,68)


def load_images_masked( image_paths, convert=None, blurSize=35 ):
    """Load aligned face crops together with a convex-hull face mask.

    Expects an alignments.json file alongside the images; each entry holds
    (original frame, cropped face file, flattened 2x3 alignment matrix,
    flattened landmark points).  Returns (N, H, W, 4) uint8 images
    (BGR plus mask channel) and the landmarks mapped into crop coordinates.
    Note: convert and blurSize are accepted but not used here.
    """
    basePath = os.path.split(image_paths[0])[0]
    alignments = os.path.join(basePath,'alignments.json')
    alignments = json.loads( open(alignments).read() )

    all_images = []
    landmarks = []

    pbar = tqdm(alignments)
    for original,cropped,mat,points in pbar:
        pbar.set_description('loading '+basePath)

        # Resolve the cropped face filename relative to the image directory.
        cropped = os.path.split(cropped)[1]
        cropped = os.path.join(basePath,cropped)
        if cropped in image_paths and os.path.exists(cropped):
            cropped = cv2.imread(cropped).astype(float)

            mat = numpy.array(mat).reshape(2,3)
            points = numpy.array(points).reshape((-1,2))

            # Scale the normalised alignment matrix into the crop's pixel space
            # (the 160/42 constants appear to match how the crops were generated).
            mat = mat*160
            mat[:,2] += 42

            facepoints = numpy.array( points ).reshape((-1,2))

            # Build the face mask: fill the convex hull of the landmarks, mapped
            # into the crop, then dilate it so the mask covers the face boundary.
            mask = numpy.zeros_like(cropped,dtype=numpy.uint8)

            hull = cv2.convexHull( facepoints.astype(numpy.float32) )
            hull = cv2.transform( hull.reshape(1,-1,2), mat ).reshape(-1,2).astype(numpy.int32)

            cv2.fillConvexPoly( mask, hull, (255,255,255) )

            kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(15,15))
            mask = cv2.dilate( mask, kernel, iterations=1, borderType=cv2.BORDER_REFLECT )

            # Landmarks transformed into the cropped image's coordinate system.
            facepoints = cv2.transform( numpy.array( points ).reshape((1,-1,2)), mat ).reshape(-1,2).astype(int)

            # Keep a single mask channel and stack it as the fourth channel.
            mask = mask[:,:,0]

            all_images.append( numpy.dstack([cropped,mask]).astype(numpy.uint8) )
            landmarks.append( facepoints )

    return numpy.array(all_images),numpy.array(landmarks)


def load_images_std( image_paths, convert=None ):
    # Plain loader: read each image, optionally run it through convert, and
    # pack everything into one (N, H, W, C) array.  Assumes a non-empty list
    # of images that all share the same shape.
    iter_all_images = ( cv2.imread(fn) for fn in image_paths )
    if convert:
        iter_all_images = ( convert(img) for img in iter_all_images )
    for i,image in enumerate( iter_all_images ):
        if i == 0:
            all_images = numpy.empty( ( len(image_paths), ) + image.shape, dtype=image.dtype )
        all_images[i] = image
    return all_images


# Use the masked loader by default.
load_images = load_images_masked


def get_transpose_axes( n ):
    # Split the axes of an n-dimensional batch into row axes, column axes and
    # the trailing channel axis, so stack_images can tile a grid into one image.
    if n % 2 == 0:
        y_axes = list( range( 1, n-1, 2 ) )
        x_axes = list( range( 0, n-1, 2 ) )
    else:
        y_axes = list( range( 0, n-1, 2 ) )
        x_axes = list( range( 1, n-1, 2 ) )
    return y_axes, x_axes, [n-1]


def stack_images( images ):
    # Collapse a grid of images, e.g. (rows, cols, H, W, C), into a single
    # (rows*H, cols*W, C) montage by interleaving grid axes with image axes.
    images_shape = numpy.array( images.shape )
    new_axes = get_transpose_axes( len( images_shape ) )
    new_shape = [ numpy.prod( images_shape[x] ) for x in new_axes ]
    return numpy.transpose(
        images,
        axes = numpy.concatenate( new_axes )
        ).reshape( new_shape )
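

# Illustrative usage sketch (not part of the original module): assumes a
# directory of aligned .jpg/.png face crops with an alignments.json next to
# them; the 'data/faceA' path and the 2x2 preview grid are placeholders.
if __name__ == '__main__':
    paths_a = get_image_paths( 'data/faceA' )
    images_a, landmarks_a = load_images( paths_a )
    print( images_a.shape, landmarks_a.shape )   # (N, H, W, 4) and (N, num_landmarks, 2)

    # Tile the first four BGR images (mask channel dropped) into a 2x2 montage.
    h, w = images_a.shape[1:3]
    preview = stack_images( images_a[:4, :, :, :3].reshape( 2, 2, h, w, 3 ) )
    cv2.imwrite( 'preview.png', preview )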