
Commit 5495867

committed: upload code
1 parent 68683d8 commit 5495867

27 files changed: +3344 −2 lines changed

README.md

Lines changed: 13 additions & 2 deletions

@@ -1,2 +1,13 @@

Removed the old heading "# cscs" and added the following instructions:

Download the ID encoder weight ms1mv3_arcface_r100_fp16_backbone.pth from:

https://onedrive.live.com/?id=4A83B6B633B029CC!5577&resid=4A83B6B633B029CC!5577&authkey=!AFZjr283nwZHqbA&cid=4a83b6b633b029cc

and place it in ./model/arcface/.

Before swapping, use facealign.sh to align the face images.

After alignment, inference_adapter.sh is used to perform the swapping (see the workflow sketch below).
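
A minimal sketch of the intended order of operations, assuming everything is run from the repository root; the exact arguments expected by facealign.sh and inference_adapter.sh are not shown in this commit, so none are passed here:

    # place the ArcFace ID-encoder weight downloaded from the OneDrive link above
    mkdir -p ./model/arcface
    mv ms1mv3_arcface_r100_fp16_backbone.pth ./model/arcface/

    # 1. align the raw face images
    bash facealign.sh

    # 2. swap faces using the aligned images
    bash inference_adapter.sh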
Lines changed: 229 additions & 0 deletions
@@ -0,0 +1,229 @@
# Code adapted from https://gist.github.com/lzhbrian/bde87ab23b499dd02ba4f588258f57d5
"""
brief: face alignment with FFHQ method (https://github.com/NVlabs/ffhq-dataset)
author: lzhbrian (https://lzhbrian.me)
date: 2020.1.5
note: code is heavily borrowed from
    https://github.com/NVlabs/ffhq-dataset
    http://dlib.net/face_landmark_detection.py.html

requirements:
    apt install cmake
    conda install Pillow numpy scipy
    pip install dlib
    # download face landmark models from:
    # http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2
"""
import glob
import os
import argparse
import numpy as np
import PIL
import PIL.Image
import scipy
import scipy.ndimage
import dlib  # pip install dlib if not found
from tqdm import tqdm

# download models from: http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2
predictor = dlib.shape_predictor('./shape_predictor_68_face_landmarks.dat')

def get_landmark(filepath):
    """get landmark with dlib
    :return: np.array shape=(68, 2)
    """
    detector = dlib.get_frontal_face_detector()

    img = dlib.load_rgb_image(filepath)
    dets = detector(img, 1)

    if len(dets) == 0:
        print(' * No face detected.')
        raise Exception
        # exit()

    # my editing here
    if len(dets) > 1:
        print(' * WARNING: {} faces detected in the image. Only preserve the largest face.'.format(len(dets)))
        img_ws = [d.right() - d.left() for d in dets]
        largest_idx = np.argmax(img_ws)
        dets = [dets[largest_idx]]

    assert len(dets) == 1
    shape = predictor(img, dets[0])

    t = list(shape.parts())
    a = []
    for tt in t:
        a.append([tt.x, tt.y])
    lm = np.array(a)
    return lm

def align_face(filepath):
    """
    :param filepath: str
    :return: PIL Image
    """

    lm = get_landmark(filepath)

    lm_chin = lm[0: 17]  # left-right
    lm_eyebrow_left = lm[17: 22]  # left-right
    lm_eyebrow_right = lm[22: 27]  # left-right
    lm_nose = lm[27: 31]  # top-down
    lm_nostrils = lm[31: 36]  # top-down
    lm_eye_left = lm[36: 42]  # left-clockwise
    lm_eye_right = lm[42: 48]  # left-clockwise
    lm_mouth_outer = lm[48: 60]  # left-clockwise
    lm_mouth_inner = lm[60: 68]  # left-clockwise

    # Calculate auxiliary vectors.
    eye_left = np.mean(lm_eye_left, axis=0)
    eye_right = np.mean(lm_eye_right, axis=0)
    eye_avg = (eye_left + eye_right) * 0.5
    eye_to_eye = eye_right - eye_left
    mouth_left = lm_mouth_outer[0]
    mouth_right = lm_mouth_outer[6]
    mouth_avg = (mouth_left + mouth_right) * 0.5
    eye_to_mouth = mouth_avg - eye_avg

    # Choose oriented crop rectangle.
    x = eye_to_eye - np.flipud(eye_to_mouth) * [-1, 1]
    x /= np.hypot(*x)
    x *= max(np.hypot(*eye_to_eye) * 2.0, np.hypot(*eye_to_mouth) * 1.8)
    y = np.flipud(x) * [-1, 1]
    c = eye_avg + eye_to_mouth * 0.1
    quad = np.stack([c - x - y, c - x + y, c + x + y, c + x - y])
    qsize = np.hypot(*x) * 2

    # read image
    img = PIL.Image.open(filepath)

    output_size = 1024
    transform_size = 4096
    enable_padding = True

    # Shrink.
    shrink = int(np.floor(qsize / output_size * 0.5))
    if shrink > 1:
        rsize = (int(np.rint(float(img.size[0]) / shrink)), int(np.rint(float(img.size[1]) / shrink)))
        img = img.resize(rsize, PIL.Image.ANTIALIAS)
        quad /= shrink
        qsize /= shrink

    # Crop.
    # crop: x0, y0, x1, y1
    border = max(int(np.rint(qsize * 0.1)), 3)
    crop = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))), int(np.ceil(max(quad[:, 0]))),
            int(np.ceil(max(quad[:, 1]))))
    crop = (max(crop[0] - border, 0), max(crop[1] - border, 0), min(crop[2] + border, img.size[0]),
            min(crop[3] + border, img.size[1]))
    if crop[2] - crop[0] < img.size[0] or crop[3] - crop[1] < img.size[1]:
        img = img.crop(crop)
        quad -= crop[0:2]

    # Pad.
    pad = (int(np.floor(min(quad[:, 0]))), int(np.floor(min(quad[:, 1]))), int(np.ceil(max(quad[:, 0]))),
           int(np.ceil(max(quad[:, 1]))))
    pad = (max(-pad[0] + border, 0), max(-pad[1] + border, 0), max(pad[2] - img.size[0] + border, 0),
           max(pad[3] - img.size[1] + border, 0))
    if enable_padding and max(pad) > border - 4:
        pad = np.maximum(pad, int(np.rint(qsize * 0.3)))
        img = np.pad(np.float32(img), ((pad[1], pad[3]), (pad[0], pad[2]), (0, 0)), 'reflect')
        h, w, _ = img.shape
        y, x, _ = np.ogrid[:h, :w, :1]
        mask = np.maximum(1.0 - np.minimum(np.float32(x) / pad[0], np.float32(w - 1 - x) / pad[2]),
                          1.0 - np.minimum(np.float32(y) / pad[1], np.float32(h - 1 - y) / pad[3]))
        blur = qsize * 0.02
        img += (scipy.ndimage.gaussian_filter(img, [blur, blur, 0]) - img) * np.clip(mask * 3.0 + 1.0, 0.0, 1.0)
        img += (np.median(img, axis=(0, 1)) - img) * np.clip(mask, 0.0, 1.0)
        img = PIL.Image.fromarray(np.uint8(np.clip(np.rint(img), 0, 255)), 'RGB')
        quad += pad[:2]

    # Transform.
    # print(quad)
    # print(quad+.5)
    # print((quad+.5).flatten())
    img = img.transform((transform_size, transform_size), PIL.Image.QUAD, (quad + 0.5).flatten(), PIL.Image.BILINEAR)
    img.save('transform.jpg')
    if output_size < transform_size:
        img = img.resize((output_size, output_size), PIL.Image.ANTIALIAS)

    # Save aligned image.
    return crop, quad + .5, img

def main():
    parser = argparse.ArgumentParser(description="Align face images to FFHQ format")
    parser.add_argument("--input", type=str, help="path of the input image")
    parser.add_argument("-o", "--output", type=str, default=None, help="output path of the aligned face")
    parser.add_argument("--save_np", action='store_true')
    parser.add_argument('--resize', type=int, default=0)
    # parser.add_argument("--checkpoint", type=str, default='./shape_predictor_68_face_landmarks.dat',)

    vgg_face2 = False
    args = parser.parse_args()
    # origin_images = glob.glob(os.path.join(args.input, "**/*.JPG"))

    if vgg_face2:
        origin_images = glob.glob(os.path.join(args.input, "**/*.jpg"))
    else:
        origin_images = glob.glob(os.path.join(args.input, "*.jpg")) + glob.glob(os.path.join(args.input, "*.JPG")) + glob.glob(os.path.join(args.input, "*.png"))
    print(f"image nums: {len(origin_images)}")
    for path in tqdm(origin_images):
        # img = align_face(args.input)
        try:
            crop, quad, img = align_face(path)
            # save_path = path.replace(args.input, args.output)
            if vgg_face2:
                # print(args.output)
                # print(path[len(args.input):])
                save_path = os.path.join(args.output, path[len(args.input)+1:])  # need absolute path
                # print(save_path)
                save_dir = save_path[:-len(os.path.split(save_path)[-1])]
                # print(save_dir)
                # break
                os.makedirs(save_dir, exist_ok=True)

            else:
                save_path = os.path.join(args.output, os.path.basename(path))
                os.makedirs(os.path.split(save_path)[0], exist_ok=True)

            # print(' * Saving the aligned image to {}'.format(save_path))
            if args.resize > 0:
                img = img.resize((args.resize, args.resize))  # Image.resize returns a new image, so the result must be assigned

            img.save(save_path)
            if args.save_np:
                np.save(save_path[:-4], crop)
                np.save(save_path[:-4] + '_quad', quad)
        except Exception as e:
            print(e)
            print(path)

def test_one_image():
    parser = argparse.ArgumentParser(description="Align face images to FFHQ format")
    parser.add_argument("--input", type=str, help="path of the input image")
    parser.add_argument("-o", "--output", type=str, default=None, help="output path of the aligned face")
    # parser.add_argument("--checkpoint", type=str, default='./shape_predictor_68_face_landmarks.dat',)

    args = parser.parse_args()
    # origin_images = glob.glob(os.path.join(args.input, "**/*.JPG"))
    # origin_images = glob.glob(os.path.join(args.input, "*.jpg")) + glob.glob(os.path.join(args.input, "*.JPG")) + glob.glob(os.path.join(args.input, "*.png"))
    # print(f"image nums: {len(origin_images)}")
    # for path in tqdm(origin_images):
    #     # img = align_face(args.input)
    #     img = align_face(path)
    #     save_path = path.replace(args.input, args.output)
    #     os.makedirs(os.path.split(save_path)[0], exist_ok=True)
    #     # print(' * Saving the aligned image to {}'.format(save_path))
    #     img.save(save_path)
    align_face(args.input)


if __name__ == '__main__':
    main()
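
A hedged usage sketch for the alignment script above: the script's filename is not shown in this diff, so facealign.py below is a placeholder, and the flags are taken from the argparse definitions in main(). It assumes shape_predictor_68_face_landmarks.dat has been downloaded and unpacked into the working directory, since the predictor path is hard-coded at the top of the file.

    # align every *.jpg / *.JPG / *.png in ./raw_faces and write the results to ./aligned;
    # --resize 256 downscales each aligned face, --save_np also stores the crop box and quad as .npy files
    python facealign.py --input ./raw_faces --output ./aligned --resize 256 --save_np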

0 commit comments
