test.py #96

Open
fangsanyong opened this issue May 16, 2024 · 0 comments
"""
Author: Wouter Van Gansbeke
Licensed under the CC BY-NC 4.0 license (https://creativecommons.org/licenses/by-nc/4.0/)
"""

import numpy as np
import torch
import matplotlib.pyplot as plt
import os
import glob
from Networks.LSQ_layer import Net
from Networks.utils import define_args
from PIL import Image
import torchvision.transforms.functional as F
from torchvision import transforms
import cv2

def getimage(path_img, i):
    resize = 256
    imglists = os.listdir(path_img)
    img_name = imglists[i]
    with open(path_img + "/" + img_name, 'rb') as f:
        image = Image.open(f).convert('RGB')
    w, h = image.size
    # keep the bottom 640 rows (the road region), then resize to 256 x 512
    image = F.crop(image, h-640, 0, 640, w)
    image = F.resize(image, size=(resize, 2*resize), interpolation=Image.BILINEAR)
    image = transforms.ToTensor()(image).float()

    return image, path_img + "/" + img_name
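For anyone puzzled by the crop arguments: torchvision's functional `crop` takes `(top, left, height, width)`, so this keeps the bottom 640 rows of the frame before resizing. A minimal sketch with a dummy frame (the 1280x720 size is my assumption, not something the script fixes):

```python
from PIL import Image
import torchvision.transforms.functional as F

img = Image.new('RGB', (1280, 720))          # dummy frame, width x height
crop = F.crop(img, 720 - 640, 0, 640, 1280)  # (top, left, height, width): bottom 640 rows
out = F.resize(crop, size=(256, 512), interpolation=Image.BILINEAR)
print(crop.size, out.size)                   # (1280, 640) (512, 256) -- PIL reports (w, h)
```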

def draw_homography_points(img, x, resize=256, color=(255,0,0)):
    y_start1 = (0.3+x[2])*(resize-1)
    y_start = 0.3*(resize-1)
    y_stop = resize-1
    src = np.float32([[0.45*(2*resize-1), y_start], [0.55*(2*resize-1), y_start],
                      [0.1*(2*resize-1), y_stop], [0.9*(2*resize-1), y_stop]])
    dst = np.float32([[(0.45+x[0])*(2*resize-1), y_start1], [(0.55+x[1])*(2*resize-1), y_start1],
                      [(0.45+x[0])*(2*resize-1), y_stop], [(0.55+x[1])*(2*resize-1), y_stop]])
    dst_ideal = np.float32([[0.45*(2*resize-1), y_start], [0.55*(2*resize-1), y_start],
                            [0.45*(2*resize-1), y_stop], [0.55*(2*resize-1), y_stop]])
    xx = cv2.cvtColor((img*255).astype(np.uint8), cv2.COLOR_RGB2BGR)
    # mark the source, ideal-destination and predicted-destination point sets
    [cv2.circle(xx, tuple(map(int, idx)), radius=5, thickness=-1, color=(255,0,0)) for idx in src]
    [cv2.circle(xx, tuple(map(int, idx)), radius=5, thickness=-1, color=(255,0,0)) for idx in dst_ideal]
    [cv2.circle(xx, tuple(map(int, idx)), radius=5, thickness=-1, color=(255,0,0)) for idx in dst]

    return xx
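As a side note, the homography implied by these point sets can be recovered directly with `cv2.getPerspectiveTransform`; a small sketch for the `src` → `dst_ideal` pair (how the network's `M` relates to these points is my assumption, not stated in the script):

```python
import numpy as np
import cv2

resize = 256
y_start, y_stop = 0.3*(resize-1), resize-1
src = np.float32([[0.45*(2*resize-1), y_start], [0.55*(2*resize-1), y_start],
                  [0.1*(2*resize-1), y_stop], [0.9*(2*resize-1), y_stop]])
dst_ideal = np.float32([[0.45*(2*resize-1), y_start], [0.55*(2*resize-1), y_start],
                        [0.45*(2*resize-1), y_stop], [0.55*(2*resize-1), y_stop]])
H = cv2.getPerspectiveTransform(src, dst_ideal)  # 3x3 image-to-bird's-eye homography
print(H.round(4))
```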

def draw_fitted_line(img, params, resize, color=(255,0,0)):
    params = params.data.cpu().tolist()
    y_stop = 0.7
    y_prime = np.linspace(0, y_stop, 20)
    params = [0] * (4 - len(params)) + params  # left-pad to cubic coefficients
    d, a, b, c = params
    # cubic lane model in normalized coordinates: x = d*y^3 + a*y^2 + b*y + c
    x_pred = d*(y_prime**3) + a*(y_prime)**2 + b*(y_prime) + c
    x_pred = x_pred*(2*resize-1)
    y_prime = (1-y_prime)*(resize-1)
    lane = [(xcord, ycord) for (xcord, ycord) in zip(x_pred, y_prime)]
    img = cv2.polylines(img, [np.int32(lane)], isClosed=False, color=color, thickness=1)
    return img, lane
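The fitted curve is a cubic in the normalized vertical coordinate, x = d·y³ + a·y² + b·y + c, so the evaluation above is equivalent to `np.polyval` with coefficients in `(d, a, b, c)` order; a quick check with made-up coefficients:

```python
import numpy as np

d, a, b, c = 0.01, -0.05, 0.12, 0.45   # hypothetical coefficients
y = np.linspace(0, 0.7, 20)
x_manual = d*y**3 + a*y**2 + b*y + c
x_poly = np.polyval([d, a, b, c], y)   # same cubic, highest power first
assert np.allclose(x_manual, x_poly)
```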

def test_projective_transform(input, resize, M):
    # rescale the normalized homography M to pixel coordinates (width 2*resize, height resize)
    # M_scaledup = np.array([[M[0,0], M[0,1]*2, M[0,2]*(2*resize-1)], [0, M[1,1], M[1,2]*(resize-1)], [0, M[2,1]/(resize-1), M[2,2]]])
    M_scaledup = np.array([[M[0,0], M[0,1]*2, M[0,2]*(2*resize-1)],
                           [M[1,0]*0.5, M[1,1], M[1,2]*(resize-1)],
                           [M[2,0]/(2*resize-1), M[2,1]/(resize-1), M[2,2]]])
    inp = cv2.warpPerspective(np.asarray(input), M_scaledup, (2*resize, resize))
    return inp, M_scaledup
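The hand-written `M_scaledup` looks like the conjugation S·M·S⁻¹ with S = diag(2·resize−1, resize−1, 1), i.e. pixels → normalized coordinates → M → back to pixels; the constants 2 and 0.5 approximate (2·resize−1)/(resize−1) and its inverse. A quick numerical check with a made-up M (my reading of the code, not from the repo docs):

```python
import numpy as np

resize = 256
S = np.diag([2*resize - 1, resize - 1, 1.0])
M = np.array([[1.0, -0.2, 0.1],   # made-up normalized homography
              [0.05, 0.6, 0.2],
              [0.01, -0.9, 1.0]])
exact = S @ M @ np.linalg.inv(S)
print(exact[0, 1] / M[0, 1])  # ~2.004, matched by the M[0,1]*2 entry
print(exact[1, 0] / M[1, 0])  # ~0.499, matched by the M[1,0]*0.5 entry
print(exact[0, 2] / M[0, 2])  # 511.0,  matched by M[0,2]*(2*resize-1)
```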

def save_weightmap(M, weightmap_zeros, beta0, beta1, beta2, beta3, images, no_ortho, resize):
    M = M.data.cpu().numpy()[0]
    x = np.zeros(3)

    wm0_zeros = weightmap_zeros.data.cpu()[0, 0].numpy()
    wm1_zeros = weightmap_zeros.data.cpu()[0, 1].numpy()

    im = images.permute(0, 2, 3, 1).data.cpu().numpy()[0]
    im_orig = np.copy(im)
    im_orig_projectpoint = draw_homography_points(im_orig, x, resize)
    im, M_scaledup = test_projective_transform(im, resize, M)
    im, lane0 = draw_fitted_line(im, beta0[0], resize, (255, 0, 0))
    im, lane1 = draw_fitted_line(im, beta1[0], resize, (0, 0, 255))
    # if beta2 is not None:
    #     im, lane2 = draw_fitted_line(im, beta2[0], resize, (255, 255, 0))
    #     im, lane3 = draw_fitted_line(im, beta3[0], resize, (255, 128, 0))

    im_inverse = cv2.warpPerspective(im, np.linalg.inv(M_scaledup), (2*resize, resize))

    im_orig = np.clip(im_orig, 0, 1)
    im_inverse = np.clip(im_inverse, 0, 1)
    im = np.clip(im, 0, 1)
    fig = plt.figure()
    ax1 = fig.add_subplot(221)
    ax2 = fig.add_subplot(222)
    ax3 = fig.add_subplot(223)
    ax4 = fig.add_subplot(224)
    ax1.imshow(im_orig)
    ax2.imshow(im_orig_projectpoint)
    ax3.imshow(im)
    ax4.imshow(im_inverse)
    fig.savefig('/dataset/fsy/LaneDetection_End2End-master/Birds_Eye_View_Loss1/' + 'weight_idx-{}_batch-{}'.format(11, 22))
    plt.clf()
    plt.close(fig)

global args
parser = define_args()
args = parser.parse_args()
torch.backends.cudnn.benchmark = args.cudnn
model = Net(args)
if not args.no_cuda:
    model = model.cuda()
best_file_name = glob.glob(os.path.join("/dataset/fsy/LaneDetection_End2End-master/Saved1", 'model_best*'))[0]
checkpoint = torch.load(best_file_name)
model.load_state_dict(checkpoint['state_dict'])
model.eval()

image, path = getimage('/dataset/fsy/LaneDetection_End2End-master/IMAGES', 0)
input = image.unsqueeze(0)
input_data = input.cuda()

beta0, beta1, beta2, beta3, weightmap_zeros, M, output_net, outputs_line, outputs_horizon = model(input_data, args.end_to_end)
save_weightmap(M, weightmap_zeros, beta0, beta1, beta2, beta3, input_data, False, 256)
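If the goal is to run the whole folder rather than a single image, a hedged sketch along the same lines (same paths as above; note that `save_weightmap` writes to a fixed filename, so the loop index would need to be threaded through to avoid overwriting):

```python
import torch

img_dir = '/dataset/fsy/LaneDetection_End2End-master/IMAGES'
with torch.no_grad():  # inference only; skips autograd bookkeeping
    for i in range(len(os.listdir(img_dir))):
        image, path = getimage(img_dir, i)
        inp = image.unsqueeze(0).cuda()
        beta0, beta1, beta2, beta3, weightmap_zeros, M, *_ = model(inp, args.end_to_end)
        save_weightmap(M, weightmap_zeros, beta0, beta1, beta2, beta3, inp, False, 256)
```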
