
Commit b23847a

Lucas Peng authored and committed
move hqtrivia.py to hqtrivia_webcam.py
1 parent 1a9276c commit b23847a

File tree

1 file changed: +102 -0 lines changed


hqtrivia_webcam.py

Lines changed: 102 additions & 0 deletions
@@ -0,0 +1,102 @@
#import numpy as np
import cv2
from PIL import Image
import sys
import pyocr
import pyocr.builders
import Searcher


class HQTrivia:
    # cam_size = (1280, 720)
    cam_size = (1920, 1080)
    question_size = (int(700), int(280))
    answer_size = (int(650), int(350))
    # Region of the frame expected to contain the question text
    question_rect = ((int(cam_size[0]/2 - question_size[0]/2 + 70),
                      int(cam_size[1]/2 - question_size[1]/2)),
                     (int(cam_size[0]/2 + question_size[0]/2 + 70),
                      int(cam_size[1]/2 + question_size[1]/2)))
    # Region of the frame expected to contain the answer choices
    answer_rect = ((int(cam_size[0]/2 - answer_size[0]/2 + 70),
                    int(cam_size[1]/2 - answer_size[1]/2 + 350)),
                   (int(cam_size[0]/2 + answer_size[0]/2 + 70),
                    int(cam_size[1]/2 + answer_size[1]/2) + 350))

    def __init__(self):
        # initialize OCR
        tools = pyocr.get_available_tools()
        if len(tools) == 0:
            print("No OCR tool found")
            sys.exit(1)
        self.tool = tools[0]
        print("Will use tool '%s'" % (self.tool.get_name()))
        langs = self.tool.get_available_languages()
        print("Available languages: %s" % ", ".join(langs))
        self.lang = langs[0]
        print("Will use lang '%s'" % (self.lang))
        # initialize searcher class
        self.searcher = Searcher.Searcher()

    def main_loop(self, flip=False):
        cap = cv2.VideoCapture(1)
        # cap.set(cv2.CAP_PROP_AUTOFOCUS, 0)  # doesn't seem to work?
        while True:
            # Capture frame-by-frame
            ret, frame = cap.read()
            # Our operations on the frame come here
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            # print(gray.shape)
            if flip:
                gray_flip = cv2.flip(gray, 1)
            else:
                # Draw on a copy so the OCR crops taken from gray stay clean
                gray_flip = gray.copy()
            cv2.rectangle(gray_flip, self.question_rect[0], self.question_rect[1], (0, 0, 0))
            cv2.rectangle(gray_flip, self.answer_rect[0], self.answer_rect[1], (0, 0, 0))
            # Display the resulting frame
            cv2.imshow('frame', gray_flip)

            key = cv2.waitKey(1) & 0xFF
            if key == ord('c'):
                print('capture!')
                # Crop the question and answer regions out of the unflipped frame
                q_p1 = self.question_rect[0]
                q_p2 = self.question_rect[1]
                q_img = gray[q_p1[1]:q_p2[1], q_p1[0]:q_p2[0]]
                ans_p1 = self.answer_rect[0]
                ans_p2 = self.answer_rect[1]
                ans_img = gray[ans_p1[1]:ans_p2[1], ans_p1[0]:ans_p2[0]]
                # q_img = cv2.medianBlur(q_img, 5)
                # q_img = cv2.GaussianBlur(q_img, (3, 3), 0)
                # Binarize both crops with Otsu's threshold before OCR
                ret, q_img = cv2.threshold(q_img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
                # ans_img = cv2.GaussianBlur(ans_img, (5, 5), 0)
                # ret, ans_img = cv2.threshold(ans_img, 200, 255, cv2.THRESH_BINARY)
                ret, ans_img = cv2.threshold(ans_img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
                # ans_img = cv2.adaptiveThreshold(ans_img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)
                # q_img = cv2.adaptiveThreshold(q_img, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)
                cv2.imwrite('test.png', q_img)
                cv2.imwrite('test_ans.png', ans_img)
                # OCR the question crop and the answer crop separately
                txt = self.tool.image_to_string(
                    Image.fromarray(q_img),
                    lang=self.lang,
                    builder=pyocr.builders.TextBuilder()
                )
                anstxt = self.tool.image_to_string(
                    Image.fromarray(ans_img),
                    lang=self.lang,
                    builder=pyocr.builders.TextBuilder()
                )
                query = txt.replace('\n', ' ').rstrip()
                print(query)
                # One answer choice per non-empty line
                anstxt = anstxt.rstrip().split('\n')
                anstxt = list(filter(lambda x: x.rstrip() != '', anstxt))
                print(anstxt)
                try:
                    self.searcher.search_answer(query, anstxt)
                except Exception:
                    # Ignore search failures so the capture loop keeps running
                    pass
            elif key == ord('q'):
                break
        # When everything done, release the capture
        cap.release()
        cv2.destroyAllWindows()


if __name__ == '__main__':
    hqt = HQTrivia()
    hqt.main_loop()
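
The script imports a local Searcher module that is not part of this commit, so it will not run as-is without it. Below is a minimal stand-in sketch, assuming only the Searcher.Searcher().search_answer(query, answers) interface called above; the real module's behavior (presumably querying a search engine and scoring each candidate answer) is an assumption, and this stub just echoes what it receives so the capture loop can be exercised end to end.

# Searcher.py -- hypothetical stand-in for the module imported above;
# the real Searcher is not included in this commit.
class Searcher:
    def search_answer(self, query, answers):
        # Placeholder behavior: echo the OCR'd question and the candidate
        # answers. The actual module presumably searches the web and ranks
        # the answers, but that logic is not shown in this diff.
        print("Question:", query)
        for i, answer in enumerate(answers, 1):
            print("  %d. %s" % (i, answer))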
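
main_loop() also hardcodes cv2.VideoCapture(1), which assumes the webcam pointed at the phone is the second video device. If the preview window stays black, a short probe like this (a standalone sketch, not part of the commit) lists which indices OpenCV can actually open:

# probe_cameras.py -- hypothetical helper for picking the right device index.
import cv2

for index in range(4):  # check the first few device indices
    cap = cv2.VideoCapture(index)
    ok = cap.isOpened() and cap.read()[0]  # device opened and returned a frame
    print("camera index %d: %s" % (index, "available" if ok else "not available"))
    cap.release()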
