SIFT_app.py
#!/usr/bin/env python
from PyQt5 import QtCore, QtGui, QtWidgets
from python_qt_binding import loadUi
import cv2
import sys
import numpy as np

class My_App(QtWidgets.QMainWindow):

    def __init__(self):
        super(My_App, self).__init__()
        loadUi("./SIFT_app.ui", self)

        self._cam_id = 0
        self._cam_fps = 2
        self._is_cam_enabled = False
        self._is_template_loaded = False

        self.template_path = ""

        self.browse_button.clicked.connect(self.SLOT_browse_button)
        self.toggle_cam_button.clicked.connect(self.SLOT_toggle_camera)

        # Open the camera and request a small frame size to keep SIFT fast.
        self._camera_device = cv2.VideoCapture(self._cam_id)
        self._camera_device.set(cv2.CAP_PROP_FRAME_WIDTH, 320)
        self._camera_device.set(cv2.CAP_PROP_FRAME_HEIGHT, 240)

        # Poll the camera at the configured frame rate.
        self._timer = QtCore.QTimer(self)
        self._timer.timeout.connect(self.SLOT_query_camera)
        self._timer.setInterval(int(1000 / self._cam_fps))

        # SIFT detector plus a FLANN matcher (algorithm=1 selects the KD-tree index).
        self.sift = cv2.SIFT_create()
        self.kp_image, self.desc_image = [], []
        self.image = None

        index_params = dict(algorithm=1, trees=5)
        search_params = dict(checks=50)
        self.flann = cv2.FlannBasedMatcher(index_params, search_params)
    def SLOT_browse_button(self):
        dlg = QtWidgets.QFileDialog()
        dlg.setFileMode(QtWidgets.QFileDialog.ExistingFile)

        if dlg.exec_():
            self.template_path = dlg.selectedFiles()[0]
            pixmap = QtGui.QPixmap(self.template_path)
            self.template_label.setPixmap(pixmap)

            # Pre-compute the template's SIFT keypoints and descriptors once,
            # so the camera callback only has to process the live frame.
            self.image = cv2.imread(self.template_path, cv2.IMREAD_GRAYSCALE)
            self.kp_image, self.desc_image = self.sift.detectAndCompute(self.image, None)
            self._is_template_loaded = True
            print("Loaded template image file: " + self.template_path)
    def convert_cv_to_pixmap(self, cv_img):
        # Convert a BGR OpenCV image into a QPixmap for display in a QLabel.
        cv_img = cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB)
        height, width, channel = cv_img.shape
        bytesPerLine = channel * width
        q_img = QtGui.QImage(cv_img.data, width, height, bytesPerLine, QtGui.QImage.Format_RGB888)
        return QtGui.QPixmap.fromImage(q_img)
    def SLOT_query_camera(self):
        ret, frame = self._camera_device.read()
        if not ret:
            return

        # Without a loaded template there is nothing to match; just show the frame.
        if not self._is_template_loaded:
            self.live_image_label.setPixmap(self.convert_cv_to_pixmap(frame))
            return

        grayframe = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        kp_grayframe, desc_grayframe = self.sift.detectAndCompute(grayframe, None)
        if desc_grayframe is None:
            self.live_image_label.setPixmap(self.convert_cv_to_pixmap(frame))
            return

        # FLANN k-nearest-neighbour matching followed by Lowe's ratio test.
        matches = self.flann.knnMatch(self.desc_image, desc_grayframe, k=2)
        good_points = [m for m, n in matches if m.distance < 0.7 * n.distance]

        if len(good_points) > 10:
            # Enough good matches: estimate a homography with RANSAC and draw
            # the template's outline on the live frame.
            query_points = np.float32([self.kp_image[m.queryIdx].pt for m in good_points]).reshape(-1, 1, 2)
            train_points = np.float32([kp_grayframe[m.trainIdx].pt for m in good_points]).reshape(-1, 1, 2)
            matrix, _ = cv2.findHomography(query_points, train_points, cv2.RANSAC, 5.0)

            if matrix is not None:
                h, w = self.image.shape
                pts = np.float32([[0, 0], [0, h], [w, h], [w, 0]]).reshape(-1, 1, 2)
                dst = cv2.perspectiveTransform(pts, matrix)
                frame = cv2.polylines(frame, [np.int32(dst)], True, (255, 0, 0), 3)
            pixmap = self.convert_cv_to_pixmap(frame)
        else:
            # Too few matches: show the raw keypoint matches instead.
            matches_image = cv2.drawMatches(self.image, self.kp_image, grayframe, kp_grayframe, good_points, None)
            pixmap = self.convert_cv_to_pixmap(matches_image)

        self.live_image_label.setPixmap(pixmap)
    def SLOT_toggle_camera(self):
        if self._is_cam_enabled:
            self._timer.stop()
            self._is_cam_enabled = False
            self.toggle_cam_button.setText("&Enable camera")
        else:
            self._timer.start()
            self._is_cam_enabled = True
            self.toggle_cam_button.setText("&Disable camera")
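

# ----------------------------------------------------------------------
# Illustrative sketch (not wired into the app): a minimal, headless run of
# the same SIFT + FLANN + homography pipeline used in SLOT_query_camera,
# assuming a template image and a single frame already saved on disk. The
# file names below are hypothetical placeholders, not files shipped with
# this repository.
def headless_match_demo(template_path="template.png", frame_path="frame.png"):
    sift = cv2.SIFT_create()
    template = cv2.imread(template_path, cv2.IMREAD_GRAYSCALE)
    frame = cv2.imread(frame_path, cv2.IMREAD_GRAYSCALE)
    if template is None or frame is None:
        raise FileNotFoundError("template or frame image not found")

    kp_t, desc_t = sift.detectAndCompute(template, None)
    kp_f, desc_f = sift.detectAndCompute(frame, None)
    if desc_t is None or desc_f is None:
        print("No SIFT descriptors found in one of the images.")
        return

    # Same matcher setup as __init__: FLANN with a KD-tree index
    # (algorithm=1) and Lowe's 0.7 ratio test.
    flann = cv2.FlannBasedMatcher(dict(algorithm=1, trees=5), dict(checks=50))
    matches = flann.knnMatch(desc_t, desc_f, k=2)
    good = [m for m, n in matches if m.distance < 0.7 * n.distance]

    if len(good) > 10:
        src = np.float32([kp_t[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
        dst = np.float32([kp_f[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
        matrix, _ = cv2.findHomography(src, dst, cv2.RANSAC, 5.0)
        print("Estimated homography:\n", matrix)
    else:
        print("Only %d good matches; homography not estimated." % len(good))

# Example call (paths are placeholders):
#   headless_match_demo("my_template.png", "my_frame.png")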

def main():
    app = QtWidgets.QApplication(sys.argv)
    myApp = My_App()
    myApp.show()
    sys.exit(app.exec_())


if __name__ == "__main__":
    main()