themblem/detection/qr_detection.py
2024-09-01 21:51:50 +01:00

448 lines
16 KiB
Python

#!/usr/bin/env python
# -*- encoding: utf-8 -*-
'''
@File : qr_detection.py
@Contact : zpyovo@hotmail.com
@License : (C)Copyright 2018-2019, Lab501-TransferLearning-SCUT
@Description :
@Modify Time @Author @Version @Description
------------ ------- -------- -----------
2022/4/22 09:08 Pengyu Zhang 1.0 None
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from PIL import Image, ImageEnhance
import numpy as np
import cv2
def solution_1_1(img, detector):
    """Brightness x2 enhancement, then try decoding at full, 1/4 and 1/8 scale.

    Args:
        img: PIL.Image of the page containing the QR code.
        detector: QR detector whose ``detectAndDecode`` returns
            ``(texts, points)`` (WeChat-style API — NOTE(review): the stock
            ``cv2.QRCodeDetector`` returns three values; confirm which
            detector object is passed in).

    Returns:
        ``(texts, points)`` where points are the first code's corners mapped
        back to the full-resolution frame, or ``(None, None)`` on failure.
    """
    enhanced = np.array(ImageEnhance.Brightness(img).enhance(2))
    for scale in (1, 4, 8):
        candidate = enhanced if scale == 1 else cv2.resize(
            enhanced, None, fx=1 / scale, fy=1 / scale,
            interpolation=cv2.INTER_CUBIC)
        res, points = detector.detectAndDecode(candidate)
        if len(res) != 0:
            # points[0] is in the downscaled frame; rescale to the original.
            return res, points[0] * scale
    return None, None
def solution_1_2(img, detector):
    """Brightness x3 enhancement, then try decoding at full, 1/4 and 1/8 scale.

    Args:
        img: PIL.Image of the page containing the QR code.
        detector: QR detector returning ``(texts, points)`` from
            ``detectAndDecode`` (WeChat-style API).

    Returns:
        ``(texts, full-resolution corner points)`` or ``(None, None)``.
    """
    enhanced = np.array(ImageEnhance.Brightness(img).enhance(3))
    for scale in (1, 4, 8):
        candidate = enhanced if scale == 1 else cv2.resize(
            enhanced, None, fx=1 / scale, fy=1 / scale,
            interpolation=cv2.INTER_CUBIC)
        res, points = detector.detectAndDecode(candidate)
        if len(res) != 0:
            # Map the corners from the downscaled frame back to the original.
            return res, points[0] * scale
    return None, None
def solution_2_1(img, detector):
    """Contrast x1.5 then brightness x2, decoding at full, 1/4 and 1/8 scale.

    Args:
        img: PIL.Image of the page containing the QR code.
        detector: QR detector returning ``(texts, points)`` from
            ``detectAndDecode`` (WeChat-style API).

    Returns:
        ``(texts, full-resolution corner points)`` or ``(None, None)``.
    """
    boosted = ImageEnhance.Contrast(img).enhance(1.5)
    enhanced = np.array(ImageEnhance.Brightness(boosted).enhance(2))
    for scale in (1, 4, 8):
        candidate = enhanced if scale == 1 else cv2.resize(
            enhanced, None, fx=1 / scale, fy=1 / scale,
            interpolation=cv2.INTER_CUBIC)
        res, points = detector.detectAndDecode(candidate)
        if len(res) != 0:
            # Map the corners from the downscaled frame back to the original.
            return res, points[0] * scale
    return None, None
def solution_2_2(img, detector):
    """Contrast x1.5 then brightness x3, decoding at full, 1/4 and 1/8 scale.

    Args:
        img: PIL.Image of the page containing the QR code.
        detector: QR detector returning ``(texts, points)`` from
            ``detectAndDecode`` (WeChat-style API).

    Returns:
        ``(texts, full-resolution corner points)`` or ``(None, None)``.
    """
    boosted = ImageEnhance.Contrast(img).enhance(1.5)
    enhanced = np.array(ImageEnhance.Brightness(boosted).enhance(3))
    for scale in (1, 4, 8):
        candidate = enhanced if scale == 1 else cv2.resize(
            enhanced, None, fx=1 / scale, fy=1 / scale,
            interpolation=cv2.INTER_CUBIC)
        res, points = detector.detectAndDecode(candidate)
        if len(res) != 0:
            # Map the corners from the downscaled frame back to the original.
            return res, points[0] * scale
    return None, None
def solution_3_1(img, detector):
    """Brightness x2 then contrast x1.5, decoding at full, 1/4 and 1/8 scale.

    Args:
        img: PIL.Image of the page containing the QR code.
        detector: QR detector returning ``(texts, points)`` from
            ``detectAndDecode`` (WeChat-style API).

    Returns:
        ``(texts, full-resolution corner points)`` or ``(None, None)``.
    """
    boosted = ImageEnhance.Brightness(img).enhance(2)
    enhanced = np.array(ImageEnhance.Contrast(boosted).enhance(1.5))
    for scale in (1, 4, 8):
        candidate = enhanced if scale == 1 else cv2.resize(
            enhanced, None, fx=1 / scale, fy=1 / scale,
            interpolation=cv2.INTER_CUBIC)
        res, points = detector.detectAndDecode(candidate)
        if len(res) != 0:
            # Map the corners from the downscaled frame back to the original.
            return res, points[0] * scale
    return None, None
def solution_3_2(img, detector):
    """Brightness x3 then contrast x1.5, decoding at full, 1/4 and 1/8 scale.

    Args:
        img: PIL.Image of the page containing the QR code.
        detector: QR detector returning ``(texts, points)`` from
            ``detectAndDecode`` (WeChat-style API).

    Returns:
        ``(texts, full-resolution corner points)`` or ``(None, None)``.
    """
    boosted = ImageEnhance.Brightness(img).enhance(3)
    enhanced = np.array(ImageEnhance.Contrast(boosted).enhance(1.5))
    for scale in (1, 4, 8):
        candidate = enhanced if scale == 1 else cv2.resize(
            enhanced, None, fx=1 / scale, fy=1 / scale,
            interpolation=cv2.INTER_CUBIC)
        res, points = detector.detectAndDecode(candidate)
        if len(res) != 0:
            # Map the corners from the downscaled frame back to the original.
            return res, points[0] * scale
    return None, None
def solution_4_1(img, detector):
    """Brightness x2, contrast x1.5 and sharpness x1.5, then multi-scale decode.

    Args:
        img: PIL.Image of the page containing the QR code.
        detector: QR detector returning ``(texts, points)`` from
            ``detectAndDecode`` (WeChat-style API).

    Returns:
        ``(texts, full-resolution corner points)`` or ``(None, None)``.
    """
    boosted = ImageEnhance.Brightness(img).enhance(2)
    boosted = ImageEnhance.Contrast(boosted).enhance(1.5)
    enhanced = np.array(ImageEnhance.Sharpness(boosted).enhance(1.5))
    for scale in (1, 4, 8):
        candidate = enhanced if scale == 1 else cv2.resize(
            enhanced, None, fx=1 / scale, fy=1 / scale,
            interpolation=cv2.INTER_CUBIC)
        res, points = detector.detectAndDecode(candidate)
        if len(res) != 0:
            # Map the corners from the downscaled frame back to the original.
            return res, points[0] * scale
    return None, None
def solution_4_2(img, detector):
    """Brightness x3, contrast x1.5 and sharpness x1.5, then multi-scale decode.

    Args:
        img: PIL.Image of the page containing the QR code.
        detector: QR detector returning ``(texts, points)`` from
            ``detectAndDecode`` (WeChat-style API).

    Returns:
        ``(texts, full-resolution corner points)`` or ``(None, None)``.
    """
    boosted = ImageEnhance.Brightness(img).enhance(3)
    boosted = ImageEnhance.Contrast(boosted).enhance(1.5)
    enhanced = np.array(ImageEnhance.Sharpness(boosted).enhance(1.5))
    for scale in (1, 4, 8):
        candidate = enhanced if scale == 1 else cv2.resize(
            enhanced, None, fx=1 / scale, fy=1 / scale,
            interpolation=cv2.INTER_CUBIC)
        res, points = detector.detectAndDecode(candidate)
        if len(res) != 0:
            # Map the corners from the downscaled frame back to the original.
            return res, points[0] * scale
    return None, None
def solution_5(img, detector):
    """Decode after a 4x downscale, with no photometric enhancement.

    Args:
        img: PIL.Image (or ndarray) of the page containing the QR code.
        detector: QR detector returning ``(texts, points)`` from
            ``detectAndDecode`` (WeChat-style API).

    Returns:
        ``(texts, full-resolution corner points)`` or ``(None, None)``.
    """
    small = cv2.resize(np.array(img), None, fx=1 / 4, fy=1 / 4,
                       interpolation=cv2.INTER_CUBIC)
    res, points = detector.detectAndDecode(small)
    if len(res) == 0:
        return None, None
    # Corners are in the 1/4-scale frame; rescale to the original.
    return res, points[0] * 4
def solution_6(img, detector):
    """Decode after an 8x downscale, with no photometric enhancement.

    Args:
        img: PIL.Image (or ndarray) of the page containing the QR code.
        detector: QR detector returning ``(texts, points)`` from
            ``detectAndDecode`` (WeChat-style API).

    Returns:
        ``(texts, full-resolution corner points)`` or ``(None, None)``.
    """
    small = cv2.resize(np.array(img), None, fx=1 / 8, fy=1 / 8,
                       interpolation=cv2.INTER_CUBIC)
    res, points = detector.detectAndDecode(small)
    if len(res) == 0:
        return None, None
    # Corners are in the 1/8-scale frame; rescale to the original.
    return res, points[0] * 8
def rotation_waffine(rotation_region, input):
    """Estimate the QR region's skew angle and rotate the full page by it.

    Pipeline: brighten the crop -> greyscale -> Otsu threshold -> erode ->
    Canny edges -> largest contour -> ``minAreaRect`` angle -> rotate the
    whole ``input`` page by that angle.

    Args:
        rotation_region: PIL.Image crop around the detected QR code
            (see ``rotation_region_crop``).
        input: the full page to deskew (PIL.Image or ndarray).

    Returns:
        The deskewed page as an RGB PIL.Image when a non-zero angle was
        found; otherwise ``input`` unchanged. NOTE(review): on the no-op
        paths the return type is whatever the caller passed in, which may
        differ from the rotated-path type — confirm callers handle both.
    """
    # Heavy brightening pushes the paper to white so Otsu isolates the code.
    bright = ImageEnhance.Brightness(rotation_region).enhance(5)
    grey = cv2.cvtColor(np.array(bright), cv2.COLOR_BGR2GRAY)
    _, thresh = cv2.threshold(grey, 0, 255,
                              cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    # A large rectangular erosion merges the QR modules into one solid blob.
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (50, 50))
    eroded = cv2.erode(thresh, kernel)
    edges = cv2.Canny(eroded, 100, 300)
    contours, _ = cv2.findContours(edges, cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_SIMPLE)
    if not contours:
        # Nothing to fit an angle to; hand the page back untouched.
        return input
    # The contour with the most points is taken to be the QR blob outline.
    cnt = max(contours, key=lambda c: c.size)
    angle = cv2.minAreaRect(cnt)[2]
    if angle == 0.0:
        return input
    # minAreaRect reports angles in (0, 90]; values past 45 describe a
    # nearly-upright rectangle, so fold them into a small negative correction.
    if angle > 45.0:
        angle -= 90
    height, width = np.array(input).shape[:2]
    center = (width // 2, height // 2)
    M = cv2.getRotationMatrix2D(center, angle, 1.0)
    rotated = cv2.warpAffine(np.array(input), M, (width, height),
                             flags=cv2.INTER_CUBIC,
                             borderMode=cv2.BORDER_REPLICATE)
    return Image.fromarray(rotated).convert("RGB")
def rotation_region_crop(input, points):
    """Crop a padded region around the QR code for skew estimation.

    Args:
        input: full page image (PIL.Image or ndarray).
        points: QR corner coordinates; rows 0 and 1 are used as the two
            opposite corners ``(x1, y1)``–``(x2, y2)`` of the crop box.
            NOTE(review): in a 4x2 corner array rows 0 and 1 are often
            *adjacent* corners — confirm the detector's point ordering,
            otherwise the crop can be nearly degenerate in one axis.

    Returns:
        ndarray crop expanded by 10% of the box size on each side, clamped
        to the image bounds, with a 50 px replicated border all round.
    """
    page = np.array(input)
    x1, y1 = int(points[0, 0]), int(points[0, 1])
    x2, y2 = int(points[1, 0]), int(points[1, 1])
    margin_x = int((x2 - x1) * 0.1)
    margin_y = int((y2 - y1) * 0.1)
    x_a = max(x1 - margin_x, 0)
    y_a = max(y1 - margin_y, 0)
    x_b = min(x2 + margin_x, page.shape[1])
    y_b = min(y2 + margin_y, page.shape[0])
    # `value` is ignored under BORDER_REPLICATE (it only applies to
    # BORDER_CONSTANT); kept for parity with the original call.
    return cv2.copyMakeBorder(page[y_a:y_b, x_a:x_b], 50, 50, 50, 50,
                              borderType=cv2.BORDER_REPLICATE, value=0)
def qr_detetor(input, detector):
    """Run the enhancement pipelines in order until one decodes the QR code.

    Order (kept from the original hand-rolled chain): brightness-only first,
    then plain downscales, then the combined contrast/brightness/sharpness
    variants. The first pipeline that decodes wins; the page is then cropped
    around the code and deskewed.

    Args:
        input: PIL.Image of the page to scan.
        detector: QR detector returning ``(texts, points)`` from
            ``detectAndDecode`` (WeChat-style API).

    Returns:
        ``(texts, points, deskewed_page)`` on success, else
        ``(None, None, None)``.
    """
    # (label, pipeline, log_on_hit) — the first two pipelines are not logged,
    # matching the original behavior where their prints were commented out.
    attempts = [
        ('solution_1_1', solution_1_1, False),
        ('solution_1_2', solution_1_2, False),
        ('solution_5', solution_5, True),
        ('solution_6', solution_6, True),
        ('solution_2_1', solution_2_1, True),
        ('solution_2_2', solution_2_2, True),
        ('solution_3_1', solution_3_1, True),
        ('solution_3_2', solution_3_2, True),
        ('solution_4_1', solution_4_1, True),
        ('solution_4_2', solution_4_2, True),
    ]
    for label, solve, log_on_hit in attempts:
        res, points = solve(input, detector)
        if res is None:
            continue
        if log_on_hit:
            print(label)
        rotation_region = rotation_region_crop(input, points)
        img = rotation_waffine(Image.fromarray(rotation_region), input)
        return res, points, img
    return None, None, None