Mirror of https://github.com/283375/arcaea-offline-ocr.git
Synced 2025-04-22 15:00:18 +00:00

Compare commits: 580744b641 ... 0d8e4dea8e (6 commits)

Commits:
0d8e4dea8e
2b01f68a73
897705d23d
c6aba3a7e9
f7cfb84135
8d33491d9b
@@ -1,5 +1,4 @@
from .crop import *
from .device import *
from .mask import *
from .ocr import *
from .utils import *
@@ -8,7 +8,6 @@ from PIL import Image
from ....crop import crop_xywh
from ....ocr import FixRects, ocr_digits_by_contour_knn, preprocess_hog
from ....phash_db import ImagePHashDatabase
from ....sift_db import SIFTDatabase
from ....types import Mat, cv2_ml_KNearest
from ....utils import construct_int_xywh_rect
from ...shared import B30OcrResultItem
src/arcaea_offline_ocr/device/ocr.py (new file, 101 lines)
@@ -0,0 +1,101 @@
import cv2
import numpy as np
from PIL import Image

from ..crop import crop_xywh
from ..ocr import (
    FixRects,
    ocr_digit_samples_knn,
    ocr_digits_by_contour_knn,
    preprocess_hog,
    resize_fill_square,
)
from ..phash_db import ImagePHashDatabase
from .roi.extractor import DeviceRoiExtractor
from .roi.masker import DeviceRoiMasker


class DeviceOcr:
    def __init__(
        self,
        extractor: DeviceRoiExtractor,
        masker: DeviceRoiMasker,
        knn_model: cv2.ml.KNearest,
        phash_db: ImagePHashDatabase,
    ):
        self.extractor = extractor
        self.masker = masker
        self.knn_model = knn_model
        self.phash_db = phash_db

    def pfl(self, roi_gray: cv2.Mat, factor: float = 1.25):
        contours, _ = cv2.findContours(
            roi_gray, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE
        )
        filtered_contours = [c for c in contours if cv2.contourArea(c) >= 5 * factor]
        rects = [cv2.boundingRect(c) for c in filtered_contours]
        rects = FixRects.connect_broken(rects, roi_gray.shape[1], roi_gray.shape[0])

        filtered_rects = [r for r in rects if r[2] >= 5 * factor and r[3] >= 6 * factor]
        filtered_rects = FixRects.split_connected(roi_gray, filtered_rects)
        filtered_rects = sorted(filtered_rects, key=lambda r: r[0])

        roi_ocr = roi_gray.copy()
        filtered_contours_flattened = {tuple(c.flatten()) for c in filtered_contours}
        for contour in contours:
            if tuple(contour.flatten()) in filtered_contours_flattened:
                continue
            roi_ocr = cv2.fillPoly(roi_ocr, [contour], [0])
        digit_rois = [
            resize_fill_square(crop_xywh(roi_ocr, r), 20)
            for r in sorted(filtered_rects, key=lambda r: r[0])
        ]

        samples = preprocess_hog(digit_rois)
        return ocr_digit_samples_knn(samples, self.knn_model)

    def pure(self):
        return self.pfl(self.masker.pure(self.extractor.pure))

    def far(self):
        return self.pfl(self.masker.far(self.extractor.far))

    def lost(self):
        return self.pfl(self.masker.lost(self.extractor.lost))

    def score(self):
        roi = self.masker.score(self.extractor.score)
        contours, _ = cv2.findContours(roi, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
        for contour in contours:
            x, y, w, h = cv2.boundingRect(contour)
            if h < roi.shape[0] * 0.6:
                roi = cv2.fillPoly(roi, [contour], [0])
        return ocr_digits_by_contour_knn(roi, self.knn_model)

    def rating_class(self):
        roi = self.extractor.rating_class
        results = [
            self.masker.rating_class_pst(roi),
            self.masker.rating_class_prs(roi),
            self.masker.rating_class_ftr(roi),
            self.masker.rating_class_byd(roi),
        ]
        return max(enumerate(results), key=lambda i: np.count_nonzero(i[1]))[0]

    def max_recall(self):
        return ocr_digits_by_contour_knn(
            self.masker.max_recall(self.extractor.max_recall), self.knn_model
        )

    def clear_status(self):
        roi = self.extractor.clear_status
        results = [
            self.masker.clear_status_track_lost(roi),
            self.masker.clear_status_track_complete(roi),
            self.masker.clear_status_full_recall(roi),
            self.masker.clear_status_pure_memory(roi),
        ]
        return max(enumerate(results), key=lambda i: np.count_nonzero(i[1]))[0]

    def song_id(self):
        return self.phash_db.lookup_image(Image.fromarray(self.extractor.jacket))[0]
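Reviewer note: a minimal usage sketch of the new DeviceOcr pipeline, not part of the diff. The file paths, the sizes-class import path, and the kNN/pHash constructor arguments are assumptions based on the class names above; the masker is passed as a class because its methods are classmethods in this revision.

import cv2

from arcaea_offline_ocr.device.ocr import DeviceOcr
from arcaea_offline_ocr.device.roi.extractor import DeviceRoiExtractor
from arcaea_offline_ocr.device.roi.masker import DeviceAutoRoiMaskerT2
from arcaea_offline_ocr.device.roi.definitions.auto import DeviceAutoRoiSizesT2  # module path assumed
from arcaea_offline_ocr.phash_db import ImagePHashDatabase

img = cv2.imread("screenshot.jpg", cv2.IMREAD_COLOR)      # BGR result screenshot, path assumed
sizes = DeviceAutoRoiSizesT2(img.shape[1], img.shape[0])   # (w, h) of the screenshot
extractor = DeviceRoiExtractor(img, sizes)

# Pre-trained digit kNN model and jacket pHash database; loading API and
# constructor arguments are assumptions, they may differ in the actual project.
knn_model = cv2.ml.KNearest_load("digits.knn.dat")
phash_db = ImagePHashDatabase("jackets.phash.db")

ocr = DeviceOcr(extractor, DeviceAutoRoiMaskerT2, knn_model, phash_db)
print(ocr.song_id(), ocr.score(), ocr.pure(), ocr.far(), ocr.lost(), ocr.max_recall())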
@@ -0,0 +1,3 @@
from .common import DeviceAutoRoiSizes
from .t1 import DeviceAutoRoiSizesT1
from .t2 import DeviceAutoRoiSizesT2
@@ -1,7 +1,7 @@
-from ..common import Sizes
+from ..common import DeviceRoiSizes


-class AutoSizes(Sizes):
+class DeviceAutoRoiSizes(DeviceRoiSizes):
     def __init__(self, w: int, h: int):
         self.w = w
         self.h = h
@@ -1,7 +1,7 @@
-from .common import AutoSizes
+from .common import DeviceAutoRoiSizes


-class AutoSizesT1(AutoSizes):
+class DeviceAutoRoiSizesT1(DeviceAutoRoiSizes):
     @property
     def factor(self):
         return (
@@ -1,7 +1,7 @@
-from .common import AutoSizes
+from .common import DeviceAutoRoiSizes


-class AutoSizesT2(AutoSizes):
+class DeviceAutoRoiSizesT2(DeviceAutoRoiSizes):
     @property
     def factor(self):
         return (
@@ -3,7 +3,7 @@ from typing import Tuple
 Rect = Tuple[int, int, int, int]


-class Sizes:
+class DeviceRoiSizes:
     pure: Rect
     far: Rect
     lost: Rect
src/arcaea_offline_ocr/device/roi/extractor/__init__.py (new file, 1 line)
@@ -0,0 +1 @@
from .common import DeviceRoiExtractor
@@ -1,11 +1,11 @@
 import cv2

-from ..crop import crop_xywh
-from .sizes.common import Sizes
+from ....crop import crop_xywh
+from ..definitions.common import DeviceRoiSizes


-class Extractor:
-    def __init__(self, img: cv2.Mat, sizes: Sizes):
+class DeviceRoiExtractor:
+    def __init__(self, img: cv2.Mat, sizes: DeviceRoiSizes):
         self.img = img
         self.sizes = sizes

@@ -28,6 +28,10 @@ class Extractor:
     def score(self):
         return crop_xywh(self.img, self.__construct_int_rect(self.sizes.score))

+    @property
+    def jacket(self):
+        return crop_xywh(self.img, self.__construct_int_rect(self.sizes.jacket))
+
     @property
     def rating_class(self):
         return crop_xywh(self.img, self.__construct_int_rect(self.sizes.rating_class))
src/arcaea_offline_ocr/device/roi/masker/__init__.py (new file, 2 lines)
@@ -0,0 +1,2 @@
from .auto import *
from .common import DeviceRoiMasker
@@ -0,0 +1,3 @@
from .common import DeviceAutoRoiMasker
from .t1 import DeviceAutoRoiMaskerT1
from .t2 import DeviceAutoRoiMaskerT2
src/arcaea_offline_ocr/device/roi/masker/auto/common.py (new file, 5 lines)
@@ -0,0 +1,5 @@
from ..common import DeviceRoiMasker


class DeviceAutoRoiMasker(DeviceRoiMasker):
    ...
src/arcaea_offline_ocr/device/roi/masker/auto/t1.py (new file, 123 lines)
@@ -0,0 +1,123 @@
import cv2
import numpy as np

from .common import DeviceAutoRoiMasker

GRAY_BGR_MIN = np.array([50] * 3, np.uint8)
GRAY_BGR_MAX = np.array([160] * 3, np.uint8)

WHITE_HSV_MIN = np.array([0, 0, 240], np.uint8)
WHITE_HSV_MAX = np.array([179, 10, 255], np.uint8)

PST_HSV_MIN = np.array([100, 50, 80], np.uint8)
PST_HSV_MAX = np.array([100, 255, 255], np.uint8)

PRS_HSV_MIN = np.array([43, 40, 75], np.uint8)
PRS_HSV_MAX = np.array([50, 155, 190], np.uint8)

FTR_HSV_MIN = np.array([149, 30, 0], np.uint8)
FTR_HSV_MAX = np.array([155, 181, 150], np.uint8)

BYD_HSV_MIN = np.array([170, 50, 50], np.uint8)
BYD_HSV_MAX = np.array([179, 210, 198], np.uint8)

TRACK_LOST_HSV_MIN = np.array([170, 75, 90], np.uint8)
TRACK_LOST_HSV_MAX = np.array([175, 170, 160], np.uint8)

TRACK_COMPLETE_HSV_MIN = np.array([140, 0, 50], np.uint8)
TRACK_COMPLETE_HSV_MAX = np.array([145, 50, 130], np.uint8)

FULL_RECALL_HSV_MIN = np.array([140, 60, 80], np.uint8)
FULL_RECALL_HSV_MAX = np.array([150, 130, 145], np.uint8)

PURE_MEMORY_HSV_MIN = np.array([90, 70, 80], np.uint8)
PURE_MEMORY_HSV_MAX = np.array([110, 200, 175], np.uint8)


class DeviceAutoRoiMaskerT1(DeviceAutoRoiMasker):
    @classmethod
    def gray(cls, roi_bgr: cv2.Mat) -> cv2.Mat:
        bgr_value_equal_mask = np.max(roi_bgr, axis=2) - np.min(roi_bgr, axis=2) <= 5
        img_bgr = roi_bgr.copy()
        img_bgr[~bgr_value_equal_mask] = np.array([0, 0, 0], roi_bgr.dtype)
        img_bgr = cv2.erode(img_bgr, cv2.getStructuringElement(cv2.MORPH_RECT, (2, 2)))
        img_bgr = cv2.dilate(img_bgr, cv2.getStructuringElement(cv2.MORPH_RECT, (1, 1)))
        return cv2.inRange(img_bgr, GRAY_BGR_MIN, GRAY_BGR_MAX)

    @classmethod
    def pure(cls, roi_bgr: cv2.Mat) -> cv2.Mat:
        return cls.gray(roi_bgr)

    @classmethod
    def far(cls, roi_bgr: cv2.Mat) -> cv2.Mat:
        return cls.gray(roi_bgr)

    @classmethod
    def lost(cls, roi_bgr: cv2.Mat) -> cv2.Mat:
        return cls.gray(roi_bgr)

    @classmethod
    def score(cls, roi_bgr: cv2.Mat) -> cv2.Mat:
        return cv2.inRange(
            cv2.cvtColor(roi_bgr, cv2.COLOR_BGR2HSV), WHITE_HSV_MIN, WHITE_HSV_MAX
        )

    @classmethod
    def rating_class_pst(cls, roi_bgr: cv2.Mat) -> cv2.Mat:
        return cv2.inRange(
            cv2.cvtColor(roi_bgr, cv2.COLOR_BGR2HSV), PST_HSV_MIN, PST_HSV_MAX
        )

    @classmethod
    def rating_class_prs(cls, roi_bgr: cv2.Mat) -> cv2.Mat:
        return cv2.inRange(
            cv2.cvtColor(roi_bgr, cv2.COLOR_BGR2HSV), PRS_HSV_MIN, PRS_HSV_MAX
        )

    @classmethod
    def rating_class_ftr(cls, roi_bgr: cv2.Mat) -> cv2.Mat:
        return cv2.inRange(
            cv2.cvtColor(roi_bgr, cv2.COLOR_BGR2HSV), FTR_HSV_MIN, FTR_HSV_MAX
        )

    @classmethod
    def rating_class_byd(cls, roi_bgr: cv2.Mat) -> cv2.Mat:
        return cv2.inRange(
            cv2.cvtColor(roi_bgr, cv2.COLOR_BGR2HSV), BYD_HSV_MIN, BYD_HSV_MAX
        )

    @classmethod
    def max_recall(cls, roi_bgr: cv2.Mat) -> cv2.Mat:
        return cls.gray(roi_bgr)

    @classmethod
    def clear_status_track_lost(cls, roi_bgr: cv2.Mat) -> cv2.Mat:
        return cv2.inRange(
            cv2.cvtColor(roi_bgr, cv2.COLOR_BGR2HSV),
            TRACK_LOST_HSV_MIN,
            TRACK_LOST_HSV_MAX,
        )

    @classmethod
    def clear_status_track_complete(cls, roi_bgr: cv2.Mat) -> cv2.Mat:
        return cv2.inRange(
            cv2.cvtColor(roi_bgr, cv2.COLOR_BGR2HSV),
            TRACK_COMPLETE_HSV_MIN,
            TRACK_COMPLETE_HSV_MAX,
        )

    @classmethod
    def clear_status_full_recall(cls, roi_bgr: cv2.Mat) -> cv2.Mat:
        return cv2.inRange(
            cv2.cvtColor(roi_bgr, cv2.COLOR_BGR2HSV),
            FULL_RECALL_HSV_MIN,
            FULL_RECALL_HSV_MAX,
        )

    @classmethod
    def clear_status_pure_memory(cls, roi_bgr: cv2.Mat) -> cv2.Mat:
        return cv2.inRange(
            cv2.cvtColor(roi_bgr, cv2.COLOR_BGR2HSV),
            PURE_MEMORY_HSV_MIN,
            PURE_MEMORY_HSV_MAX,
        )
src/arcaea_offline_ocr/device/roi/masker/auto/t2.py (new file, 128 lines)
@@ -0,0 +1,128 @@
import cv2
import numpy as np

from .common import DeviceAutoRoiMasker

PFL_HSV_MIN = np.array([0, 0, 248], np.uint8)
PFL_HSV_MAX = np.array([179, 10, 255], np.uint8)

WHITE_HSV_MIN = np.array([0, 0, 240], np.uint8)
WHITE_HSV_MAX = np.array([179, 10, 255], np.uint8)


PST_HSV_MIN = np.array([100, 50, 80], np.uint8)
PST_HSV_MAX = np.array([100, 255, 255], np.uint8)

PRS_HSV_MIN = np.array([43, 40, 75], np.uint8)
PRS_HSV_MAX = np.array([50, 155, 190], np.uint8)

FTR_HSV_MIN = np.array([149, 30, 0], np.uint8)
FTR_HSV_MAX = np.array([155, 181, 150], np.uint8)

BYD_HSV_MIN = np.array([170, 50, 50], np.uint8)
BYD_HSV_MAX = np.array([179, 210, 198], np.uint8)

MAX_RECALL_HSV_MIN = np.array([125, 0, 0], np.uint8)
MAX_RECALL_HSV_MAX = np.array([130, 100, 150], np.uint8)

TRACK_LOST_HSV_MIN = np.array([170, 75, 90], np.uint8)
TRACK_LOST_HSV_MAX = np.array([175, 170, 160], np.uint8)

TRACK_COMPLETE_HSV_MIN = np.array([140, 0, 50], np.uint8)
TRACK_COMPLETE_HSV_MAX = np.array([145, 50, 130], np.uint8)

FULL_RECALL_HSV_MIN = np.array([140, 60, 80], np.uint8)
FULL_RECALL_HSV_MAX = np.array([150, 130, 145], np.uint8)

PURE_MEMORY_HSV_MIN = np.array([90, 70, 80], np.uint8)
PURE_MEMORY_HSV_MAX = np.array([110, 200, 175], np.uint8)


class DeviceAutoRoiMaskerT2(DeviceAutoRoiMasker):
    @classmethod
    def pfl(cls, roi_bgr: cv2.Mat) -> cv2.Mat:
        return cv2.inRange(
            cv2.cvtColor(roi_bgr, cv2.COLOR_BGR2HSV), PFL_HSV_MIN, PFL_HSV_MAX
        )

    @classmethod
    def pure(cls, roi_bgr: cv2.Mat) -> cv2.Mat:
        return cls.pfl(roi_bgr)

    @classmethod
    def far(cls, roi_bgr: cv2.Mat) -> cv2.Mat:
        return cls.pfl(roi_bgr)

    @classmethod
    def lost(cls, roi_bgr: cv2.Mat) -> cv2.Mat:
        return cls.pfl(roi_bgr)

    @classmethod
    def score(cls, roi_bgr: cv2.Mat) -> cv2.Mat:
        return cv2.inRange(
            cv2.cvtColor(roi_bgr, cv2.COLOR_BGR2HSV), WHITE_HSV_MIN, WHITE_HSV_MAX
        )

    @classmethod
    def rating_class_pst(cls, roi_bgr: cv2.Mat) -> cv2.Mat:
        return cv2.inRange(
            cv2.cvtColor(roi_bgr, cv2.COLOR_BGR2HSV), PST_HSV_MIN, PST_HSV_MAX
        )

    @classmethod
    def rating_class_prs(cls, roi_bgr: cv2.Mat) -> cv2.Mat:
        return cv2.inRange(
            cv2.cvtColor(roi_bgr, cv2.COLOR_BGR2HSV), PRS_HSV_MIN, PRS_HSV_MAX
        )

    @classmethod
    def rating_class_ftr(cls, roi_bgr: cv2.Mat) -> cv2.Mat:
        return cv2.inRange(
            cv2.cvtColor(roi_bgr, cv2.COLOR_BGR2HSV), FTR_HSV_MIN, FTR_HSV_MAX
        )

    @classmethod
    def rating_class_byd(cls, roi_bgr: cv2.Mat) -> cv2.Mat:
        return cv2.inRange(
            cv2.cvtColor(roi_bgr, cv2.COLOR_BGR2HSV), BYD_HSV_MIN, BYD_HSV_MAX
        )

    @classmethod
    def max_recall(cls, roi_bgr: cv2.Mat) -> cv2.Mat:
        return cv2.inRange(
            cv2.cvtColor(roi_bgr, cv2.COLOR_BGR2HSV),
            MAX_RECALL_HSV_MIN,
            MAX_RECALL_HSV_MAX,
        )

    @classmethod
    def clear_status_track_lost(cls, roi_bgr: cv2.Mat) -> cv2.Mat:
        return cv2.inRange(
            cv2.cvtColor(roi_bgr, cv2.COLOR_BGR2HSV),
            TRACK_LOST_HSV_MIN,
            TRACK_LOST_HSV_MAX,
        )

    @classmethod
    def clear_status_track_complete(cls, roi_bgr: cv2.Mat) -> cv2.Mat:
        return cv2.inRange(
            cv2.cvtColor(roi_bgr, cv2.COLOR_BGR2HSV),
            TRACK_COMPLETE_HSV_MIN,
            TRACK_COMPLETE_HSV_MAX,
        )

    @classmethod
    def clear_status_full_recall(cls, roi_bgr: cv2.Mat) -> cv2.Mat:
        return cv2.inRange(
            cv2.cvtColor(roi_bgr, cv2.COLOR_BGR2HSV),
            FULL_RECALL_HSV_MIN,
            FULL_RECALL_HSV_MAX,
        )

    @classmethod
    def clear_status_pure_memory(cls, roi_bgr: cv2.Mat) -> cv2.Mat:
        return cv2.inRange(
            cv2.cvtColor(roi_bgr, cv2.COLOR_BGR2HSV),
            PURE_MEMORY_HSV_MIN,
            PURE_MEMORY_HSV_MAX,
        )
src/arcaea_offline_ocr/device/roi/masker/common.py (new file, 55 lines)
@@ -0,0 +1,55 @@
import cv2


class DeviceRoiMasker:
    @classmethod
    def pure(cls, roi_bgr: cv2.Mat) -> cv2.Mat:
        raise NotImplementedError()

    @classmethod
    def far(cls, roi_bgr: cv2.Mat) -> cv2.Mat:
        raise NotImplementedError()

    @classmethod
    def lost(cls, roi_bgr: cv2.Mat) -> cv2.Mat:
        raise NotImplementedError()

    @classmethod
    def score(cls, roi_bgr: cv2.Mat) -> cv2.Mat:
        raise NotImplementedError()

    @classmethod
    def rating_class_pst(cls, roi_bgr: cv2.Mat) -> cv2.Mat:
        raise NotImplementedError()

    @classmethod
    def rating_class_prs(cls, roi_bgr: cv2.Mat) -> cv2.Mat:
        raise NotImplementedError()

    @classmethod
    def rating_class_ftr(cls, roi_bgr: cv2.Mat) -> cv2.Mat:
        raise NotImplementedError()

    @classmethod
    def rating_class_byd(cls, roi_bgr: cv2.Mat) -> cv2.Mat:
        raise NotImplementedError()

    @classmethod
    def max_recall(cls, roi_bgr: cv2.Mat) -> cv2.Mat:
        raise NotImplementedError()

    @classmethod
    def clear_status_track_lost(cls, roi_bgr: cv2.Mat) -> cv2.Mat:
        raise NotImplementedError()

    @classmethod
    def clear_status_track_complete(cls, roi_bgr: cv2.Mat) -> cv2.Mat:
        raise NotImplementedError()

    @classmethod
    def clear_status_full_recall(cls, roi_bgr: cv2.Mat) -> cv2.Mat:
        raise NotImplementedError()

    @classmethod
    def clear_status_pure_memory(cls, roi_bgr: cv2.Mat) -> cv2.Mat:
        raise NotImplementedError()
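Reviewer note: a custom masker only needs to subclass DeviceRoiMasker and return binary masks for each region. A minimal hypothetical sketch (not part of the diff; the Otsu threshold and class name are made up for illustration):

import cv2

from arcaea_offline_ocr.device.roi.masker import DeviceRoiMasker


class ThresholdMasker(DeviceRoiMasker):
    # Hypothetical subclass: one Otsu threshold on the grayscale ROI for the
    # digit regions instead of the per-color ranges used by the T1/T2 maskers.
    @classmethod
    def _binarize(cls, roi_bgr: cv2.Mat) -> cv2.Mat:
        gray = cv2.cvtColor(roi_bgr, cv2.COLOR_BGR2GRAY)
        _, mask = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
        return mask

    @classmethod
    def pure(cls, roi_bgr: cv2.Mat) -> cv2.Mat:
        return cls._binarize(roi_bgr)

    @classmethod
    def far(cls, roi_bgr: cv2.Mat) -> cv2.Mat:
        return cls._binarize(roi_bgr)

    @classmethod
    def lost(cls, roi_bgr: cv2.Mat) -> cv2.Mat:
        return cls._binarize(roi_bgr)

The remaining classmethods (score, rating_class_*, clear_status_*) would still need color-based implementations like those in t1.py/t2.py before DeviceOcr.rating_class() or clear_status() could be used.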
@@ -1,53 +0,0 @@ (deleted file)
from typing import Tuple

from ...types import Mat
from .definition import DeviceV1

__all__ = [
    "crop_img",
    "crop_from_device_attr",
    "crop_to_pure",
    "crop_to_far",
    "crop_to_lost",
    "crop_to_max_recall",
    "crop_to_rating_class",
    "crop_to_score",
    "crop_to_title",
]


def crop_img(img: Mat, *, top: int, left: int, bottom: int, right: int):
    return img[top:bottom, left:right]


def crop_from_device_attr(img: Mat, rect: Tuple[int, int, int, int]):
    x, y, w, h = rect
    return crop_img(img, top=y, left=x, bottom=y + h, right=x + w)


def crop_to_pure(screenshot: Mat, device: DeviceV1):
    return crop_from_device_attr(screenshot, device.pure)


def crop_to_far(screenshot: Mat, device: DeviceV1):
    return crop_from_device_attr(screenshot, device.far)


def crop_to_lost(screenshot: Mat, device: DeviceV1):
    return crop_from_device_attr(screenshot, device.lost)


def crop_to_max_recall(screenshot: Mat, device: DeviceV1):
    return crop_from_device_attr(screenshot, device.max_recall)


def crop_to_rating_class(screenshot: Mat, device: DeviceV1):
    return crop_from_device_attr(screenshot, device.rating_class)


def crop_to_score(screenshot: Mat, device: DeviceV1):
    return crop_from_device_attr(screenshot, device.score)


def crop_to_title(screenshot: Mat, device: DeviceV1):
    return crop_from_device_attr(screenshot, device.title)
@@ -1,37 +0,0 @@ (deleted file)
from dataclasses import dataclass
from typing import Any, Dict, Tuple

__all__ = ["DeviceV1"]


@dataclass(kw_only=True)
class DeviceV1:
    version: int
    uuid: str
    name: str
    pure: Tuple[int, int, int, int]
    far: Tuple[int, int, int, int]
    lost: Tuple[int, int, int, int]
    max_recall: Tuple[int, int, int, int]
    rating_class: Tuple[int, int, int, int]
    score: Tuple[int, int, int, int]
    title: Tuple[int, int, int, int]

    @classmethod
    def from_json_object(cls, json_dict: Dict[str, Any]):
        if json_dict["version"] == 1:
            return cls(
                version=1,
                uuid=json_dict["uuid"],
                name=json_dict["name"],
                pure=json_dict["pure"],
                far=json_dict["far"],
                lost=json_dict["lost"],
                max_recall=json_dict["max_recall"],
                rating_class=json_dict["rating_class"],
                score=json_dict["score"],
                title=json_dict["title"],
            )

    def repr_info(self):
        return f"Device(version={self.version}, uuid={repr(self.uuid)}, name={repr(self.name)})"
@@ -1,86 +0,0 @@ (deleted file)
from typing import List

import cv2

from ...crop import crop_xywh
from ...mask import mask_gray, mask_white
from ...ocr import ocr_digits_by_contour_knn, ocr_rating_class
from ...types import Mat, cv2_ml_KNearest
from ..shared import DeviceOcrResult
from .crop import *
from .definition import DeviceV1


class DeviceV1Ocr:
    def __init__(self, device: DeviceV1, knn_model: cv2_ml_KNearest):
        self.__device = device
        self.__knn_model = knn_model

    @property
    def device(self):
        return self.__device

    @device.setter
    def device(self, value):
        self.__device = value

    @property
    def knn_model(self):
        return self.__knn_model

    @knn_model.setter
    def knn_model(self, value):
        self.__knn_model = value

    def preprocess_score_roi(self, __roi_gray: Mat) -> List[Mat]:
        roi_gray = __roi_gray.copy()
        contours, _ = cv2.findContours(
            roi_gray, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE
        )
        for contour in contours:
            rect = cv2.boundingRect(contour)
            if rect[3] > roi_gray.shape[0] * 0.6:
                continue
            roi_gray = cv2.fillPoly(roi_gray, [contour], 0)
        return roi_gray

    def ocr(self, img_bgr: Mat):
        rating_class_roi = crop_to_rating_class(img_bgr, self.device)
        rating_class = ocr_rating_class(rating_class_roi)

        pfl_mr_roi = [
            crop_to_pure(img_bgr, self.device),
            crop_to_far(img_bgr, self.device),
            crop_to_lost(img_bgr, self.device),
            crop_to_max_recall(img_bgr, self.device),
        ]
        pfl_mr_roi = [mask_gray(roi) for roi in pfl_mr_roi]

        pure, far, lost = [
            ocr_digits_by_contour_knn(roi, self.knn_model) for roi in pfl_mr_roi[:3]
        ]

        max_recall_contours, _ = cv2.findContours(
            pfl_mr_roi[3], cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE
        )
        max_recall_rects = [cv2.boundingRect(c) for c in max_recall_contours]
        max_recall_rect = sorted(max_recall_rects, key=lambda r: r[0])[-1]
        max_recall_roi = crop_xywh(img_bgr, max_recall_rect)
        max_recall = ocr_digits_by_contour_knn(max_recall_roi, self.knn_model)

        score_roi = crop_to_score(img_bgr, self.device)
        score_roi = mask_white(score_roi)
        score_roi = self.preprocess_score_roi(score_roi)
        score = ocr_digits_by_contour_knn(score_roi, self.knn_model)

        return DeviceOcrResult(
            song_id=None,
            title=None,
            rating_class=rating_class,
            pure=pure,
            far=far,
            lost=lost,
            score=score,
            max_recall=max_recall,
            clear_type=None,
        )
@@ -1,4 +0,0 @@ (deleted file)
from .definition import DeviceV2
from .ocr import DeviceV2Ocr
from .rois import DeviceV2AutoRois, DeviceV2Rois
from .shared import MAX_RECALL_CLOSE_KERNEL
@@ -1,26 +0,0 @@ (deleted file)
from typing import Iterable

from attrs import define, field

from ...types import XYWHRect


def iterable_to_xywh_rect(__iter: Iterable) -> XYWHRect:
    return XYWHRect(*__iter)


@define(kw_only=True)
class DeviceV2:
    version = field(type=int)
    uuid = field(type=str)
    name = field(type=str)
    crop_black_edges = field(type=bool)
    factor = field(type=float)
    pure = field(converter=iterable_to_xywh_rect, default=[0, 0, 0, 0])
    far = field(converter=iterable_to_xywh_rect, default=[0, 0, 0, 0])
    lost = field(converter=iterable_to_xywh_rect, default=[0, 0, 0, 0])
    score = field(converter=iterable_to_xywh_rect, default=[0, 0, 0, 0])
    max_recall_rating_class = field(
        converter=iterable_to_xywh_rect, default=[0, 0, 0, 0]
    )
    title = field(converter=iterable_to_xywh_rect, default=[0, 0, 0, 0])
@@ -1,172 +0,0 @@ (deleted file)
import math
from functools import lru_cache
from typing import Sequence

import cv2
import numpy as np
from PIL import Image

from ...crop import crop_xywh
from ...mask import (
    mask_byd,
    mask_ftr,
    mask_gray,
    mask_max_recall_purple,
    mask_pfl_white,
    mask_prs,
    mask_pst,
    mask_white,
)
from ...ocr import (
    FixRects,
    ocr_digit_samples_knn,
    ocr_digits_by_contour_knn,
    preprocess_hog,
    resize_fill_square,
)
from ...phash_db import ImagePHashDatabase
from ...sift_db import SIFTDatabase
from ...types import Mat, cv2_ml_KNearest
from ..shared import DeviceOcrResult
from .preprocess import find_digits_preprocess
from .rois import DeviceV2Rois
from .shared import MAX_RECALL_CLOSE_KERNEL
from .sizes import SizesV2


class DeviceV2Ocr:
    def __init__(self, knn_model: cv2_ml_KNearest, phash_db: ImagePHashDatabase):
        self.__knn_model = knn_model
        self.__phash_db = phash_db

    @property
    def knn_model(self):
        if not self.__knn_model:
            raise ValueError("`knn_model` unset.")
        return self.__knn_model

    @knn_model.setter
    def knn_model(self, value: cv2_ml_KNearest):
        self.__knn_model = value

    @property
    def phash_db(self):
        if not self.__phash_db:
            raise ValueError("`phash_db` unset.")
        return self.__phash_db

    @phash_db.setter
    def phash_db(self, value: SIFTDatabase):
        self.__phash_db = value

    @lru_cache
    def _get_digit_widths(self, num_list: Sequence[int], factor: float):
        widths = set()
        for n in num_list:
            lower = math.floor(n * factor)
            upper = math.ceil(n * factor)
            widths.update(range(lower, upper + 1))
        return widths

    def _base_ocr_pfl(self, roi_masked: Mat, factor: float = 1.0):
        contours, _ = cv2.findContours(
            roi_masked, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE
        )
        filtered_contours = [c for c in contours if cv2.contourArea(c) >= 5 * factor]
        rects = [cv2.boundingRect(c) for c in filtered_contours]
        rects = FixRects.connect_broken(rects, roi_masked.shape[1], roi_masked.shape[0])
        rect_contour_map = dict(zip(rects, filtered_contours))

        filtered_rects = [r for r in rects if r[2] >= 5 * factor and r[3] >= 6 * factor]
        filtered_rects = FixRects.split_connected(roi_masked, filtered_rects)
        filtered_rects = sorted(filtered_rects, key=lambda r: r[0])

        roi_ocr = roi_masked.copy()
        filtered_contours_flattened = {tuple(c.flatten()) for c in filtered_contours}
        for contour in contours:
            if tuple(contour.flatten()) in filtered_contours_flattened:
                continue
            roi_ocr = cv2.fillPoly(roi_ocr, [contour], [0])
        digit_rois = [
            resize_fill_square(crop_xywh(roi_ocr, r), 20)
            for r in sorted(filtered_rects, key=lambda r: r[0])
        ]
        # [cv2.imshow(f"r{i}", r) for i, r in enumerate(digit_rois)]
        # cv2.waitKey(0)
        samples = preprocess_hog(digit_rois)
        return ocr_digit_samples_knn(samples, self.knn_model)

    def ocr_song_id(self, rois: DeviceV2Rois):
        jacket = cv2.cvtColor(rois.jacket, cv2.COLOR_BGR2GRAY)
        return self.phash_db.lookup_image(Image.fromarray(jacket))[0]

    def ocr_rating_class(self, rois: DeviceV2Rois):
        roi = cv2.cvtColor(rois.max_recall_rating_class, cv2.COLOR_BGR2HSV)
        results = [mask_pst(roi), mask_prs(roi), mask_ftr(roi), mask_byd(roi)]
        return max(enumerate(results), key=lambda i: np.count_nonzero(i[1]))[0]

    def ocr_score(self, rois: DeviceV2Rois):
        roi = cv2.cvtColor(rois.score, cv2.COLOR_BGR2HSV)
        roi = mask_white(roi)
        contours, _ = cv2.findContours(roi, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
        for contour in contours:
            x, y, w, h = cv2.boundingRect(contour)
            if h < roi.shape[0] * 0.6:
                roi = cv2.fillPoly(roi, [contour], [0])
        return ocr_digits_by_contour_knn(roi, self.knn_model)

    def mask_pfl(self, pfl_roi: Mat, rois: DeviceV2Rois):
        return (
            mask_pfl_white(cv2.cvtColor(pfl_roi, cv2.COLOR_BGR2HSV))
            if isinstance(rois.sizes, SizesV2)
            else mask_gray(pfl_roi)
        )

    def ocr_pure(self, rois: DeviceV2Rois):
        roi = self.mask_pfl(rois.pure, rois)
        return self._base_ocr_pfl(roi, rois.sizes.factor)

    def ocr_far(self, rois: DeviceV2Rois):
        roi = self.mask_pfl(rois.far, rois)
        return self._base_ocr_pfl(roi, rois.sizes.factor)

    def ocr_lost(self, rois: DeviceV2Rois):
        roi = self.mask_pfl(rois.lost, rois)
        return self._base_ocr_pfl(roi, rois.sizes.factor)

    def ocr_max_recall(self, rois: DeviceV2Rois):
        roi = (
            mask_max_recall_purple(
                cv2.cvtColor(rois.max_recall_rating_class, cv2.COLOR_BGR2HSV)
            )
            if isinstance(rois.sizes, SizesV2)
            else mask_gray(rois.max_recall_rating_class)
        )
        roi_closed = cv2.morphologyEx(roi, cv2.MORPH_CLOSE, MAX_RECALL_CLOSE_KERNEL)
        contours, _ = cv2.findContours(
            roi_closed, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE
        )
        rects = sorted(
            [cv2.boundingRect(c) for c in contours], key=lambda r: r[0], reverse=True
        )
        max_recall_roi = crop_xywh(roi, rects[0])
        return ocr_digits_by_contour_knn(max_recall_roi, self.knn_model)

    def ocr(self, rois: DeviceV2Rois):
        song_id = self.ocr_song_id(rois)
        rating_class = self.ocr_rating_class(rois)
        score = self.ocr_score(rois)
        pure = self.ocr_pure(rois)
        far = self.ocr_far(rois)
        lost = self.ocr_lost(rois)
        max_recall = self.ocr_max_recall(rois)

        return DeviceOcrResult(
            rating_class=rating_class,
            pure=pure,
            far=far,
            lost=lost,
            score=score,
            max_recall=max_recall,
            song_id=song_id,
        )
@@ -1,54 +0,0 @@ (deleted file)
import cv2

from ...types import Mat
from .shared import *


def find_digits_preprocess(__img_masked: Mat) -> Mat:
    img = __img_masked.copy()
    img_denoised = cv2.morphologyEx(img, cv2.MORPH_OPEN, PFL_DENOISE_KERNEL)
    # img_denoised = cv2.bitwise_and(img, img_denoised)

    denoise_contours, _ = cv2.findContours(
        img_denoised, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE
    )
    # cv2.drawContours(img_denoised, contours, -1, [128], 2)

    # fill all contour.area < max(contour.area) * ratio with black pixels
    # for denoise purposes

    # define threshold contour area
    # we assume the smallest digit "1", is 80% height of the image,
    # and at least 1.5 pixel wide, considering cv2.contourArea always
    # returns a smaller value than the actual contour area.
    max_contour_area = __img_masked.shape[0] * 0.8 * 1.5
    filtered_contours = list(
        filter(lambda c: cv2.contourArea(c) >= max_contour_area, denoise_contours)
    )

    filtered_contours_flattened = {tuple(c.flatten()) for c in filtered_contours}

    for contour in denoise_contours:
        if tuple(contour.flatten()) not in filtered_contours_flattened:
            img_denoised = cv2.fillPoly(img_denoised, [contour], [0])

    # old algorithm, finding the largest contour area
    ## contour_area_tuples = [(contour, cv2.contourArea(contour)) for contour in contours]
    ## contour_area_tuples = sorted(
    ##     contour_area_tuples, key=lambda item: item[1], reverse=True
    ## )
    ## max_contour_area = contour_area_tuples[0][1]
    ## print(max_contour_area, [item[1] for item in contour_area_tuples])
    ## contours_filter_end_index = len(contours)
    ## for i, item in enumerate(contour_area_tuples):
    ##     contour, area = item
    ##     if area < max_contour_area * 0.15:
    ##         contours_filter_end_index = i
    ##         break
    ## contours = [item[0] for item in contour_area_tuples]
    ## for contour in contours[-contours_filter_end_index - 1:]:
    ##     img = cv2.fillPoly(img, [contour], [0])
    ##     img_denoised = cv2.fillPoly(img_denoised, [contour], [0])
    ## contours = contours[:contours_filter_end_index]

    return img_denoised
@@ -1,199 +0,0 @@ (deleted file)
from typing import Union

from ...crop import crop_black_edges, crop_xywh
from ...types import Mat, XYWHRect
from .definition import DeviceV2
from .sizes import Sizes, SizesV1


def to_int(num: Union[int, float]) -> int:
    return round(num)


class DeviceV2Rois:
    def __init__(self, device: DeviceV2, img: Mat):
        self.device = device
        self.sizes = SizesV1(self.device.factor)
        self.__img = img

    @staticmethod
    def construct_int_xywh_rect(x, y, w, h) -> XYWHRect:
        return XYWHRect(*[to_int(item) for item in [x, y, w, h]])

    @property
    def img(self):
        return self.__img

    @img.setter
    def img(self, img: Mat):
        self.__img = (
            crop_black_edges(img) if self.device.crop_black_edges else img.copy()
        )

    @property
    def h(self):
        return self.img.shape[0]

    @property
    def vmid(self):
        return self.h / 2

    @property
    def w(self):
        return self.img.shape[1]

    @property
    def hmid(self):
        return self.w / 2

    @property
    def h_without_top_bar(self):
        """img_height -= top_bar_height"""
        return self.h - self.sizes.TOP_BAR_HEIGHT

    @property
    def h_without_top_bar_mid(self):
        return self.sizes.TOP_BAR_HEIGHT + self.h_without_top_bar / 2

    @property
    def pfl_top(self):
        return self.h_without_top_bar_mid + self.sizes.PFL_TOP_FROM_VMID

    @property
    def pfl_left(self):
        return self.hmid + self.sizes.PFL_LEFT_FROM_HMID

    @property
    def pure_rect(self):
        return self.construct_int_xywh_rect(
            x=self.pfl_left,
            y=self.pfl_top,
            w=self.sizes.PFL_WIDTH,
            h=self.sizes.PFL_FONT_PX,
        )

    @property
    def pure(self):
        return crop_xywh(self.img, self.pure_rect)

    @property
    def far_rect(self):
        return self.construct_int_xywh_rect(
            x=self.pfl_left,
            y=self.pfl_top + self.sizes.PFL_FONT_PX + self.sizes.PURE_FAR_GAP,
            w=self.sizes.PFL_WIDTH,
            h=self.sizes.PFL_FONT_PX,
        )

    @property
    def far(self):
        return crop_xywh(self.img, self.far_rect)

    @property
    def lost_rect(self):
        return self.construct_int_xywh_rect(
            x=self.pfl_left,
            y=(
                self.pfl_top
                + self.sizes.PFL_FONT_PX * 2
                + self.sizes.PURE_FAR_GAP
                + self.sizes.FAR_LOST_GAP
            ),
            w=self.sizes.PFL_WIDTH,
            h=self.sizes.PFL_FONT_PX,
        )

    @property
    def lost(self):
        return crop_xywh(self.img, self.lost_rect)

    @property
    def score_rect(self):
        return self.construct_int_xywh_rect(
            x=self.hmid - (self.sizes.SCORE_WIDTH / 2),
            y=(
                self.h_without_top_bar_mid
                + self.sizes.SCORE_BOTTOM_FROM_VMID
                - self.sizes.SCORE_FONT_PX
            ),
            w=self.sizes.SCORE_WIDTH,
            h=self.sizes.SCORE_FONT_PX,
        )

    @property
    def score(self):
        return crop_xywh(self.img, self.score_rect)

    @property
    def max_recall_rating_class_rect(self):
        x = (
            self.hmid
            + self.sizes.JACKET_RIGHT_FROM_HOR_MID
            - self.sizes.JACKET_WIDTH
            - 25 * self.sizes.factor
        )
        return self.construct_int_xywh_rect(
            x=x,
            y=(
                self.h_without_top_bar_mid
                - self.sizes.SCORE_PANEL[1] / 2
                - self.sizes.MR_RT_HEIGHT
            ),
            w=self.sizes.MR_RT_WIDTH,
            h=self.sizes.MR_RT_HEIGHT,
        )

    @property
    def max_recall_rating_class(self):
        return crop_xywh(self.img, self.max_recall_rating_class_rect)

    @property
    def title_rect(self):
        return self.construct_int_xywh_rect(
            x=0,
            y=self.h_without_top_bar_mid
            + self.sizes.TITLE_BOTTOM_FROM_VMID
            - self.sizes.TITLE_FONT_PX,
            w=self.hmid + self.sizes.TITLE_WIDTH_RIGHT,
            h=self.sizes.TITLE_FONT_PX,
        )

    @property
    def title(self):
        return crop_xywh(self.img, self.title_rect)

    @property
    def jacket_rect(self):
        return self.construct_int_xywh_rect(
            x=self.hmid
            + self.sizes.JACKET_RIGHT_FROM_HOR_MID
            - self.sizes.JACKET_WIDTH,
            y=self.h_without_top_bar_mid - self.sizes.SCORE_PANEL[1] / 2,
            w=self.sizes.JACKET_WIDTH,
            h=self.sizes.JACKET_WIDTH,
        )

    @property
    def jacket(self):
        return crop_xywh(self.img, self.jacket_rect)


class DeviceV2AutoRois(DeviceV2Rois):
    @staticmethod
    def get_factor(width: int, height: int):
        ratio = width / height
        return ((width / 16) * 9) / 720 if ratio < (16 / 9) else height / 720

    def __init__(self, img: Mat):
        factor = self.get_factor(img.shape[1], img.shape[0])
        self.sizes = SizesV1(factor)
        self.__img = None
        self.img = img

    @property
    def img(self):
        return self.__img

    @img.setter
    def img(self, img: Mat):
        self.__img = crop_black_edges(img)
@@ -1,9 +0,0 @@ (deleted file)
from cv2 import MORPH_RECT, getStructuringElement

PFL_DENOISE_KERNEL = getStructuringElement(MORPH_RECT, [2, 2])
PFL_ERODE_KERNEL = getStructuringElement(MORPH_RECT, [3, 3])
PFL_CLOSE_HORIZONTAL_KERNEL = getStructuringElement(MORPH_RECT, [10, 1])

MAX_RECALL_DENOISE_KERNEL = getStructuringElement(MORPH_RECT, [3, 3])
MAX_RECALL_ERODE_KERNEL = getStructuringElement(MORPH_RECT, [2, 2])
MAX_RECALL_CLOSE_KERNEL = getStructuringElement(MORPH_RECT, [20, 1])
@@ -1,254 +0,0 @@ (deleted file)
from typing import Tuple, Union


def apply_factor(num: Union[int, float], factor: float):
    return num * factor


class Sizes:
    def __init__(self, factor: float):
        raise NotImplementedError()

    @property
    def TOP_BAR_HEIGHT(self):
        ...

    @property
    def SCORE_PANEL(self) -> Tuple[int, int]:
        ...

    @property
    def PFL_TOP_FROM_VMID(self):
        ...

    @property
    def PFL_LEFT_FROM_HMID(self):
        ...

    @property
    def PFL_WIDTH(self):
        ...

    @property
    def PFL_FONT_PX(self):
        ...

    @property
    def PURE_FAR_GAP(self):
        ...

    @property
    def FAR_LOST_GAP(self):
        ...

    @property
    def SCORE_BOTTOM_FROM_VMID(self):
        ...

    @property
    def SCORE_FONT_PX(self):
        ...

    @property
    def SCORE_WIDTH(self):
        ...

    @property
    def JACKET_RIGHT_FROM_HOR_MID(self):
        ...

    @property
    def JACKET_WIDTH(self):
        ...

    @property
    def MR_RT_RIGHT_FROM_HMID(self):
        ...

    @property
    def MR_RT_WIDTH(self):
        ...

    @property
    def MR_RT_HEIGHT(self):
        ...

    @property
    def TITLE_BOTTOM_FROM_VMID(self):
        ...

    @property
    def TITLE_FONT_PX(self):
        ...

    @property
    def TITLE_WIDTH_RIGHT(self):
        ...


class SizesV1(Sizes):
    def __init__(self, factor: float):
        self.factor = factor

    def apply_factor(self, num):
        return apply_factor(num, self.factor)

    @property
    def TOP_BAR_HEIGHT(self):
        return self.apply_factor(50)

    @property
    def SCORE_PANEL(self) -> Tuple[int, int]:
        return tuple(self.apply_factor(num) for num in [485, 239])

    @property
    def PFL_TOP_FROM_VMID(self):
        return self.apply_factor(135)

    @property
    def PFL_LEFT_FROM_HMID(self):
        return self.apply_factor(5)

    @property
    def PFL_WIDTH(self):
        return self.apply_factor(76)

    @property
    def PFL_FONT_PX(self):
        return self.apply_factor(26)

    @property
    def PURE_FAR_GAP(self):
        return self.apply_factor(12)

    @property
    def FAR_LOST_GAP(self):
        return self.apply_factor(10)

    @property
    def SCORE_BOTTOM_FROM_VMID(self):
        return self.apply_factor(-50)

    @property
    def SCORE_FONT_PX(self):
        return self.apply_factor(45)

    @property
    def SCORE_WIDTH(self):
        return self.apply_factor(280)

    @property
    def JACKET_RIGHT_FROM_HOR_MID(self):
        return self.apply_factor(-235)

    @property
    def JACKET_WIDTH(self):
        return self.apply_factor(375)

    @property
    def MR_RT_RIGHT_FROM_HMID(self):
        return self.apply_factor(-300)

    @property
    def MR_RT_WIDTH(self):
        return self.apply_factor(275)

    @property
    def MR_RT_HEIGHT(self):
        return self.apply_factor(75)

    @property
    def TITLE_BOTTOM_FROM_VMID(self):
        return self.apply_factor(-265)

    @property
    def TITLE_FONT_PX(self):
        return self.apply_factor(40)

    @property
    def TITLE_WIDTH_RIGHT(self):
        return self.apply_factor(275)


class SizesV2(Sizes):
    def __init__(self, factor: float):
        self.factor = factor

    def apply_factor(self, num):
        return apply_factor(num, self.factor)

    @property
    def TOP_BAR_HEIGHT(self):
        return self.apply_factor(50)

    @property
    def SCORE_PANEL(self) -> Tuple[int, int]:
        return tuple(self.apply_factor(num) for num in [447, 233])

    @property
    def PFL_TOP_FROM_VMID(self):
        return self.apply_factor(142)

    @property
    def PFL_LEFT_FROM_HMID(self):
        return self.apply_factor(10)

    @property
    def PFL_WIDTH(self):
        return self.apply_factor(60)

    @property
    def PFL_FONT_PX(self):
        return self.apply_factor(16)

    @property
    def PURE_FAR_GAP(self):
        return self.apply_factor(20)

    @property
    def FAR_LOST_GAP(self):
        return self.apply_factor(23)

    @property
    def SCORE_BOTTOM_FROM_VMID(self):
        return self.apply_factor(-50)

    @property
    def SCORE_FONT_PX(self):
        return self.apply_factor(45)

    @property
    def SCORE_WIDTH(self):
        return self.apply_factor(280)

    @property
    def JACKET_RIGHT_FROM_HOR_MID(self):
        return self.apply_factor(-235)

    @property
    def JACKET_WIDTH(self):
        return self.apply_factor(375)

    @property
    def MR_RT_RIGHT_FROM_HMID(self):
        return self.apply_factor(-330)

    @property
    def MR_RT_WIDTH(self):
        return self.apply_factor(330)

    @property
    def MR_RT_HEIGHT(self):
        return self.apply_factor(75)

    @property
    def TITLE_BOTTOM_FROM_VMID(self):
        return self.apply_factor(-265)

    @property
    def TITLE_FONT_PX(self):
        return self.apply_factor(40)

    @property
    def TITLE_WIDTH_RIGHT(self):
        return self.apply_factor(275)
@@ -1,2 +0,0 @@ (deleted file)
from .common import Extractor
from .sizes import *
@@ -1,3 +0,0 @@ (deleted file)
from .common import AutoSizes
from .t1 import AutoSizesT1
from .t2 import AutoSizesT2
@@ -1,119 +0,0 @@ (deleted file)
import cv2
import numpy as np

from .types import Mat

__all__ = [
    "GRAY_MIN_HSV",
    "GRAY_MAX_HSV",
    "WHITE_MIN_HSV",
    "WHITE_MAX_HSV",
    "PFL_WHITE_MIN_HSV",
    "PFL_WHITE_MAX_HSV",
    "PST_MIN_HSV",
    "PST_MAX_HSV",
    "PRS_MIN_HSV",
    "PRS_MAX_HSV",
    "FTR_MIN_HSV",
    "FTR_MAX_HSV",
    "BYD_MIN_HSV",
    "BYD_MAX_HSV",
    "MAX_RECALL_PURPLE_MIN_HSV",
    "MAX_RECALL_PURPLE_MAX_HSV",
    "mask_gray",
    "mask_white",
    "mask_pfl_white",
    "mask_pst",
    "mask_prs",
    "mask_ftr",
    "mask_byd",
    "mask_rating_class",
    "mask_max_recall_purple",
]

GRAY_MIN_HSV = np.array([0, 0, 70], np.uint8)
GRAY_MAX_HSV = np.array([0, 0, 200], np.uint8)

GRAY_MIN_BGR = np.array([50] * 3, np.uint8)
GRAY_MAX_BGR = np.array([160] * 3, np.uint8)

WHITE_MIN_HSV = np.array([0, 0, 240], np.uint8)
WHITE_MAX_HSV = np.array([179, 10, 255], np.uint8)

PFL_WHITE_MIN_HSV = np.array([0, 0, 248], np.uint8)
PFL_WHITE_MAX_HSV = np.array([179, 10, 255], np.uint8)

PST_MIN_HSV = np.array([100, 50, 80], np.uint8)
PST_MAX_HSV = np.array([100, 255, 255], np.uint8)

PRS_MIN_HSV = np.array([43, 40, 75], np.uint8)
PRS_MAX_HSV = np.array([50, 155, 190], np.uint8)

FTR_MIN_HSV = np.array([149, 30, 0], np.uint8)
FTR_MAX_HSV = np.array([155, 181, 150], np.uint8)

BYD_MIN_HSV = np.array([170, 50, 50], np.uint8)
BYD_MAX_HSV = np.array([179, 210, 198], np.uint8)

MAX_RECALL_PURPLE_MIN_HSV = np.array([125, 0, 0], np.uint8)
MAX_RECALL_PURPLE_MAX_HSV = np.array([130, 100, 150], np.uint8)


def mask_gray(__img_bgr: Mat):
    # bgr_value_equal_mask = all(__img_bgr[:, 1:] == __img_bgr[:, :-1], axis=1)
    bgr_value_equal_mask = np.max(__img_bgr, axis=2) - np.min(__img_bgr, axis=2) <= 5
    img_bgr = __img_bgr.copy()
    img_bgr[~bgr_value_equal_mask] = np.array([0, 0, 0], __img_bgr.dtype)
    img_bgr = cv2.erode(img_bgr, cv2.getStructuringElement(cv2.MORPH_RECT, (2, 2)))
    img_bgr = cv2.dilate(img_bgr, cv2.getStructuringElement(cv2.MORPH_RECT, (1, 1)))
    return cv2.inRange(img_bgr, GRAY_MIN_BGR, GRAY_MAX_BGR)


def mask_white(img_hsv: Mat):
    mask = cv2.inRange(img_hsv, WHITE_MIN_HSV, WHITE_MAX_HSV)
    mask = cv2.dilate(mask, cv2.getStructuringElement(cv2.MORPH_RECT, (2, 2)))
    return mask


def mask_pfl_white(img_hsv: Mat):
    mask = cv2.inRange(img_hsv, PFL_WHITE_MIN_HSV, PFL_WHITE_MAX_HSV)
    mask = cv2.dilate(mask, cv2.getStructuringElement(cv2.MORPH_RECT, (2, 2)))
    return mask


def mask_pst(img_hsv: Mat):
    mask = cv2.inRange(img_hsv, PST_MIN_HSV, PST_MAX_HSV)
    mask = cv2.dilate(mask, (1, 1))
    return mask


def mask_prs(img_hsv: Mat):
    mask = cv2.inRange(img_hsv, PRS_MIN_HSV, PRS_MAX_HSV)
    mask = cv2.dilate(mask, (1, 1))
    return mask


def mask_ftr(img_hsv: Mat):
    mask = cv2.inRange(img_hsv, FTR_MIN_HSV, FTR_MAX_HSV)
    mask = cv2.dilate(mask, (1, 1))
    return mask


def mask_byd(img_hsv: Mat):
    mask = cv2.inRange(img_hsv, BYD_MIN_HSV, BYD_MAX_HSV)
    mask = cv2.dilate(mask, (2, 2))
    return mask


def mask_rating_class(img_hsv: Mat):
    pst = mask_pst(img_hsv)
    prs = mask_prs(img_hsv)
    ftr = mask_ftr(img_hsv)
    byd = mask_byd(img_hsv)
    return cv2.bitwise_or(byd, cv2.bitwise_or(ftr, cv2.bitwise_or(pst, prs)))


def mask_max_recall_purple(img_hsv: Mat):
    mask = cv2.inRange(img_hsv, MAX_RECALL_PURPLE_MIN_HSV, MAX_RECALL_PURPLE_MAX_HSV)
    mask = cv2.dilate(mask, (2, 2))
    return mask
@@ -1,55 +0,0 @@ (deleted file)
import cv2


class Masker:
    @staticmethod
    def pure(roi_bgr: cv2.Mat) -> cv2.Mat:
        raise NotImplementedError()

    @staticmethod
    def far(roi_bgr: cv2.Mat) -> cv2.Mat:
        raise NotImplementedError()

    @staticmethod
    def lost(roi_bgr: cv2.Mat) -> cv2.Mat:
        raise NotImplementedError()

    @staticmethod
    def score(roi_bgr: cv2.Mat) -> cv2.Mat:
        raise NotImplementedError()

    @staticmethod
    def rating_class_pst(roi_bgr: cv2.Mat) -> cv2.Mat:
        raise NotImplementedError()

    @staticmethod
    def rating_class_prs(roi_bgr: cv2.Mat) -> cv2.Mat:
        raise NotImplementedError()

    @staticmethod
    def rating_class_ftr(roi_bgr: cv2.Mat) -> cv2.Mat:
        raise NotImplementedError()

    @staticmethod
    def rating_class_byd(roi_bgr: cv2.Mat) -> cv2.Mat:
        raise NotImplementedError()

    @staticmethod
    def max_recall(roi_bgr: cv2.Mat) -> cv2.Mat:
        raise NotImplementedError()

    @staticmethod
    def clear_status_track_lost(roi_bgr: cv2.Mat) -> cv2.Mat:
        raise NotImplementedError()

    @staticmethod
    def clear_status_track_complete(roi_bgr: cv2.Mat) -> cv2.Mat:
        raise NotImplementedError()

    @staticmethod
    def clear_status_full_recall(roi_bgr: cv2.Mat) -> cv2.Mat:
        raise NotImplementedError()

    @staticmethod
    def clear_status_pure_memory(roi_bgr: cv2.Mat) -> cv2.Mat:
        raise NotImplementedError()
@@ -7,7 +7,6 @@ import numpy as np
 from numpy.linalg import norm

 from .crop import crop_xywh
-from .mask import mask_byd, mask_ftr, mask_prs, mask_pst
 from .types import Mat, cv2_ml_KNearest

 __all__ = [
@@ -199,13 +198,3 @@ def ocr_digits_by_contour_knn(
 ) -> int:
     samples = ocr_digits_by_contour_get_samples(__roi_gray, size)
     return ocr_digit_samples_knn(samples, knn_model, k)
-
-
-def ocr_rating_class(roi_hsv: Mat):
-    mask_results = [
-        mask_pst(roi_hsv),
-        mask_prs(roi_hsv),
-        mask_ftr(roi_hsv),
-        mask_byd(roi_hsv),
-    ]
-    return max(enumerate(mask_results), key=lambda e: np.count_nonzero(e[1]))[0]
@@ -1,110 +0,0 @@ (deleted file)
import io
import sqlite3
from gzip import GzipFile
from typing import Tuple

import cv2
import numpy as np

from .types import Mat


class SIFTDatabase:
    def __init__(self, db_path: str, load: bool = True):
        self.__db_path = db_path
        self.__tags = []
        self.__descriptors = []
        self.__size = None

        self.__sift = cv2.SIFT_create()
        self.__bf_matcher = cv2.BFMatcher()

        if load:
            self.load_db()

    @property
    def db_path(self):
        return self.__db_path

    @db_path.setter
    def db_path(self, value):
        self.__db_path = value

    @property
    def tags(self):
        return self.__tags

    @property
    def descriptors(self):
        return self.__descriptors

    @property
    def size(self):
        return self.__size

    @size.setter
    def size(self, value: Tuple[int, int]):
        self.__size = value

    @property
    def sift(self):
        return self.__sift

    @property
    def bf_matcher(self):
        return self.__bf_matcher

    def load_db(self):
        conn = sqlite3.connect(self.db_path)
        with conn:
            cursor = conn.cursor()

            size_str = cursor.execute(
                "SELECT value FROM properties WHERE id = 'size'"
            ).fetchone()[0]
            sizr_str_arr = size_str.split(", ")
            self.size = tuple(int(s) for s in sizr_str_arr)
            tag__descriptors_bytes = cursor.execute(
                "SELECT tag, descriptors FROM sift"
            ).fetchall()

            gzipped = int(
                cursor.execute(
                    "SELECT value FROM properties WHERE id = 'gzip'"
                ).fetchone()[0]
            )
            for tag, descriptor_bytes in tag__descriptors_bytes:
                buffer = io.BytesIO(descriptor_bytes)
                self.tags.append(tag)
                if gzipped == 0:
                    self.descriptors.append(np.load(buffer))
                else:
                    gzipped_buffer = GzipFile(None, "rb", fileobj=buffer)
                    self.descriptors.append(np.load(gzipped_buffer))

    def lookup_img(
        self,
        __img: Mat,
        *,
        sift=None,
        bf=None,
    ) -> Tuple[str, float]:
        sift = sift or self.sift
        bf = bf or self.bf_matcher

        img = __img.copy()
        if self.size is not None:
            img = cv2.resize(img, self.size)
        _, descriptors = sift.detectAndCompute(img, None)

        good_results = []
        for des in self.descriptors:
            matches = bf.knnMatch(descriptors, des, k=2)
            good = sum(m.distance < 0.75 * n.distance for m, n in matches)
            good_results.append(good)
        best_match_index = max(enumerate(good_results), key=lambda i: i[1])[0]

        return (
            self.tags[best_match_index],
            good_results[best_match_index] / len(descriptors),
        )