246 lines
6.2 KiB
Python
246 lines
6.2 KiB
Python
from collections import namedtuple
|
|
from functools import partial
|
|
|
|
import cv2
|
|
import numpy as np
|
|
from matplotlib import pyplot as plt
|
|
|
|
|
|
def show_mpl(image):
    """Display ``image`` in a large (20x20 inch) matplotlib figure and block."""
    figure, axis = plt.subplots(1, 1)
    figure.set_size_inches(20, 20)
    axis.imshow(image)
    plt.show()
|
|
|
|
|
|
def show_cv2(image):
    """Show ``image`` in an unnamed OpenCV window; blocks until a key is pressed."""
    window_title = ""
    cv2.imshow(window_title, image)
    cv2.waitKey(0)
|
|
|
|
|
|
def copy_and_normalize_channels(image):
    """Return a BGR copy of ``image``.

    Grayscale input is promoted to three channels; input that is already
    multi-channel is copied unchanged.
    """
    result = image.copy()
    try:
        result = cv2.cvtColor(result, cv2.COLOR_GRAY2BGR)
    except cv2.error:
        # cvtColor rejects non-grayscale input; keep the plain copy.
        pass
    return result
|
|
|
|
|
|
def draw_contours(image, contours):
    """Return a copy of ``image`` with each contour outlined in green.

    Args:
        image: Source image (grayscale or BGR); it is not modified.
        contours: Iterable of OpenCV contours (point arrays).

    Returns:
        BGR copy of ``image`` with the contours drawn 4 px thick.
    """
    image = copy_and_normalize_channels(image)

    # BUG FIX: the original passed a single contour as the *contours*
    # argument with index -1, which makes OpenCV treat every point of the
    # contour as its own one-point contour and draw isolated dots instead of
    # the outline.  Wrapping the contour in a list draws it correctly.
    for cont in contours:
        cv2.drawContours(image, [cont], -1, (0, 255, 0), 4)

    return image
|
|
|
|
|
|
def draw_rectangles(image, rectangles, color=None):
    """Return a copy of ``image`` with each (x, y, w, h) rectangle outlined.

    Args:
        image: Source image (grayscale or BGR); it is not modified.
        rectangles: Iterable of (x, y, w, h) tuples.
        color: Optional BGR outline color; any falsy value means green.

    Returns:
        BGR copy of ``image`` with the rectangles drawn 2 px thick.
    """
    canvas = copy_and_normalize_channels(image)

    outline = color if color else (0, 255, 0)

    for x, y, w, h in rectangles:
        cv2.rectangle(canvas, (x, y), (x + w, y + h), outline, 2)

    return canvas
|
|
|
|
|
|
def draw_stats(image, stats, annotate=False):
    """Draw a green bounding box (and optionally x/y/w/h labels) per stat row.

    Args:
        image: Source image (grayscale or BGR); a copy is drawn on.
        stats: Rows of (x, y, w, h, area) — presumably the ``stats`` output of
            ``cv2.connectedComponentsWithStats`` (TODO confirm against caller).
        annotate: When True, also render "x = ..", "y = ..", etc. in each box.

    Returns:
        BGR copy of ``image`` with the boxes (and labels) drawn.
    """
    image = copy_and_normalize_channels(image)

    # Label names paired positionally with the stat values when annotating.
    keys = ["x", "y", "w", "h"]

    def annotate_stat(x, y, w, h):
        # Render one "key = value" line per stat, stacked upward from the
        # bottom edge of the bounding box (20 px per line).
        for i, (s, v) in enumerate(zip(keys, [x, y, w, h])):
            anno = f"{s} = {v}"
            xann = int(x + 5)
            yann = int(y + h - (20 * (i + 1)))
            cv2.putText(image, anno, (xann, yann), cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 255, 0), 2)

    def draw_stat(stat):
        # ``area`` is unpacked but only the box geometry is used.
        x, y, w, h, area = stat

        cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)

        if annotate:
            annotate_stat(x, y, w, h)

    # NOTE(review): this skips the first TWO rows.  Skipping row 0 (the
    # background component) is conventional; skipping row 1 as well looks
    # deliberate but is unexplained — confirm intended.
    for stat in stats[2:]:
        draw_stat(stat)

    return image
|
|
|
|
|
|
def remove_overlapping(rectangles):
    """Keep only rectangles that overlap no other rectangle in the collection.

    Note that when two rectangles overlap each other, *both* are dropped.
    Takes (x, y, w, h) tuples and returns a lazy iterator of the same form.
    """
    boxes = [xywh_to_vec_rect(rect) for rect in rectangles]

    def overlaps_any(box):
        return any(compute_intersection(box, other) > 0 for other in boxes if other != box)

    survivors = (box for box in boxes if not overlaps_any(box))
    return map(vec_rect_to_xywh, survivors)
|
|
|
|
|
|
def remove_included(rectangles):
    """Drop every rectangle that is fully contained inside another rectangle.

    Takes (x, y, w, h) tuples and returns a lazy iterator of the same form.
    """
    boxes = [xywh_to_vec_rect(rect) for rect in rectangles]

    def contains(outer, inner):
        return (inner.xmin >= outer.xmin and inner.ymin >= outer.ymin
                and inner.xmax <= outer.xmax and inner.ymax <= outer.ymax)

    def stands_alone(box):
        return not any(contains(other, box) for other in boxes if other != box)

    survivors = (box for box in boxes if stands_alone(box))
    return map(vec_rect_to_xywh, survivors)
|
|
|
|
|
|
# Corner-based rectangle: (xmin, ymin) top-left, (xmax, ymax) bottom-right.
Rectangle = namedtuple("Rectangle", "xmin ymin xmax ymax")
|
|
|
|
|
|
def make_box(x1, y1, x2, y2):
    """Return the two corner coordinates as an {"x1", "y1", "x2", "y2"} dict."""
    return {"x1": x1, "y1": y1, "x2": x2, "y2": y2}
|
|
|
|
|
|
def compute_intersection(a, b):
    """Return the overlap area of two ``Rectangle``s, or 0 when they are disjoint."""
    overlap_w = min(a.xmax, b.xmax) - max(a.xmin, b.xmin)
    overlap_h = min(a.ymax, b.ymax) - max(a.ymin, b.ymin)

    if overlap_w < 0 or overlap_h < 0:
        return 0
    return overlap_w * overlap_h
|
|
|
|
|
|
def has_no_parent(hierarchy):
    """True when a contour hierarchy row has no parent contour.

    NOTE(review): the last hierarchy field is the parent index and -1 means
    "no parent", yet this also accepts parent index 0 — confirm that treating
    children of contour 0 as top-level is intended (strict check is ``== -1``).
    """
    parent_index = hierarchy[-1]
    return parent_index <= 0
|
|
|
|
|
|
def xywh_to_vec_rect(rect):
    """Convert an (x, y, w, h) rectangle into a corner-based ``Rectangle``."""
    x, y, width, height = rect
    return Rectangle(x, y, x + width, y + height)
|
|
|
|
|
|
def vec_rect_to_xywh(rect):
    """Convert a corner-based rectangle (x1, y1, x2, y2) into (x, y, w, h)."""
    xmin, ymin, xmax, ymax = rect
    return xmin, ymin, xmax - xmin, ymax - ymin
|
|
|
|
|
|
def find_primary_text_regions(image):
    """Finds regions of primary text, meaning no figure descriptions for example, but main text body paragraphs.

    Args:
        image: BGR image to search for primary text regions (not modified).

    Returns:
        Iterator over the contours (as produced by ``cv2.findContours``)
        whose area falls in the primary-text band.

    References:
        https://stackoverflow.com/questions/58349726/opencv-how-to-remove-text-from-background
    """

    def is_likely_primary_text_segments(cnt):
        # Heuristic area band for a merged text block; tune per document scale.
        return 800 < cv2.contourArea(cnt) < 15000

    image = image.copy()

    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    # NOTE: with THRESH_OTSU set, the 253 threshold value is ignored and the
    # threshold is computed automatically from the image histogram.
    thresh = cv2.threshold(gray, 253, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]

    # Close with a wide, short kernel to merge characters along a text line.
    close_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (20, 3))
    close = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, close_kernel, iterations=1)

    # Slight dilation to fuse neighbouring lines into paragraph-sized blobs.
    dilate_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 3))
    dilate = cv2.dilate(close, dilate_kernel, iterations=1)

    cnts, _ = cv2.findContours(dilate, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)

    cnts = filter(is_likely_primary_text_segments, cnts)

    return cnts
|
|
|
|
|
|
def remove_primary_text_regions(image):
    """Removes regions of primary text, meaning no figure descriptions for example, but main text body paragraphs.

    Args:
        image: Image to remove primary text from.

    Returns:
        Image with primary text removed.
    """
    result = image.copy()

    white = (255, 255, 255)

    for contour in find_primary_text_regions(result):
        x, y, w, h = cv2.boundingRect(contour)
        # Paint a filled white box over the detected text region.
        cv2.rectangle(result, (x, y), (x + w, y + h), white, -1)

    return result
|
|
|
|
|
|
def detect_large_coherent_structures(image: np.array):
    """Detects large coherent structures on an image.

    Thresholds the near-white background away, dilates and closes the
    foreground so nearby fragments merge into single blobs, and returns the
    external contours of those blobs.

    Args:
        image: BGR image to analyze (not modified).

    Returns:
        Sequence of external contours of the merged structures.

    References:
        https://stackoverflow.com/questions/60259169/how-to-group-nearby-contours-in-opencv-python-zebra-crossing-detection
    """
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    # Everything brighter than 253 is background; ~thresh makes it foreground.
    thresh = cv2.threshold(gray, 253, 255, cv2.THRESH_BINARY)[1]

    # BUG FIX: the original passed cv2.MORPH_OPEN (a morphology *operation*
    # constant) as the kernel *shape*.  It only worked because MORPH_OPEN and
    # MORPH_ELLIPSE share the value 2, so an elliptical kernel was produced;
    # name the shape explicitly to keep the identical behavior on purpose.
    dilate_kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
    dilate = cv2.dilate(~thresh, dilate_kernel, iterations=4)

    # Large closing kernel fuses the dilated fragments into coherent blobs.
    close_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (20, 20))
    close = cv2.morphologyEx(dilate, cv2.MORPH_CLOSE, close_kernel, iterations=1)

    cnts, _ = cv2.findContours(close, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    return cnts
|
|
|
|
|
|
def is_large_enough(cont, min_area):
    """True when the contour's (non-oriented) area exceeds ``min_area``."""
    return min_area < cv2.contourArea(cont, False)
|
|
|
|
|
|
def has_acceptable_format(cont, max_width_to_height_ratio):
    """True when the contour's bounding-box aspect ratio is within bounds.

    Accepts ratios from 1/``max_width_to_height_ratio`` (tall) up to
    ``max_width_to_height_ratio`` (wide), inclusive.
    """
    _, _, width, height = cv2.boundingRect(cont)
    ratio = width / height
    return (1 / max_width_to_height_ratio) <= ratio <= max_width_to_height_ratio
|
|
|
|
|
|
def is_filled(hierarchy):
    """Checks whether a contour's hierarchy row describes a filled shape.

    A filled shape has no child contour (field 2 == -1) and at most the
    root as its parent (field 3 <= 0).

    References:
        https://stackoverflow.com/questions/60095520/how-to-distinguish-filled-circle-contour-and-unfilled-circle-contour-in-opencv
    """
    has_child = hierarchy[2] != -1
    parent_index = hierarchy[3]
    return parent_index <= 0 and not has_child
|
|
|
|
|
|
def is_boxy(contour):
    """True when a coarse polygon approximation of the contour has few vertices."""
    perimeter = cv2.arcLength(contour, True)
    approx = cv2.approxPolyDP(contour, 0.01 * perimeter, True)
    return len(approx) <= 10
|