# Extraction-artifact header (not code): 248 lines, 8.2 KiB, Python

import cv2
import numpy as np
from funcy import lfilter, lmap
from cv_analysis.layout_parsing import parse_layout
from cv_analysis.utils.postprocessing import (
remove_isolated,
) # xywh_to_vecs, xywh_to_vec_rect, adjacent1d
from cv_analysis.utils.structures import Rectangle
from cv_analysis.utils.visual_logging import vizlogger
def add_external_contours(image, image_h_w_lines_only):
    """Draw the bounding rectangle of every external contour onto `image`.

    Args:
        image: destination image, modified in place
        image_h_w_lines_only: binary image containing only the detected
            horizontal/vertical lines; its outer contours are boxed

    Returns:
        the (mutated) destination image
    """
    found_contours, _ = cv2.findContours(
        image_h_w_lines_only, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE
    )
    for contour in found_contours:
        left, top, width, height = cv2.boundingRect(contour)
        cv2.rectangle(image, (left, top), (left + width, top + height), 255, 1)
    return image
def apply_motion_blur(image: np.array, angle, size=80):
    """Solidifies and slightly extends detected lines.

    A single-row kernel is built, rotated to `angle`, normalized, and
    convolved over the image, smearing pixels along that direction.

    Args:
        image (np.array): page image as array
        angle: direction in which to apply blur, 0 or 90
        size (int): kernel size; 80 found empirically to work well

    Returns:
        np.array: the blurred image
    """
    kernel = np.zeros((size, size), dtype=np.float32)
    vizlogger.debug(kernel, "tables08_blur_kernel1.png")
    # Fill the middle row so the kernel blurs horizontally before rotation.
    kernel[(size - 1) // 2, :] = np.ones(size, dtype=np.float32)
    vizlogger.debug(kernel, "tables09_blur_kernel2.png")
    # Rotate about the exact pixel-grid center of the kernel.
    rotation = cv2.getRotationMatrix2D((size / 2 - 0.5, size / 2 - 0.5), angle, 1.0)
    kernel = cv2.warpAffine(kernel, rotation, (size, size))
    vizlogger.debug(kernel, "tables10_blur_kernel3.png")
    # Normalize so convolution preserves overall intensity.
    kernel = kernel * (1.0 / np.sum(kernel))
    vizlogger.debug(kernel, "tables11_blur_kernel4.png")
    return cv2.filter2D(image, -1, kernel)
def isolate_vertical_and_horizontal_components(img_bin):
    """Identifies and reinforces horizontal and vertical lines in a binary image.

    Long runs are isolated with directional morphological opening, then
    thickened (dilation) and solidified/extended (motion blur), and finally
    the bounding boxes of the raw (pre-extension) lines are drawn on top.

    Args:
        img_bin (np.array): array corresponding to single binarized page image

    Returns:
        np.array: binary image containing only the reinforced line structure
    """
    # Minimum run length (in px) for a stretch of pixels to count as a line.
    line_min_width = 48
    kernel_h = np.ones((1, line_min_width), np.uint8)
    kernel_v = np.ones((line_min_width, 1), np.uint8)
    img_bin_h = cv2.morphologyEx(img_bin, cv2.MORPH_OPEN, kernel_h)
    img_bin_v = cv2.morphologyEx(img_bin, cv2.MORPH_OPEN, kernel_v)
    # Keep a snapshot of the un-extended lines for contour boxing below.
    img_lines_raw = img_bin_v | img_bin_h
    kernel_h = np.ones((1, 30), np.uint8)
    kernel_v = np.ones((30, 1), np.uint8)
    img_bin_h = cv2.dilate(img_bin_h, kernel_h, iterations=2)
    img_bin_v = cv2.dilate(img_bin_v, kernel_v, iterations=2)
    img_bin_h = apply_motion_blur(img_bin_h, 0)
    img_bin_v = apply_motion_blur(img_bin_v, 90)
    img_bin_extended = img_bin_h | img_bin_v
    # Re-binarize: the blur produced grayscale values.
    _, img_bin_extended = cv2.threshold(img_bin_extended, 120, 255, cv2.THRESH_BINARY)
    # NOTE(review): a 1x1 dilation kernel is a no-op; kept for behavior
    # parity — confirm whether a larger kernel was intended.
    img_bin_final = cv2.dilate(
        img_bin_extended, np.ones((1, 1), np.uint8), iterations=1
    )
    # add contours before lines are extended by blurring
    img_bin_final = add_external_contours(img_bin_final, img_lines_raw)
    return img_bin_final
def find_table_layout_boxes(image: np.array):
    """Detect layout boxes large enough to plausibly contain a table.

    Args:
        image (np.array): page image as array

    Returns:
        list: one entry per layout box — a Rectangle when the box area is at
            least 100000 px^2, otherwise None.
            NOTE(review): the None entries look unintended; confirm whether
            callers expect them filtered out.
    """
    def to_rect_if_large(box):
        (_, _, w, h) = box
        # 100000 px^2 area threshold; boxes below it map to None.
        if w * h >= 100000:
            return Rectangle.from_xywh(box)

    layout_boxes = parse_layout(image)
    # BUG FIX: a duplicate `lmap(...)` call whose result was discarded has
    # been removed — the mapping previously ran twice.
    return lmap(to_rect_if_large, layout_boxes)
def preprocess(image: np.array):
    """Convert a page image to an inverted binary mask.

    Color input is first converted to grayscale; pixels above 195 become
    white, the rest black, and the result is bitwise-inverted so ink is
    non-zero.

    Args:
        image (np.array): page image, grayscale or BGR

    Returns:
        np.array: inverted binary image
    """
    if len(image.shape) > 2:
        image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    _, binarized = cv2.threshold(image, 195, 255, cv2.THRESH_BINARY)
    return ~binarized
def turn_connected_components_into_rects(image: np.array):
    """Extract (x, y, w, h) boxes for the large connected components of ~image.

    Args:
        image (np.array): binary line image; it is inverted before labeling

    Returns:
        np.array | list: rows of (x, y, w, h), or [] when nothing qualifies
    """
    def is_large_enough(stat):
        # Each stats row from cv2 is (x, y, w, h, area); thresholds are
        # empirical minimums for a plausible table cell.
        x1, y1, w, h, area = stat
        return area > 2000 and w > 35 and h > 25

    _, _, stats, _ = cv2.connectedComponentsWithStats(
        ~image, connectivity=8, ltype=cv2.CV_32S
    )
    stats = lfilter(is_large_enough, stats)
    if stats:
        stats = np.vstack(stats)
        # Drop the area column, then skip the first two rows.
        # NOTE(review): skipping rows AFTER size-filtering is suspicious —
        # the background component is row 0 of the RAW stats, so this may
        # discard legitimate boxes; confirm the intended order.
        return stats[:, :-1][2:]
    return []
def parse_tables(image: np.array, show=False):
    """Runs the full table parsing process.

    Args:
        image (np.array): single PDF page, converted to a numpy array
        show (bool): unused; kept for interface compatibility

    Returns:
        list: list of rectangles corresponding to table cells
    """
    binarized = preprocess(image)
    line_image = isolate_vertical_and_horizontal_components(binarized)
    boxes = turn_connected_components_into_rects(line_image)
    cell_rects = [Rectangle.from_xywh(box) for box in boxes]
    return remove_isolated(cell_rects)
# def make_lines(image: np.array, horizontal=True, kernel_length=40)
def detect_horizontal_lines(image_bin: np.array, kernel_length=48):
    """Isolate and reinforce the horizontal lines of a binary page image.

    Args:
        image_bin (np.array): binarized page image
        kernel_length (int): minimum horizontal run (px) to count as a line

    Returns:
        np.array: binary image containing only the horizontal lines
    """
    # BUG FIX: `kernel_length` was previously ignored in favor of a
    # hard-coded 48; the default is changed 40 -> 48 so calls without the
    # argument (the only visible call pattern) behave exactly as before.
    kernel_h = np.ones((1, kernel_length), np.uint8)
    img_bin_h = cv2.morphologyEx(image_bin, cv2.MORPH_OPEN, kernel_h)
    # Thicken the surviving lines, then solidify/extend them with blur.
    kernel_h = np.ones((1, 30), np.uint8)
    img_bin_h = cv2.dilate(img_bin_h, kernel_h, iterations=2)
    img_bin_h = apply_motion_blur(img_bin_h, 0)
    _, img_bin_h = cv2.threshold(img_bin_h, 120, 255, cv2.THRESH_BINARY)
    return img_bin_h
def detect_vertical_lines(image_bin: np.array, kernel_length=48):
    """Isolate and reinforce the vertical lines of a binary page image.

    Args:
        image_bin (np.array): binarized page image
        kernel_length (int): minimum vertical run (px) to count as a line

    Returns:
        np.array: binary image containing only the vertical lines
    """
    # BUG FIX: `kernel_length` was previously ignored in favor of a
    # hard-coded 48; the default is changed 40 -> 48 so calls without the
    # argument (the only visible call pattern) behave exactly as before.
    kernel_v = np.ones((kernel_length, 1), np.uint8)
    img_bin_v = cv2.morphologyEx(image_bin, cv2.MORPH_OPEN, kernel_v)
    # Thicken the surviving lines, then solidify/extend them with blur.
    kernel_v = np.ones((30, 1), np.uint8)
    img_bin_v = cv2.dilate(img_bin_v, kernel_v, iterations=2)
    img_bin_v = apply_motion_blur(img_bin_v, 90)
    _, img_bin_v = cv2.threshold(img_bin_v, 120, 255, cv2.THRESH_BINARY)
    return img_bin_v
def detect_endpoints(
    image: np.array, is_horizontal: bool
) -> list[tuple[int, int, int, int]]:
    """Detect line segments via Hough transform and merge collinear ones.

    Args:
        image (np.array): binary image containing only lines of one orientation
        is_horizontal (bool): True for horizontal lines, False for vertical

    Returns:
        list[tuple[int, int, int, int]]: merged (x1, y1, x2, y2) endpoints
    """
    def are_collinear(
        quad1: tuple[int, int, int, int], quad2: tuple[int, int, int, int], index: int
    ) -> bool:
        # index selects the cross-axis coordinate (1 for horizontal lines,
        # 0 for vertical): segments are collinear when both endpoint
        # coordinates on that axis lie within 15 px.
        dist_a = abs(quad1[index] - quad2[index])
        dist_b = abs(quad1[index + 2] - quad2[index + 2])
        # NOTE(review): for vertical lines (index 0) this "overlap" test is
        # an OR of two >= comparisons on the y-coords, which looks very
        # permissive — confirm it really expresses segment overlap.
        overlap = True if index else (quad1[1] >= quad2[3] or quad1[3] >= quad2[1])
        return (dist_a < 15) and (dist_b < 15) and overlap

    points = cv2.HoughLinesP(
        image, # Input edge image
        1, # Distance resolution in pixels
        np.pi / 180, # Angle resolution in radians
        threshold=100, # Min number of votes for valid line
        minLineLength=200, # Min allowed length of line
        maxLineGap=10, # Max allowed gap between line for joining them
    )
    # HoughLinesP returns None when no lines are found.
    points = points if points is not None else []
    lines = list(map(lambda x: tuple(x[0]), points))
    if not lines:
        return lines
    # Sort along the axis the segments run on so neighbors are adjacent.
    index = int(is_horizontal)
    lines.sort(key=lambda q: q[index])
    corrected = [lines[0]]
    for quad in lines[1:]:
        if are_collinear(corrected[-1], quad, bool(is_horizontal)):
            # Merge the new segment into the previous one, extending its span.
            prev = corrected.pop(-1)
            corrected.append(
                (
                    min(prev[0], quad[0]),
                    min(prev[1], quad[1]),
                    max(prev[2], quad[2]),
                    # NOTE(review): min() on BOTH y-endpoints is asymmetric
                    # with the x-handling above — confirm this is intended.
                    min(prev[3], quad[3]),
                )
                if is_horizontal
                else (
                    min(prev[0], quad[0]),
                    max(prev[1], quad[1]),
                    min(prev[2], quad[2]),
                    min(prev[3], quad[3]),
                )
            )
        else:
            corrected.append(quad)
    return corrected
def parse_lines(image: np.array, show=False) -> list[dict[str, float]]:
    """Detect horizontal and vertical ruling lines on a page image.

    Args:
        image (np.array): single page image as array
        show (bool): unused

    Returns:
        list[dict[str, float]]: one dict per line with keys
            x1/y1/x2/y2, each normalized to [0, 1] by the image size
    """
    image = preprocess(image)
    # kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (2, 2))
    # image = cv2.dilate(image, kernel, iterations=4)
    horizontal_line_img = detect_horizontal_lines(image)
    vertical_line_img = detect_vertical_lines(image)
    horizontal_endpoints = detect_endpoints(horizontal_line_img, is_horizontal=True)
    vertical_endpoints = detect_endpoints(vertical_line_img, is_horizontal=False)

    def format_quad(
        quad: tuple[int, int, int, int], max_x: int, max_y: int
    ) -> dict[str, float]:
        # Normalize endpoints and orient the quad so (x1, y1) leads.
        # (Annotation corrected: this returns a dict, not a tuple.)
        x1, y1, x2, y2 = quad
        # Swap endpoints when the segment runs "backwards" (5 px tolerance).
        if x1 > (x2 + 5):
            x1, y1, x2, y2 = x2, y2, x1, y1
        elif y1 > (y2 + 5):
            x1, y1, x2, y2 = x2, y2, x1, y1
        return {"x1": x1 / max_x, "y1": y1 / max_y, "x2": x2 / max_x, "y2": y2 / max_y}

    ymax, xmax = image.shape
    return list(
        map(
            lambda quad: format_quad(quad, xmax, ymax),
            horizontal_endpoints + vertical_endpoints,
        )
    )