Different approaches to isolate line components of tables in scanned PDF files.

This commit is contained in:
llocarnini 2022-02-16 12:37:17 +01:00
parent c2faf7d00b
commit 57ca47f38d
2 changed files with 86 additions and 34 deletions

1
.gitignore vendored
View File

@ -1 +1,2 @@
/pdfs/
/results/

View File

@ -10,10 +10,8 @@ from vidocp.utils.display import show_mpl
from vidocp.utils.draw import draw_rectangles
from vidocp.utils.post_processing import xywh_to_vecs, xywh_to_vec_rect, adjacent1d, remove_isolated
import matplotlib.pyplot as plt
def add_external_contours(image, img):
    # Find only the outermost contours of the binary image `img`; CHAIN_APPROX_NONE
    # keeps every boundary point rather than compressing segments.
    # NOTE(review): this is a diff view — the body between the hunk markers is
    # elided, so what happens between findContours and the return is not visible
    # here. Presumably the contours are drawn onto `image`; confirm in the full file.
    contours, _ = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    # contours = filter(partial(is_large_enough, min_area=5000000), contours)
@ -24,46 +22,107 @@ def add_external_contours(image, img):
    return image
def process_lines(img_line_component):
    """Detect straight line segments in a binary line-component image and
    redraw them with a thick white stroke, closing small gaps in place.

    Parameters
    ----------
    img_line_component : np.ndarray
        Single-channel binary image containing isolated line components.

    Returns
    -------
    np.ndarray
        The same image, with detected segments thickened.
    """
    def draw_lines(detected_lines, img_bin):
        # Each entry of a HoughLinesP result is a 1xN array of
        # (x1, y1, x2, y2) segment endpoints.
        for line in detected_lines:
            for x1, y1, x2, y2 in line:
                cv2.line(img_bin, (x1, y1), (x2, y2), (255, 255, 255), 6)
        return img_bin

    # FIX 1: cv2.HoughLines returns (rho, theta) pairs, which cannot be
    # unpacked as segment endpoints — use the probabilistic variant, which
    # yields (x1, y1, x2, y2) segments.
    # FIX 2: the length parameters must be passed by keyword; positionally
    # they land in the `lines` output-array slot of the OpenCV signature.
    # FIX 3: draw onto the input image, not onto the result array itself
    # (the previous call was draw_lines(lines, lines)).
    detected = cv2.HoughLinesP(img_line_component, 1, np.pi / 180, 500,
                               minLineLength=500, maxLineGap=250)
    # HoughLinesP returns None when nothing is detected; skip drawing then.
    if detected is not None:
        draw_lines(detected, img_line_component)
    return img_line_component
# def isolate_vertical_and_horizontal_components(img_bin):
# line_min_width = 50
# kernel_h = np.ones((1, line_min_width), np.uint8)
# kernel_v = np.ones((line_min_width, 1), np.uint8)
#
# img_bin_h = cv2.morphologyEx(img_bin, cv2.MORPH_OPEN, kernel_h)
# img_bin_v = cv2.morphologyEx(img_bin, cv2.MORPH_OPEN, kernel_v)
# show_mpl(img_bin_h | img_bin_v)
#
# img_bin_h = apply_motion_blur(img_bin_h, 140, 0)
# img_bin_v = apply_motion_blur(img_bin_v, 140, 90)
# show_mpl(img_bin_h | img_bin_v)
#
# th1, img_bin_h = cv2.threshold(img_bin_h, 95, 255, cv2.THRESH_BINARY)
# th1, img_bin_v = cv2.threshold(img_bin_v, 95, 255, cv2.THRESH_BINARY)
# show_mpl(img_bin_h | img_bin_v)
#
# kernel_h = np.ones((1, 8), np.uint8)
# kernel_v = np.ones((8, 1), np.uint8)
# img_bin_h = cv2.dilate(img_bin_h, kernel_h, iterations=4)
# img_bin_v = cv2.dilate(img_bin_v, kernel_v, iterations=4)
#
# img_bin_final = img_bin_h | img_bin_v
# show_mpl(img_bin_final)
# # th 130
# #th1, img_bin_final = cv2.threshold(img_bin_final, 90, 255, cv2.THRESH_BINARY)
# #show_mpl(img_bin_final)
# return img_bin_final
return img_bin_h, img_bin_v
def isolate_vertical_and_horizontal_components(img_bin):
    """Extract the horizontal and vertical ruling lines of a table from a
    binary (white-on-black) page image.

    Pipeline: morphological opening with long, thin kernels keeps only runs of
    at least `line_min_width` pixels; dilation, directional motion blur and
    thresholding bridge small gaps; erosion trims blur artefacts; a final OR
    merges both orientations into one line mask.

    NOTE(review): all numeric constants (kernel sizes, blur lengths,
    thresholds) appear hand-tuned for a particular scan resolution — confirm
    before reusing on differently sized inputs.
    """
    line_min_width = 30  # minimum run length (px) counted as part of a line
    kernel_h = np.ones((1, line_min_width), np.uint8)
    kernel_v = np.ones((line_min_width, 1), np.uint8)
    # Opening removes everything that is not a long horizontal/vertical stroke.
    img_bin_h = cv2.morphologyEx(img_bin, cv2.MORPH_OPEN, kernel_h)
    img_bin_v = cv2.morphologyEx(img_bin, cv2.MORPH_OPEN, kernel_v)
    show_mpl(img_bin_h | img_bin_v)  # debug visualisation
    # NOTE(review): the third positional argument of cv2.dilate is `dst`, not
    # `iterations` — passing 1 here is probably unintended; compare the
    # `iterations=1` keyword used with cv2.erode below. TODO confirm.
    img_bin_h = cv2.dilate(img_bin_h, kernel_h, 1)
    img_bin_v = cv2.dilate(img_bin_v, kernel_v, 1)
    # Motion blur along each line's own axis smears segments together,
    # closing gaps left by broken or faint ruling lines.
    img_bin_h = apply_motion_blur(img_bin_h, 150, 0)
    img_bin_v = apply_motion_blur(img_bin_v, 150, 90)
    show_mpl(img_bin_h | img_bin_v)  # debug visualisation
    # Re-binarise the blurred images; 70 keeps only sufficiently dense smears.
    th1, img_bin_h = cv2.threshold(img_bin_h, 70, 255, cv2.THRESH_BINARY)
    th1, img_bin_v = cv2.threshold(img_bin_v, 70, 255, cv2.THRESH_BINARY)
    show_mpl(img_bin_h | img_bin_v)  # debug visualisation
    # Erode with short kernels to shave off blur artefacts thinner than 10 px.
    kernel_h = np.ones((1, 10), np.uint8)
    kernel_v = np.ones((10, 1), np.uint8)
    img_bin_h = cv2.erode(img_bin_h, kernel_h, iterations=1)
    img_bin_v = cv2.erode(img_bin_v, kernel_v, iterations=1)
    # A second, shorter blur pass re-joins segments split by the erosion.
    img_bin_h = apply_motion_blur(img_bin_h, 100, 0)
    img_bin_v = apply_motion_blur(img_bin_v, 100, 90)
    # img_bin_h, img_bin_v = process_lines(img_bin_h,img_bin_v)
    img_bin_final = img_bin_h | img_bin_v  # union of both line orientations
    kernel = np.ones((5, 5), np.uint8)  # only referenced by the disabled dilate below
    # img_bin_final = cv2.dilate(img_bin_final, kernel, 2)
    # Very low threshold: any remaining blur residue counts as line pixels.
    th1, img_bin_final = cv2.threshold(img_bin_final, 10, 255, cv2.THRESH_BINARY)
    show_mpl(img_bin_final)  # debug visualisation
    # th 130
    # th1, img_bin_final = cv2.threshold(img_bin_final, 150, 255, cv2.THRESH_BINARY)
    # show_mpl(img_bin_final)
    return img_bin_final
# def isolate_vertical_and_horizontal_components(img_bin):
# line_min_width = 30
# kernel_h = np.ones((1, line_min_width), np.uint8)
# kernel_v = np.ones((line_min_width, 1), np.uint8)
#
# img_bin_h = cv2.morphologyEx(img_bin, cv2.MORPH_OPEN, kernel_h)
# img_bin_v = cv2.morphologyEx(img_bin, cv2.MORPH_OPEN, kernel_v)
# show_mpl(img_bin_h | img_bin_v)
#
# kernel_h = np.ones((1, 30), np.uint8)
# kernel_v = np.ones((30, 1), np.uint8)
# img_bin_h = cv2.dilate(img_bin_h, kernel_h, iterations=1)
# img_bin_v = cv2.dilate(img_bin_v, kernel_v, iterations=1)
# show_mpl(img_bin_h | img_bin_v)
#
# img_bin_h = apply_motion_blur(img_bin_h, 100, 0)
# img_bin_v = apply_motion_blur(img_bin_v, 100, 90)
#
# img_bin_final = img_bin_h | img_bin_v
# show_mpl(img_bin_final)
# # th 130
# th1, img_bin_final = cv2.threshold(img_bin_final, 125, 255, cv2.THRESH_BINARY)
# show_mpl(img_bin_final)
#
# return img_bin_final
# FIXME: does not work yet
def has_table_shape(rects):
    """Heuristic check of whether the given rectangles are laid out like a table.

    NOTE(review): diff view — the body is elided at the hunk marker below;
    only the first statements and the tail of the final expression are visible.
    """
    # NOTE(review): assert is stripped under `python -O`; raising TypeError
    # would make this input check reliable.
    assert isinstance(rects, list)
    # Flatten every rectangle into its corner/edge vectors for analysis.
    points = list(chain(*map(xywh_to_vecs, rects)))
@ -96,29 +155,24 @@ def has_table_shape(rects):
    )
def apply_motion_blur(image, size, angle):
    """Blur `image` along a single direction.

    Builds a `size` x `size` kernel whose centre row is ones, rotates it by
    `angle` degrees about the kernel centre, normalises it to sum to 1, and
    convolves it with the image. angle=0 blurs horizontally, 90 vertically.

    FIX: this span of the diff contained both the old and the reformatted new
    revision of the body; executed as written, the centre row was re-set to
    ones AFTER the kernel had been rotated and normalised, corrupting the
    kernel before the second rotation. Keep exactly one copy of the sequence.
    """
    k = np.zeros((size, size), dtype=np.float32)
    k[(size - 1) // 2, :] = np.ones(size, dtype=np.float32)
    # Rotate about the exact kernel centre (size/2 - 0.5 for even/odd sizes).
    k = cv2.warpAffine(k, cv2.getRotationMatrix2D((size / 2 - 0.5, size / 2 - 0.5), angle, 1.0), (size, size))
    # Normalise so the convolution preserves overall brightness.
    k = k * (1.0 / np.sum(k))
    return cv2.filter2D(image, -1, k)
def parse_table(image: np.array):
    """Locate table cell rectangles in a BGR page image.

    NOTE(review): diff view — lines from both the old and the new revision of
    this function appear below (two thresholding variants), and the middle of
    the body is elided at the hunk marker; do not read this as one runnable
    sequence. `rects` is produced in the elided portion.
    """
    def is_large_enough(stat):
        # One stats row: bounding box (x, y, w, h) plus pixel area.
        x1, y1, w, h, area = stat
        # was set too high (3000): Boxes in a Table can be smaller. example: a column titled "No." This cell has approximatly an area of 500 px based on 11pt letters
        # with extra condition for the length of height and width weirdly narrow rectangles can be filtered
        return area > 500 and w > 35 and h > 15
    gray_scale = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Old revision: blur before thresholding (removed in the new revision).
    blur_gray_scale = cv2.GaussianBlur(gray_scale, (5, 5), 1, borderType=cv2.BORDER_REPLICATE)
    th1, img_bin = cv2.threshold(blur_gray_scale, 195, 255, cv2.THRESH_BINARY)
    show_mpl(img_bin)
    # changed threshold value from 150 to 195 because of a shaded edgecase table
    # th1, img_bin = cv2.threshold(gray_scale, 195, 255, cv2.THRESH_BINARY)
    # blur_gray_scale = cv2.GaussianBlur(gray_scale, (5, 5), 1, borderType=cv2.BORDER_REPLICATE)
    # New revision: threshold the un-blurred grayscale directly.
    th1, img_bin = cv2.threshold(gray_scale, 195, 255, cv2.THRESH_BINARY)
    img_bin = ~img_bin  # invert so lines/text are white on black
    show_mpl(img_bin)  # debug visualisation
    img_bin = isolate_vertical_and_horizontal_components(img_bin)
    img_bin_final = add_external_contours(img_bin, img_bin)
@ -134,13 +188,10 @@ def parse_table(image: np.array):
    # if not has_table_shape(rects):
    # return False
    return rects
def annotate_tables_in_pdf(pdf_path, page_index=1):
page = pdf2image.convert_from_path(pdf_path, first_page=page_index + 1, last_page=page_index + 1)[0]
page = np.array(page)