Merge branch 'master' into containerization
This commit is contained in:
commit
bdd2146330
16
.gitignore
vendored
16
.gitignore
vendored
@ -3,3 +3,19 @@ __pycache__/
|
||||
deskew_model/
|
||||
/pdfs/
|
||||
/results/
|
||||
/pdfs/
|
||||
/env/
|
||||
/.idea/
|
||||
/.idea/.gitignore
|
||||
/.idea/misc.xml
|
||||
/.idea/inspectionProfiles/profiles_settings.xml
|
||||
/.idea/table_parsing.iml
|
||||
/.idea/vcs.xml
|
||||
/results/
|
||||
/data
|
||||
/table_parsing.egg-info
|
||||
/tests/VV-313450.pdf
|
||||
/vidocp.egg-info/dependency_links.txt
|
||||
/vidocp.egg-info/PKG-INFO
|
||||
/vidocp.egg-info/SOURCES.txt
|
||||
/vidocp.egg-info/top_level.txt
|
||||
|
||||
29
tests/test_table_parsing.py
Normal file
29
tests/test_table_parsing.py
Normal file
@ -0,0 +1,29 @@
|
||||
import pytest
|
||||
from vidocp.table_parsing import parse_table
|
||||
import numpy as np
|
||||
import pdf2image
|
||||
|
||||
|
||||
@pytest.fixture()
def rects():
    """Parse the first page of the bundled sample PDF and return the
    table rectangles detected by ``parse_table``."""
    page_index = 0
    # BUGFIX: resolve the sample PDF relative to this test file instead of
    # a hard-coded developer home directory, so the test runs anywhere.
    from pathlib import Path
    pdf_path = str(Path(__file__).resolve().parent / "VV-313450.pdf")
    # pdf2image page numbers are 1-based, hence the +1 offset.
    page = pdf2image.convert_from_path(pdf_path, first_page=page_index + 1, last_page=page_index + 1)[0]
    page = np.array(page)
    rectangles = parse_table(page)
    return rectangles
|
||||
|
||||
|
||||
def test_num_of_rects(rects):
|
||||
assert len(rects) == 49
|
||||
|
||||
|
||||
def test_range_of_rects(rects):
|
||||
expected_range = ((210, 605), (1430, 1620))
|
||||
topleft = min(rects)
|
||||
x,y,w,h = max(rects)
|
||||
bottomright = (x+w, y+h)
|
||||
|
||||
assert topleft >= expected_range[0]
|
||||
assert bottomright <= expected_range[1]
|
||||
|
||||
170
vidocp/table_parsig.py
Normal file
170
vidocp/table_parsig.py
Normal file
@ -0,0 +1,170 @@
|
||||
from itertools import count
|
||||
|
||||
import cv2
|
||||
import imutils
|
||||
import numpy as np
|
||||
import pdf2image
|
||||
from matplotlib import pyplot as plt
|
||||
|
||||
|
||||
def parse(image: np.array):
    """Yield bounding rectangles of table-like regions on a page image.

    Parameters:
        image: BGR page image as a numpy array.

    Yields:
        (x, y, w, h) bounding rectangles (OpenCV order: x = column,
        y = row) of the external contours found after isolating long
        horizontal and vertical strokes.
    """
    gray_scale = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # plt.imshow(gray_scale)
    blurred = cv2.GaussianBlur(gray_scale, (7, 7), 2)  # 5 5 1
    thresh = cv2.threshold(blurred, 251, 255, cv2.THRESH_BINARY)[1]
    # plt.imshow(thresh)
    img_bin = ~thresh

    line_min_width = 7
    kernel_h = np.ones((10, line_min_width), np.uint8)
    kernel_v = np.ones((line_min_width, 10), np.uint8)

    # Morphological opening keeps only runs longer than the kernel,
    # isolating horizontal and vertical line segments respectively.
    img_bin_h = cv2.morphologyEx(img_bin, cv2.MORPH_OPEN, kernel_h)
    img_bin_v = cv2.morphologyEx(img_bin, cv2.MORPH_OPEN, kernel_v)
    # plt.imshow(img_bin_h)
    # plt.imshow(img_bin_v)
    img_bin_final = img_bin_h | img_bin_v
    # BUGFIX: the plt.imshow debug call below was left active while all
    # sibling visualizations are commented out; disabled for library use.
    # plt.imshow(img_bin_final)
    contours = cv2.findContours(img_bin_final, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    # grab_contours normalizes the OpenCV 3 vs 4 return-tuple difference.
    contours = imutils.grab_contours(contours)
    for c in contours:
        peri = cv2.arcLength(c, True)
        approx = cv2.approxPolyDP(c, 0.04 * peri, True)
        yield cv2.boundingRect(approx)
|
||||
|
||||
def parse_tables(image: np.array, rects: list):
    """Detect cell components inside each candidate table rectangle.

    Parameters:
        image: BGR page image as a numpy array.
        rects: iterable of (x, y, w, h) rectangles (OpenCV order,
            x = column, y = row).

    Returns:
        list of [(x, y, w, h), stats] pairs, where ``stats`` are the
        connected-component statistics of the rectangle's interior.
    """
    parsed_tables = []
    for rect in rects:
        (x, y, w, h) = rect
        # BUGFIX: numpy images are indexed [row, col] == [y, x]; the
        # previous image[x:x + w, y:y + h] swapped the axes and cropped
        # the wrong region.
        region_of_interest = image[y:y + h, x:x + w]
        gray = cv2.cvtColor(region_of_interest, cv2.COLOR_BGR2GRAY)
        thresh = cv2.threshold(gray, 200, 255, cv2.THRESH_BINARY)[1]
        img_bin = ~thresh

        line_min_width = 5
        kernel_h = np.ones((1, line_min_width), np.uint8)
        kernel_v = np.ones((line_min_width, 1), np.uint8)

        img_bin_h = cv2.morphologyEx(img_bin, cv2.MORPH_OPEN, kernel_h)
        img_bin_v = cv2.morphologyEx(img_bin, cv2.MORPH_OPEN, kernel_v)
        # find_and_close_internal_gaps(img_bin_v)  # disabled gap-closing step
        img_bin_final = img_bin_h | img_bin_v
        # find_and_close_internal_gaps(img_bin_final)
        # find_and_close_edges(img_bin_final)

        # Inverted so that enclosed cells become connected components.
        _, labels, stats, _ = cv2.connectedComponentsWithStats(~img_bin_final, connectivity=8, ltype=cv2.CV_32S)
        parsed_tables.append([(x, y, w, h), stats])
    return parsed_tables
|
||||
|
||||
def annotate_table(image, parsed_tables):
    """Draw each filtered table cell, plus its x/y/w/h labels, onto ``image``.

    ``parsed_tables`` is the output of parse_tables(): pairs of the table's
    original rectangle and its connected-component stats.
    """
    magenta = (255, 0, 255)
    for original_coordinates, stats in parsed_tables:
        # NOTE(review): original_coordinates is never applied, so the
        # (presumably ROI-local) stats are drawn without an offset onto the
        # full image — confirm this is intended.
        for x, y, w, h, area in filter_unconnected_cells(stats):
            cv2.rectangle(image, (x, y), (x + w, y + h), magenta, 2)
            labels = zip(("x", "y", "w", "h"), (x, y, w, h))
            for line_no, (label, value) in enumerate(labels, start=1):
                anno = f"{label} = {value}"
                position = (int(x + 5), int(y + h - (20 * line_no)))
                cv2.putText(image, anno, position, cv2.FONT_HERSHEY_SIMPLEX, 0.7, magenta, 2)

    return image
|
||||
|
||||
|
||||
|
||||
def filter_unconnected_cells(stats):
    """Keep only component stats that look like real, connected table cells.

    A row (x, y, w, h, area) survives when it is large enough
    (w > 35, h > 13, area > 500) and shares an x or y coordinate with the
    previous row (and, when one exists, possibly the next row).
    Row 0 is never considered as a candidate — presumably the background
    component from connectedComponentsWithStats; confirm.
    """
    filtered_cells = []
    last = len(stats) - 1
    for idx in range(1, len(stats)):
        left = stats[idx - 1]
        middle = stats[idx]
        right = stats[idx + 1] if idx < last else None

        x, y, w, h, area = middle
        if not (w > 35 and h > 13 and area > 500):
            continue

        if right is None:
            # Last candidate: only the preceding row can vouch for it.
            aligned = y == left[1] or x == left[0]
        else:
            aligned = y == left[1] or y == right[1] or x == left[0] or x == right[0]

        if aligned:
            filtered_cells.append(middle)
    return filtered_cells
|
||||
|
||||
def find_and_close_edges(img_bin_final):
    """Close large open contours by drawing their missing outer rectangle.

    For every external contour, the top-left and bottom-right corners of
    its extreme-point bounding box are computed; if neither corner is an
    actual contour point and the box area is at least 50000 px, a white
    rectangle is drawn onto ``img_bin_final`` to close the outline.
    Returns the (mutated) image.
    """
    contours, hierarchy = cv2.findContours(img_bin_final, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    for cnt in contours:
        xs = cnt[:, :, 0]
        ys = cnt[:, :, 1]
        left = tuple(cnt[xs.argmin()][0])
        right = tuple(cnt[xs.argmax()][0])
        top = tuple(cnt[ys.argmin()][0])
        bottom = tuple(cnt[ys.argmax()][0])

        topleft = [left[0], top[1]]
        bottomright = [right[0], bottom[1]]

        # The contour "has" its corners only if those exact points occur
        # among its vertices.
        corner_tl = np.array([topleft])
        corner_br = np.array([bottomright])
        missing_external_edges = not any(
            np.array_equal(pt, corner_br) or np.array_equal(pt, corner_tl)
            for pt in cnt
        )

        box_area = (bottomright[0] - topleft[0]) * (bottomright[1] - topleft[1])
        if missing_external_edges and box_area >= 50000:
            cv2.rectangle(img_bin_final, tuple(topleft), tuple(bottomright), (255, 255, 255), 2)
            # print("missing cell detectet rectangle drawn")

    return img_bin_final
|
||||
|
||||
|
||||
|
||||
def parse_tables_in_pdf(pages):
    """Lazily pair each page's parse() result with its zero-based index."""
    return ((parse(page), index) for index, page in enumerate(pages))
|
||||
|
||||
# def annotate_tables_in_pdf(pdf_path, page_index=1):
|
||||
# # timeit()
|
||||
# page = pdf2image.convert_from_path(pdf_path, first_page=page_index + 1, last_page=page_index + 1)[0]
|
||||
# page = np.array(page)
|
||||
#
|
||||
# _, stats = parse(page)
|
||||
# page = annotate_image(page, stats)
|
||||
# # print(timeit())
|
||||
# fig, ax = plt.subplots(1, 1)
|
||||
# fig.set_size_inches(20, 20)
|
||||
# ax.imshow(page)
|
||||
# plt.show()
|
||||
|
||||
|
||||
def annotate_boxes(image, rects):
    """Outline every layout rectangle on ``image`` in green.

    Parameters:
        image: BGR page image (mutated in place and also returned).
        rects: iterable of (x, y, w, h) rectangles.
    """
    # BUGFIX: removed leftover debug statement ``print(type(rects))``.
    for x, y, w, h in rects:
        cv2.rectangle(image, (x, y), (x + w, y + h), (0, 255, 0), 2)

    return image
|
||||
|
||||
def filter_tables_or_images(rects):
    """Keep only rectangles large enough to be a whole table or image.

    Parameters:
        rects: iterable of (x, y, w, h) rectangles.

    Returns:
        list of rectangles whose area w * h exceeds one megapixel —
        smaller layout boxes are discarded.
    """
    # BUGFIX: removed leftover debug ``print`` calls; loop-and-append
    # replaced by a comprehension.
    return [rect for rect in rects if rect[2] * rect[3] > 10 ** 6]
|
||||
|
||||
|
||||
|
||||
|
||||
def annotate_tables_in_pdf(pdf_path, page_index=1):
    """Render one PDF page, detect its tables, and show an annotated plot.

    Parameters:
        pdf_path: path to the PDF file.
        page_index: zero-based page to process (pdf2image is 1-based,
            hence the +1 offset below).
    """
    page = pdf2image.convert_from_path(pdf_path, first_page=page_index + 1, last_page=page_index + 1)[0]
    page = np.array(page)

    # BUGFIX: parse() is a generator; annotate_boxes() used to exhaust it,
    # leaving filter_tables_or_images() with an empty iterator. Materialize
    # the rectangles once so both consumers see them.
    layout_boxes = list(parse(page))
    page = annotate_boxes(page, layout_boxes)
    # NOTE(review): parse_tables() runs on the already-annotated page, so
    # the green outlines may influence cell detection — confirm intended.
    parsed_tables = parse_tables(page, filter_tables_or_images(layout_boxes))
    page = annotate_table(page, parsed_tables)

    fig, ax = plt.subplots(1, 1)
    fig.set_size_inches(20, 20)
    ax.imshow(page)
    plt.show()
|
||||
@ -25,6 +25,7 @@ def add_external_contours(image, img):
|
||||
return image
|
||||
|
||||
|
||||
|
||||
def isolate_vertical_and_horizontal_components(img_bin, bounding_rects):
|
||||
line_min_width = 48
|
||||
kernel_h = np.ones((1, line_min_width), np.uint8)
|
||||
@ -38,7 +39,7 @@ def isolate_vertical_and_horizontal_components(img_bin, bounding_rects):
|
||||
kernel_v = np.ones((30, 1), np.uint8)
|
||||
img_bin_h = cv2.dilate(img_bin_h, kernel_h, iterations=2)
|
||||
img_bin_v = cv2.dilate(img_bin_v, kernel_v, iterations=2)
|
||||
show_mpl(img_bin_h | img_bin_v)
|
||||
# show_mpl(img_bin_h | img_bin_v)
|
||||
|
||||
# reduced filtersize from 100 to 80 to minimize splitting narrow cells
|
||||
img_bin_h = apply_motion_blur(img_bin_h, 80, 0)
|
||||
@ -49,10 +50,11 @@ def isolate_vertical_and_horizontal_components(img_bin, bounding_rects):
|
||||
# changed threshold from 110 to 120 to minimize cell splitting
|
||||
th1, img_bin_final = cv2.threshold(img_bin_final, 120, 255, cv2.THRESH_BINARY)
|
||||
img_bin_final = cv2.dilate(img_bin_final, np.ones((1, 1), np.uint8), iterations=1)
|
||||
show_mpl(img_bin_final)
|
||||
# show_mpl(img_bin_final)
|
||||
|
||||
# problem if layout parser detects too big of a layout box as in VV-748542.pdf p.22
|
||||
img_bin_final = disconnect_non_existing_cells(img_bin_final, bounding_rects)
|
||||
show_mpl(img_bin_final)
|
||||
# show_mpl(img_bin_final)
|
||||
|
||||
return img_bin_final
|
||||
|
||||
|
||||
74
vidocp/table_parsing_2.py
Normal file
74
vidocp/table_parsing_2.py
Normal file
@ -0,0 +1,74 @@
|
||||
import cv2
|
||||
import matplotlib.pyplot as plt
|
||||
import numpy as np
|
||||
from pdf2image import pdf2image
|
||||
|
||||
|
||||
def add_external_contours(image, img):
    """Draw the bounding box of every external contour of ``img`` onto ``image``.

    Returns ``image`` (which is also mutated in place).
    """
    contours, hierarchy = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)

    for contour in contours:
        box_x, box_y, box_w, box_h = cv2.boundingRect(contour)
        cv2.rectangle(image, (box_x, box_y), (box_x + box_w, box_y + box_h), 255, 1)

    return image
|
||||
|
||||
|
||||
def isolate_vertical_and_horizontal_components(img_bin):
    """Keep only long horizontal and vertical strokes of a binary image.

    Morphological opening with a 1x30 / 30x1 kernel erases everything
    shorter than 30 px in the respective direction; the two results are
    OR-combined into the returned line mask.
    """
    line_min_width = 30
    horizontal_kernel = np.ones((1, line_min_width), np.uint8)
    vertical_kernel = np.ones((line_min_width, 1), np.uint8)

    horizontal = cv2.morphologyEx(img_bin, cv2.MORPH_OPEN, horizontal_kernel)
    vertical = cv2.morphologyEx(img_bin, cv2.MORPH_OPEN, vertical_kernel)

    return horizontal | vertical
|
||||
|
||||
|
||||
def annotate_image(image, stats):
    """Return a copy of ``image`` with labelled boxes for each stats row.

    Rows 0 and 1 of ``stats`` are skipped — presumably the background and
    outer-page components from connectedComponentsWithStats; confirm.
    Boxes smaller than 10x10 px are ignored.
    """
    image = image.copy()
    magenta = (255, 0, 255)

    for x, y, w, h, area in stats[2:]:
        if w > 10 and h > 10:
            cv2.rectangle(image, (x, y), (x + w, y + h), magenta, 2)

            labels = zip(("x", "y", "w", "h"), (x, y, w, h))
            for line_no, (label, value) in enumerate(labels, start=1):
                anno = f"{label} = {value}"
                origin = (int(x + 5), int(y + h - (20 * line_no)))
                cv2.putText(image, anno, origin, cv2.FONT_HERSHEY_SIMPLEX, 0.7, magenta, 2)

    return image
|
||||
|
||||
|
||||
def parse_table(image: np.array):
    """Detect table cells in ``image`` and return their component stats.

    Pipeline: grayscale -> binary threshold at 150 -> invert -> keep long
    horizontal/vertical strokes -> draw external-contour boxes back onto
    the same mask to close outlines -> connected-component analysis on the
    re-inverted mask. Returns the stats array from
    cv2.connectedComponentsWithStats.
    """
    gray_scale = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    _, img_bin = cv2.threshold(gray_scale, 150, 255, cv2.THRESH_BINARY)
    img_bin = ~img_bin

    img_bin = isolate_vertical_and_horizontal_components(img_bin)
    # The mask is passed twice on purpose: contour boxes are drawn onto
    # the very image they were found in.
    img_bin_final = add_external_contours(img_bin, img_bin)

    _, labels, stats, _ = cv2.connectedComponentsWithStats(~img_bin_final, connectivity=8, ltype=cv2.CV_32S)

    return stats
|
||||
|
||||
|
||||
def annotate_tables_in_pdf(pdf_path, page_index=1):
    """Render one page of ``pdf_path`` and show its detected cells.

    ``page_index`` is zero-based; pdf2image pages are 1-based, hence the
    +1 offset.
    """
    page_number = page_index + 1
    page = pdf2image.convert_from_path(pdf_path, first_page=page_number, last_page=page_number)[0]
    page = np.array(page)

    stats = parse_table(page)
    page = annotate_image(page, stats)

    fig, ax = plt.subplots(1, 1)
    fig.set_size_inches(20, 20)
    ax.imshow(page)
    plt.show()
|
||||
Loading…
x
Reference in New Issue
Block a user