Merge in RR/cv-analysis from new_pyinfra to master
Squashed commit of the following:
commit f7a01a90aad1c402ac537de5bdf15df628ad54df
Author: Julius Unverfehrt <julius.unverfehrt@iqser.com>
Date: Wed Jul 27 10:40:59 2022 +0200
fix typo
commit ff4d549fac5b612c2d391ae85823c5eca1e91916
Author: Julius Unverfehrt <julius.unverfehrt@iqser.com>
Date: Wed Jul 27 10:34:04 2022 +0200
adjust build scripts for new pyinfra
commit ecd70f60d46406d8b6cc7f36a1533d706c917ca8
Author: Julius Unverfehrt <julius.unverfehrt@iqser.com>
Date: Wed Jul 27 09:42:55 2022 +0200
simplify logging by using default configurations
commit 20193c14c940eed2b0a7a72058167e26064119d0
Author: Julius Unverfehrt <julius.unverfehrt@iqser.com>
Date: Tue Jul 26 17:16:57 2022 +0200
tidy up; refactor config logic so it does not depend on external files
commit d8069cd4d404a570bb04a04278161669d1c83332
Author: Isaac Riley <Isaac.Riley@iqser.com>
Date: Tue Jul 26 15:14:59 2022 +0200
update pyinfra
commit c3bc11037cca9baf016043ab997c566f5b4a2586
Author: Isaac Riley <Isaac.Riley@iqser.com>
Date: Tue Jul 26 15:09:14 2022 +0200
repair tests
commit 6f4e4f2863ee16ae056c1d432f663858c5f10221
Author: Isaac Riley <Isaac.Riley@iqser.com>
Date: Tue Jul 26 14:52:38 2022 +0200
update server logic to work with new pyinfra; update scripts for pyinfra as a submodule
commit 2a18dba81de5ee84d0bdf0e77f478693e8d8aef4
Author: Isaac Riley <Isaac.Riley@iqser.com>
Date: Tue Jul 26 14:10:41 2022 +0200
formatting
commit d87ce9328de9aa2341228af9b24473d5e583504e
Author: Isaac Riley <Isaac.Riley@iqser.com>
Date: Tue Jul 26 14:10:11 2022 +0200
make server logic compatible with new pyinfra
import hashlib
import json
import os
from itertools import chain
from os import path

import pandas as pd
from pdf2image import convert_from_path

from cv_analysis.config import get_config

CV_CONFIG = get_config()
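# This script only relies on the path attributes of CV_CONFIG that are referenced
# below (dvc_data_dir, test_data_dir, pdf_for_testing, png_for_testing,
# hashed_pdfs_for_testing); their concrete values come from the cv_analysis
# configuration, not from this file.
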
def read_json(file_path):
    with open(file_path, encoding="utf-8") as file:
        data = json.load(file)
    return data

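# example_pages.json (loaded in main() below) is assumed, based on how the helpers
# below index pages[0] and pages[1], to map each PDF file name to a list of inclusive
# [first_page, last_page] ranges, e.g.
#
#     {
#         "some_document.pdf": [[1, 3], [7, 7]],
#         "another_document.pdf": [[2, 4]]
#     }
#
# The file names in this example are purely illustrative.
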
def collect_metadata(example_pages, save=False):
    # Build one [image_name, pdf_name, page] row per selected page.
    make_metadata_entry = make_metadata_entry_maker()
    metadata = []
    for name, document_sections in example_pages.items():
        metadata.append(iter_metadata_entries(name, document_sections, make_metadata_entry))
    metadata = list(chain.from_iterable(metadata))

    df = pd.DataFrame(data=metadata, columns=["image_name", "pdf_name", "page"])
    if save:
        df.to_csv(path.join(CV_CONFIG.dvc_data_dir, "metadata_testing_files.csv"))
    else:
        return df

def iter_metadata_entries(name, document_sections, make_metadata_entry):
    # Yield one metadata entry per page of each inclusive [first, last] page range.
    for pages in document_sections:
        for page_nr in range(pages[0], pages[1] + 1):
            yield make_metadata_entry(name, page_nr)

def make_metadata_entry_maker():
    # Closure that numbers entries sequentially: fig_table000, fig_table001, ...
    i = -1

    def make_metadata_entry(name, page_nr):
        nonlocal i
        i += 1
        return [f"fig_table{i:0>3}", name, page_nr]

    return make_metadata_entry

def split_pdf(example_pages):
    # Render the selected page ranges of each PDF to 300 dpi PNGs named fig_tableNNN.png,
    # matching the image names produced by make_metadata_entry_maker().
    dir_path = CV_CONFIG.pdf_for_testing
    i = 0
    for name, document_sections in example_pages.items():
        for pages in document_sections:
            images = convert_from_path(
                pdf_path=path.join(dir_path, name), dpi=300, first_page=pages[0], last_page=pages[1]
            )
            for image in images:
                fp = path.join(CV_CONFIG.png_for_testing, f"fig_table{i:0>3}.png")
                image.save(fp=fp, dpi=(300, 300))
                i += 1

def find_hash(file_path):
    # Compute the SHA-256 hex digest of a file, reading it in 64 KiB blocks.
    BLOCK_SIZE = 65536

    file_hash = hashlib.sha256()
    with open(file_path, "rb") as f:
        fb = f.read(BLOCK_SIZE)
        while len(fb) > 0:
            file_hash.update(fb)
            fb = f.read(BLOCK_SIZE)

    return file_hash.hexdigest()

def rename_files_with_hash(example_pages):
    # Rename every PDF listed in example_pages to <sha256-of-its-content>.pdf.
    files_to_rename = list(example_pages.keys())
    folder = CV_CONFIG.hashed_pdfs_for_testing

    # Iterate through the folder and rename only the listed files
    for file in os.listdir(folder):
        if file in files_to_rename:
            # construct the current full path
            old_name = path.join(folder, file)
            # build the new name from the file's hash
            file_hash = find_hash(old_name)
            new_name = path.join(folder, file_hash + ".pdf")
            # rename the file
            os.rename(old_name, new_name)

    # verify the result
    res = os.listdir(folder)
    print(res)

def main():
    example_pages = read_json(path.join(CV_CONFIG.test_data_dir, "example_pages.json"))
    rename_files_with_hash(example_pages)
    # collect_metadata(example_pages, save=True)
    # split_pdf(example_pages)


if __name__ == "__main__":
    main()