import atexit
import io
import json
import traceback
from functools import lru_cache, partial
from itertools import chain, filterfalse, starmap
from operator import itemgetter, truth
from typing import Iterable, Iterator, List, Optional

import fitz
from PIL import Image
from funcy import merge, pluck, curry, compose, rcompose

from image_prediction.formatter.formatters.enum import EnumFormatter
from image_prediction.image_extractor.extractor import ImageExtractor, ImageMetadataPair
from image_prediction.image_extractor.filters import filter_metadata_for_scanned_pages
from image_prediction.info import Info
from image_prediction.stitching.stitching import stitch_pairs
from image_prediction.stitching.utils import validate_box_coords, validate_box_size
from image_prediction.utils import get_logger
from image_prediction.utils.generic import lift
|
|
|
|
logger = get_logger()
|
|
|
|
|
|
class ParsablePDFImageExtractor(ImageExtractor):
    """Extracts images together with their metadata from PDFs that have a parsable structure."""

    def __init__(self, verbose=False, tolerance=0):
        """
        Args:
            verbose: Whether to show progressbar
            tolerance: The tolerance in pixels for the distance between images, beyond which they will not be stitched
                together
        """
        # Set per `extract` call; Optional because no document is loaded yet.
        self.doc: Optional[fitz.fitz.Document] = None
        self.verbose = verbose
        self.tolerance = tolerance

    def extract(self, pdf: bytes, page_range: Optional[range] = None):
        """Yield an ImageMetadataPair for every (stitched) image found in the document.

        Args:
            pdf: The raw bytes of the PDF document.
            page_range: Range of pages to process; all pages are processed when None.
        """
        self.doc = fitz.Document(stream=pdf)

        pages = extract_pages(self.doc, page_range) if page_range else self.doc

        image_metadata_pairs = chain.from_iterable(map(self.__process_images_on_page, pages))

        yield from image_metadata_pairs

    def __process_images_on_page(self, page: fitz.fitz.Page):
        """Extract, filter, validate, and stitch the images found on a single page."""
        metadata = list(get_metadata_for_images_on_page(page))
        metadata = filter_metadata_for_scanned_pages(metadata)
        metadata = list(filter_out_tiny_images(metadata))
        metadata = list(filter_invalid_metadata(metadata))

        metadata = add_alpha_channel_info(self.doc, page, metadata)

        images = get_images_on_page(self.doc, metadata)

        # The module-level caches are keyed on doc/page objects; clearing them here
        # keeps already-processed documents from being retained for the process lifetime.
        clear_caches()

        # `filter(all, ...)` drops pairs whose image could not be loaded (None).
        image_metadata_pairs = starmap(ImageMetadataPair, filter(all, zip(images, metadata)))
        # TODO: In the future, consider to introduce an image validator as a pipeline component rather than doing the
        # validation here. Invalid images can then be split into a different stream and joined with the intact images
        # again for the formatting step.
        image_metadata_pairs = self.__filter_valid_images(image_metadata_pairs)
        image_metadata_pairs = stitch_pairs(list(image_metadata_pairs), tolerance=self.tolerance)

        yield from image_metadata_pairs

    @staticmethod
    def __filter_valid_images(image_metadata_pairs: Iterable[ImageMetadataPair]) -> Iterator[ImageMetadataPair]:
        """Drop pairs whose image data cannot be decoded, logging the offending metadata."""

        def validate(image: Image.Image, metadata: dict):
            try:
                # TODO: stand-in heuristic for testing if image is valid => find cleaner solution (RED-5148)
                image.resize((100, 100)).convert("RGB")
                return ImageMetadataPair(image, metadata)
            # Was `except (OSError, Exception)` — OSError is a subclass of Exception,
            # so the tuple was redundant; `Exception` alone covers the same errors.
            except Exception:
                metadata = json.dumps(EnumFormatter()(metadata), indent=2)
                logger.warning(f"Invalid image encountered. Image metadata:\n{metadata}\n\n{traceback.format_exc()}")
                return None

        return filter(truth, starmap(validate, image_metadata_pairs))
|
|
|
|
|
|
def extract_pages(doc, page_range):
    """Lazily load the pages of ``doc`` addressed by ``page_range``.

    NOTE(review): the range is shifted up by one before loading. PyMuPDF page
    numbers are 0-based, so this assumes callers pass an off-by-one range —
    verify against the call sites.
    """
    shifted = range(page_range.start + 1, page_range.stop + 1)
    for number in shifted:
        yield doc.load_page(number)
|
|
|
|
|
|
def get_images_on_page(doc, metadata):
    """Lazily resolve each metadata entry's xref to a PIL image (or None)."""
    for entry in metadata:
        yield xref_to_image(doc, entry[Info.XREF])
|
|
|
|
|
|
def get_metadata_for_images_on_page(page: fitz.Page):
    """Yield a metadata dict per image on ``page``, enriched with page-level info."""
    per_image = (get_image_metadata(info) for info in get_image_infos(page))

    yield from add_page_metadata(page, per_image)
|
|
|
|
|
|
def filter_invalid_metadata(metadata):
    """Run coordinate validation first, then size validation, passing entries through."""
    # funcy's `compose` applies right-to-left; this spells the same order explicitly.
    return validate_size_and_passthrough(validate_coords_and_passthrough(metadata))
|
|
|
|
|
|
# def get_metadata_for_images_on_page_2(page: fitz.fitz.Page):
|
|
# """Effectively the same as image_prediction.image_extractor.extractors.parsable.get_metadata_for_images_on_page,
|
|
# however without the validation steps since not required here and take a significant amount of time.
|
|
# """
|
|
# # temporary solution to avoid circular imports without changing the original code
|
|
# from image_prediction.image_extractor.extractors.parsable import get_image_metadata, add_page_metadata
|
|
#
|
|
# image_infos = page.get_image_info(xrefs=True)
|
|
# metadata = lmap(get_image_metadata, image_infos)
|
|
# metadata = add_page_metadata(page, metadata)
|
|
#
|
|
# return metadata
|
|
|
|
|
|
@lru_cache(maxsize=None)
def get_image_infos(page: fitz.Page) -> List[dict]:
    """Return (and memoize per page object) PyMuPDF's image info records for ``page``.

    The cache is emptied via ``clear_caches`` after each page is processed.
    """
    infos = page.get_image_info(xrefs=True)
    return infos
|
|
|
|
|
|
@lru_cache(maxsize=None)
def xref_to_image(doc, xref) -> Optional[Image.Image]:
    """Load the image stored under ``xref`` in ``doc`` as a PIL image.

    Returns:
        The decoded image, or None when no image data could be extracted.
        (The previous annotation `-> Image` named the PIL *module* and hid the
        None case.)
    """
    maybe_image = load_image_handle_from_xref(doc, xref)
    return Image.open(io.BytesIO(maybe_image["image"])) if maybe_image else None
|
|
|
|
|
|
def get_image_metadata(image_info):
    """Build the bbox/xref metadata dict for one PyMuPDF image-info record."""
    xref = image_info["xref"]
    x1, y1, x2, y2 = (rounder(coord) for coord in image_info["bbox"])

    return {
        Info.WIDTH: abs(x2 - x1),
        Info.HEIGHT: abs(y2 - y1),
        Info.X1: x1,
        Info.X2: x2,
        Info.Y1: y1,
        Info.Y2: y2,
        Info.XREF: xref,
    }
|
|
|
|
|
|
def validate_coords_and_passthrough(metadata):
    """Run box-coordinate validation on every entry, yielding the entries onward."""
    for entry in metadata:
        yield validate_box_coords(entry)
|
|
|
|
|
|
def filter_out_tiny_images(metadata):
    """Drop metadata entries whose bbox area is negligibly small (see ``tiny``)."""
    yield from (entry for entry in metadata if not tiny(entry))
|
|
|
|
|
|
def validate_size_and_passthrough(metadata):
    """Run box-size validation on every entry, yielding the entries onward."""
    for entry in metadata:
        yield validate_box_size(entry)
|
|
|
|
|
|
def add_page_metadata(page, metadata):
    """Merge the page-level metadata (dimensions, index) into every image's metadata."""
    page_meta = get_page_metadata(page)
    # Image-specific keys win on collision, matching funcy's merge(page, image).
    yield from ({**page_meta, **entry} for entry in metadata)
|
|
|
|
|
|
def add_alpha_channel_info(doc, page, metadata):
    """Merge an ``{Info.ALPHA: bool}`` entry into each image's metadata, in page order.

    The pipelines below use funcy's right-to-left ``compose``; ``lift`` presumably
    maps a function over an iterable — TODO confirm against
    image_prediction.utils.generic.
    """
    # page -> iterable of the xrefs of the images on that page
    page_to_xrefs = compose(curry(pluck)("xref"), get_image_infos)
    xref_to_alpha = partial(has_alpha_channel, doc)
    # page -> one alpha flag per image
    page_to_alpha_value_per_image = compose(lift(xref_to_alpha), page_to_xrefs)
    # alpha flag -> {Info.ALPHA: flag}
    alpha_to_dict = compose(dict, lambda a: [(Info.ALPHA, a)])
    page_to_alpha_mapping_per_image = compose(lift(alpha_to_dict), page_to_alpha_value_per_image)

    # zip pairs metadata entries with alpha dicts positionally; assumes both
    # iterables follow the same image order on the page — TODO confirm.
    metadata = starmap(merge, zip(metadata, page_to_alpha_mapping_per_image(page)))

    yield from metadata
|
|
|
|
|
|
@lru_cache(maxsize=None)
def load_image_handle_from_xref(doc, xref):
    """Extract (and memoize per doc/xref) the raw image dict for ``xref`` from ``doc``."""
    handle = doc.extract_image(xref)
    return handle
|
|
|
|
|
|
def rounder(value):
    """Round ``value`` to the nearest integer (banker's rounding, like ``round``) as an int."""
    return int(round(value))
|
|
|
|
|
|
def get_page_metadata(page):
    """Return the page dimensions (rounded to int) and the 0-based page index."""
    page_width, page_height = (rounder(dim) for dim in page.mediabox_size)

    return {
        Info.PAGE_WIDTH: page_width,
        Info.PAGE_HEIGHT: page_height,
        Info.PAGE_IDX: page.number,
    }
|
|
|
|
|
|
def has_alpha_channel(doc, xref):
    """Report whether the image stored under ``xref`` carries transparency information.

    An image with a soft mask (smask) counts as having alpha when the mask is
    extractable or its pixmap reports an alpha channel; otherwise the image's own
    pixmap is inspected. Invalid xrefs are logged and reported as opaque.
    """
    maybe_image = load_image_handle_from_xref(doc, xref)
    maybe_smask = maybe_image["smask"] if maybe_image else None

    if maybe_smask:
        # Both checks are evaluated eagerly, mirroring the original `any([...])`.
        mask_extractable = doc.extract_image(maybe_smask) is not None
        mask_has_alpha = bool(fitz.Pixmap(doc, maybe_smask).alpha)
        return mask_extractable or mask_has_alpha

    try:
        return bool(fitz.Pixmap(doc, xref).alpha)
    except ValueError:
        logger.debug(f"Encountered invalid xref `{xref}` in {doc.metadata.get('title', '<no title>')}.")
        return False
|
|
|
|
|
|
def tiny(metadata):
    """True when the image bbox covers at most 4 square pixels (noise / spacer images)."""
    area = metadata[Info.WIDTH] * metadata[Info.HEIGHT]
    return area <= 4
|
|
|
|
|
|
def clear_caches():
    """Drop every memoized doc/page entry so documents can be garbage-collected."""
    for cached in (get_image_infos, load_image_handle_from_xref, xref_to_image):
        cached.cache_clear()
|
|
|
|
|
|
# Ensure no document/page references linger in the module-level caches at interpreter exit.
atexit.register(clear_caches)
|