What is the best way to extract text contained within a table in a pdf using python?


Question


I'm constructing a program to extract text from a pdf, put it in a structured format, and send it off to a database. I have roughly 1,400 individual pdfs that all follow a similar format, but nuances in the verbiage and plan designs that the documents summarize make it tricky.

I've played around with a couple different pdf readers in python, including tabula-py and pdfminer, but none of them are quite getting to what I'd like to do. Tabula reads in all of the text very well; however, it pulls everything exactly as it lies horizontally, ignoring the fact that some of the text is wrapped inside a box. For example, if you open up the sample SBC I have attached where it reads "What is the overall deductible?", Tabula will read in "What is the overall $500/Individual or...", missing the fact that the word "deductible" is really part of the first sentence. (Note the files I'm working with are pdfs, but I've attached a jpeg because I couldn't figure out how to attach a pdf.)

import tabula

# note: recent versions of tabula-py return a list of DataFrames
df = tabula.read_pdf(filepath, pandas_options={'header': None})

print(df.iloc[0][0])
print(df)
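
One option I haven't fully explored is tabula's lattice mode, which segments cells along the table's ruling lines instead of by horizontal text position. A minimal sketch, in case it handles the wrapped text better:

import tabula

# lattice=True tells tabula to split cells along the table's ruling lines,
# which may keep text that wraps inside a box together in one cell
dfs = tabula.read_pdf(filepath, lattice=True, pages='all', pandas_options={'header': None})
for df in dfs:
    print(df)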

In the end, I'd really like to be able to parse out the text within each box so that I can better identify which values belong to the deductible, out-of-pocket limits, copays/coinsurance, etc. I thought possibly some sort of OCR would allow me to recognize which parts of the PDF are contained in the blue rectangles and then pull the string from there, but I really don't know where to start with that. (See the attached Sample SBC.)


Answer 1:


I think that the best way to do what you need is to find and isolate the cells in the file and then apply OCR to each individual cell.

There are a number of solutions on SO for that; I got the code from this answer and played around a little with the parameters to get the output below (not perfect yet, but you can tweak it a bit yourself).

import os
import cv2

# This only works if there's only one table on a page
# Important parameters:
#  - morph_size
#  - min_text_height_limit
#  - max_text_height_limit
#  - cell_threshold
#  - min_columns


def pre_process_image(img, save_in_file, morph_size=(23, 23)):

    # get rid of the color
    pre = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # Otsu threshold
    pre = cv2.threshold(pre, 250, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)[1]
    # dilate the text to make it solid spot
    cpy = pre.copy()
    struct = cv2.getStructuringElement(cv2.MORPH_RECT, morph_size)
    cpy = cv2.dilate(~cpy, struct, anchor=(-1, -1), iterations=1)
    pre = ~cpy

    if save_in_file is not None:
        cv2.imwrite(save_in_file, pre)
    return pre


def find_text_boxes(pre, min_text_height_limit=20, max_text_height_limit=120):
    # Looking for the text spot contours
    # (OpenCV 4 returns (contours, hierarchy); on OpenCV 3, unpack three values)
    contours, _ = cv2.findContours(pre, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)

    # Getting the text bounding boxes based on the text size assumptions
    boxes = []
    for contour in contours:
        box = cv2.boundingRect(contour)
        h = box[3]

        if min_text_height_limit < h < max_text_height_limit:
            boxes.append(box)

    return boxes


def find_table_in_boxes(boxes, cell_threshold=100, min_columns=3):
    rows = {}
    cols = {}

    # Clustering the bounding boxes by their positions
    for box in boxes:
        (x, y, w, h) = box
        col_key = x // cell_threshold
        row_key = y // cell_threshold
        cols[col_key] = [box] if col_key not in cols else cols[col_key] + [box]
        rows[row_key] = [box] if row_key not in rows else rows[row_key] + [box]

    # Filtering out the clusters having fewer than min_columns boxes
    table_cells = list(filter(lambda r: len(r) >= min_columns, rows.values()))
    # Sorting the row cells by x coord
    table_cells = [list(sorted(tb)) for tb in table_cells]
    # Sorting rows by the y coord
    table_cells = list(sorted(table_cells, key=lambda r: r[0][1]))

    return table_cells


def build_lines(table_cells):
    if table_cells is None or len(table_cells) <= 0:
        return [], []

    max_last_col_width_row = max(table_cells, key=lambda b: b[-1][2])
    max_x = max_last_col_width_row[-1][0] + max_last_col_width_row[-1][2]

    max_last_row_height_box = max(table_cells[-1], key=lambda b: b[3])
    max_y = max_last_row_height_box[1] + max_last_row_height_box[3]

    hor_lines = []
    ver_lines = []

    for box in table_cells:
        x = box[0][0]
        y = box[0][1]
        hor_lines.append((x, y, max_x, y))

    for box in table_cells[0]:
        x = box[0]
        y = box[1]
        ver_lines.append((x, y, x, max_y))

    (x, y, w, h) = table_cells[0][-1]
    ver_lines.append((max_x, y, max_x, max_y))
    (x, y, w, h) = table_cells[0][0]
    hor_lines.append((x, max_y, max_x, max_y))

    return hor_lines, ver_lines


if __name__ == "__main__":
    in_file = os.path.join(".", "test.jpg")
    pre_file = os.path.join(".", "pre.png")
    out_file = os.path.join(".", "out.png")

    img = cv2.imread(in_file)

    pre_processed = pre_process_image(img, pre_file)
    text_boxes = find_text_boxes(pre_processed)
    cells = find_table_in_boxes(text_boxes)
    hor_lines, ver_lines = build_lines(cells)

    # Visualize the result
    vis = img.copy()

    # for box in text_boxes:
    #     (x, y, w, h) = box
    #     cv2.rectangle(vis, (x, y), (x + w - 2, y + h - 2), (0, 255, 0), 1)

    for line in hor_lines:
        [x1, y1, x2, y2] = line
        cv2.line(vis, (x1, y1), (x2, y2), (0, 0, 255), 1)

    for line in ver_lines:
        [x1, y1, x2, y2] = line
        cv2.line(vis, (x1, y1), (x2, y2), (0, 0, 255), 1)

    cv2.imwrite(out_file, vis)
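
To finish the pipeline, you would then run OCR over each detected cell. A minimal sketch, assuming Tesseract and the pytesseract wrapper are installed (neither appears in the code above):

import cv2
import pytesseract

img = cv2.imread("test.jpg")

# cells is the output of find_table_in_boxes: rows of (x, y, w, h) tuples
for row in cells:
    for (x, y, w, h) in row:
        # crop the cell out of the original image and OCR it
        cell_image = img[y:y + h, x:x + w]
        # --psm 6 treats the crop as a single uniform block of text
        text = pytesseract.image_to_string(cell_image, config="--psm 6")
        print(text.strip())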




Answer 2:


@jpnadas In this case the code you copied from my answer in this post isn't really suitable, because it addresses the case when a table doesn't have a surrounding grid. That algorithm looks for repeating blocks of text and tries to find a pattern that resembles a table heuristically.

But in this particular case the table does have a grid, and by taking advantage of it we can achieve a much more accurate result.

The strategy is the following:

  1. Increase the image gamma to make the grid darker
  2. Get rid of colour and apply Otsu thresholding
  3. Find long vertical and horizontal lines in the image and create a mask from them using the erode and dilate functions
  4. Find the cell blocks in the mask using the findContours function
  5. Find table objects

    5.1 The rest can be as in the post about finding a table without the grid: find the table structure heuristically

    5.2 An alternative approach is to use the hierarchy returned by the findContours function. This is even more accurate and makes it possible to find multiple tables in a single image (a sketch follows this list).

  6. Once you have the cell coordinates, it's easy to extract a particular cell image from the original image:

    cell_image = image[cell_y:cell_y + cell_h, cell_x:cell_x + cell_w]

  7. Apply OCR to each cell_image (as in the pytesseract sketch under Answer 1).
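
A minimal sketch of the 5.2 idea, assuming mask is the lines mask produced by make_lines_mask in the code below (the original answer doesn't spell this variant out, so take the parent test as illustrative):

import cv2

# RETR_TREE keeps the full contour nesting; each hierarchy entry is
# [next, previous, first_child, parent], indexing into contours
contours, hierarchy = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

cell_boxes = []
for i, contour in enumerate(contours):
    parent = hierarchy[0][i][3]
    # cells are contours nested inside another contour (a table outline);
    # top-level contours (parent == -1) are the page or table borders
    if parent != -1:
        cell_boxes.append(cv2.boundingRect(contour))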

BUT! I consider the OpenCV approach a last resort, for when you're not able to read the PDF's contents: for instance, when a PDF contains a raster image inside.

If it's a vector-based PDF and its contents are readable, it makes more sense to find the table inside the contents and just read the text from it instead of doing the heavy 'OCR lifting'.
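
For example, pdfplumber can reconstruct a gridded table straight from a vector PDF's ruling lines, with no OCR involved (a sketch; "sbc.pdf" is a placeholder path):

import pdfplumber

with pdfplumber.open("sbc.pdf") as pdf:
    for page in pdf.pages:
        # by default, extract_tables() segments cells along the drawn lines
        for table in page.extract_tables():
            for row in table:
                print(row)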

For reference, here's the code for the more accurate table recognition:

import os
import imutils
import numpy as np
import argparse
import cv2


def gamma_correction(image, gamma = 1.0):
    look_up_table = np.empty((1,256), np.uint8)

    for i in range(256):
        look_up_table[0,i] = np.clip(pow(i / 255.0, gamma) * 255.0, 0, 255)

    result = cv2.LUT(image, look_up_table)

    return result


def pre_process_image(image):
    # Applying gamma correction to make the table lines darker
    gamma = gamma_correction(image, 2)

    # Getting rid of color
    gray = cv2.cvtColor(gamma, cv2.COLOR_BGR2GRAY)

    # Then apply Otsu threshold to reveal important areas
    ret, thresh = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)

    # inverting the thresholded image
    return ~thresh


def get_horizontal_lines_mask(image, horizontal_size=100):

    horizontal = image.copy()
    horizontal_structure = cv2.getStructuringElement(cv2.MORPH_RECT, (horizontal_size, 1))
    horizontal = cv2.erode(horizontal, horizontal_structure, anchor=(-1, -1), iterations=1)
    horizontal = cv2.dilate(horizontal, horizontal_structure, anchor=(-1, -1), iterations=1)

    return horizontal


def get_vertical_lines_mask(image, vertical_size=100):
    vertical = image.copy()
    vertical_structure = cv2.getStructuringElement(cv2.MORPH_RECT, (1, vertical_size))
    vertical = cv2.erode(vertical, vertical_structure, anchor=(-1, -1), iterations=1)
    vertical = cv2.dilate(vertical, vertical_structure, anchor=(-1, -1), iterations=1)

    return vertical


def make_lines_mask(preprocessed, min_horizontal_line_size=100, min_vertical_line_size=100):

    hor = get_horizontal_lines_mask(preprocessed, min_horizontal_line_size)
    ver = get_vertical_lines_mask(preprocessed, min_vertical_line_size)

    mask = np.zeros((preprocessed.shape[0], preprocessed.shape[1], 1), dtype=np.uint8)
    mask = cv2.bitwise_or(mask, hor)
    mask = cv2.bitwise_or(mask, ver)

    return ~mask


def find_cell_boxes(mask):
    # Looking for the cell block contours
    # OpenCV 3: img, contours, hierarchy = cv2.findContours(mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    # grab_contours below makes this work on both OpenCV 3 and OpenCV 4
    contours = cv2.findContours(mask, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
    contours = imutils.grab_contours(contours)
    contours = sorted(contours, key=cv2.contourArea, reverse=True)

    image_width = mask.shape[1]

    # Getting the cell bounding boxes
    boxes = []
    for contour in contours:
        box = cv2.boundingRect(contour)
        w = box[2]

        # Excluding the page box shape but adding smaller boxes
        if w < 0.95 * image_width:
            boxes.append(box)

    return boxes


def find_table_in_boxes(boxes, cell_threshold=10, min_columns=2):
    rows = {}
    cols = {}

    # Clustering the bounding boxes by their positions
    for box in boxes:
        (x, y, w, h) = box
        col_key = x // cell_threshold
        row_key = y // cell_threshold
        cols[col_key] = [box] if col_key not in cols else cols[col_key] + [box]
        rows[row_key] = [box] if row_key not in rows else rows[row_key] + [box]

    # Filtering out the clusters having fewer than min_columns boxes
    table_cells = list(filter(lambda r: len(r) >= min_columns, rows.values()))
    # Sorting the row cells by x coord
    table_cells = [list(sorted(tb)) for tb in table_cells]
    # Sorting rows by the y coord
    table_cells = list(sorted(table_cells, key=lambda r: r[0][1]))

    return table_cells


def build_vertical_lines(table_cells):
    # despite the name, this builds both the horizontal and the vertical grid lines
    if table_cells is None or len(table_cells) <= 0:
        return [], []

    max_last_col_width_row = max(table_cells, key=lambda b: b[-1][2])
    max_x = max_last_col_width_row[-1][0] + max_last_col_width_row[-1][2]

    max_last_row_height_box = max(table_cells[-1], key=lambda b: b[3])
    max_y = max_last_row_height_box[1] + max_last_row_height_box[3]

    hor_lines = []
    ver_lines = []

    for box in table_cells:
        x = box[0][0]
        y = box[0][1]
        hor_lines.append((x, y, max_x, y))

    for box in table_cells[0]:
        x = box[0]
        y = box[1]
        ver_lines.append((x, y, x, max_y))

    (x, y, w, h) = table_cells[0][-1]
    ver_lines.append((max_x, y, max_x, max_y))
    (x, y, w, h) = table_cells[0][0]
    hor_lines.append((x, max_y, max_x, max_y))

    return hor_lines, ver_lines


if __name__ == "__main__":
    ap = argparse.ArgumentParser()
    ap.add_argument("-i", "--image", required=True, help="path to images directory")
    args = vars(ap.parse_args())

    in_file = args["image"]
    filename_base = os.path.splitext(in_file)[0]

    img = cv2.imread(in_file)

    pre_processed = pre_process_image(img)

    # Visualizing pre-processed image
    cv2.imwrite(filename_base + ".pre.png", pre_processed)

    lines_mask = make_lines_mask(pre_processed, min_horizontal_line_size=1800, min_vertical_line_size=500)

    # Visualizing table lines mask
    cv2.imwrite(filename_base + ".mask.png", lines_mask)

    cell_boxes = find_cell_boxes(lines_mask)

    cells = find_table_in_boxes(cell_boxes)

    # apply OCR to each cell rect here (see the pytesseract sketch under Answer 1);
    # cells contains rows of cell coordinate tuples (x, y, w, h)

    hor_lines, ver_lines = build_vertical_lines(cells)

    # Visualize the table lines
    vis = img.copy()

    for line in hor_lines:
        [x1, y1, x2, y2] = line
        cv2.line(vis, (x1, y1), (x2, y2), (0, 0, 255), 1)

    for line in ver_lines:
        [x1, y1, x2, y2] = line
        cv2.line(vis, (x1, y1), (x2, y2), (0, 0, 255), 1)

    cv2.imwrite(filename_base + ".result.png", vis)

Some parameters are hard-coded:

  • page size threshold - 0.95

  • min horizontal line size - 1800 px

  • min vertical line size - 500 px

You can provide them as configurable parameters or make them relative to image size.
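
For example, the line-size thresholds could be derived from the image dimensions (the 0.9 and 0.5 factors here are starting points to tune, not values from the code above):

# derive the line-size thresholds from the image instead of hard-coding them
height, width = img.shape[:2]
lines_mask = make_lines_mask(pre_processed,
                             min_horizontal_line_size=int(width * 0.9),
                             min_vertical_line_size=int(height * 0.5))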

Results:



Source: https://stackoverflow.com/questions/57210148/what-is-the-best-way-to-extract-text-contained-within-a-table-in-a-pdf-using-pyt
