landingai.visualize

The landingai.visualize module contains functions to visualize the prediction results.

overlay_bboxes(predictions, image, options=None)

Draw bounding boxes on the input image and return the image with bounding boxes drawn. The bounding boxes are drawn using the bbox-visualizer package.

Parameters

predictions
    A list of ObjectDetectionPrediction, each of which contains the bounding box and the predicted class.
image
    The source image to draw the bounding boxes on.
options
    Options to customize the drawing. Currently, it supports the following options:
    1. bbox_style: str, the style of the bounding box.
        - "default": draw a rectangle with the label right on top of the rectangle. (default option)
        - "flag": draw a vertical line that connects the detected object and the label. No rectangle is drawn.
        - "t-label": draw a rectangle with a vertical line on top of the rectangle, which points to the label.
        For more information, see https://github.com/shoumikchow/bbox-visualizer
    2. draw_label: bool, default True. If False, the label won't be drawn. This option is only valid when bbox_style is "default" and is ignored otherwise.

Returns

Image.Image The image with bounding boxes drawn.

Raises

ValueError When the value of bbox_style is not supported.

Source code in landingai/visualize.py
def overlay_bboxes(
    predictions: List[ObjectDetectionPrediction],
    image: Union[np.ndarray, Image.Image],
    options: Optional[Dict[str, Any]] = None,
) -> Image.Image:
    """Draw bounding boxes on the input image and return the image with bounding boxes drawn.
    The bounding boxes are drawn using the bbox-visualizer package.

    Parameters
    ----------
    predictions
        A list of ObjectDetectionPrediction, each of which contains the bounding box and the predicted class.
    image
        The source image to draw the bounding boxes on.
    options
        Options to customize the drawing. Currently, it supports the following options:
        1. `bbox_style`: str, the style of the bounding box.
            - "default": draw a rectangle with the label right on top of the rectangle. (default option)
            - "flag": draw a vertical line connects the detected object and the label. No rectangle is drawn.
            - "t-label": draw a rectangle with a vertical line on top of the rectangle, which points to the label.
            For more information, see https://github.com/shoumikchow/bbox-visualizer
        2. `draw_label`: bool, default True. If False, the label won't be drawn. This option is only valid when bbox_style is "default" and is ignored otherwise.

    Returns
    -------
    Image.Image
        The image with bounding boxes drawn.

    Raises
    ------
    ValueError
        When the value of bbox_style is not supported.
    """
    import bbox_visualizer as bbv

    if isinstance(image, Image.Image):
        if image.mode == "RGBA":
            image = np.asarray(image)[:, :, :3]  # Get rid of the alpha channel
        else:
            image = np.asarray(image)

    # Numpy arrays created from PIL images are read-only by default. By copying it, we make it writeable.
    image = image.copy()

    if options is None:
        options = {}
    bbox_style = options.get("bbox_style", "default").lower()
    for pred in predictions:
        bbox = pred.bboxes
        label = f"{pred.label_name} | {pred.score:.2f}"
        if bbox_style == "flag":
            image = bbv.draw_flag_with_label(image, label, bbox)
        else:
            draw_bg = options.get("draw_bg", True)
            label_at_top = options.get("top", True)
            image = bbv.draw_rectangle(image, pred.bboxes)
            if bbox_style == "default":
                if options.get("draw_label", True):
                    image = bbv.add_label(
                        image, label, bbox, draw_bg=draw_bg, top=label_at_top
                    )
            elif bbox_style == "t-label":
                image = bbv.add_T_label(image, label, bbox, draw_bg=draw_bg)
            else:
                raise ValueError(
                    f"Unknown bbox_style: {bbox_style}. Supported types are: default (rectangle), flag, t-label. Fore more information, see https://github.com/shoumikchow/bbox-visualizer."
                )
    return Image.fromarray(image)
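
A minimal usage sketch, assuming the predictions come from an object-detection model elsewhere (the run_detection helper and the file paths below are placeholders, not part of this module):

from PIL import Image
from landingai.visualize import overlay_bboxes

image = Image.open("input.jpg")          # hypothetical input path
predictions = run_detection(image)       # placeholder: any List[ObjectDetectionPrediction]

# Draw T-style labels; use "default" or "flag" for the other styles.
result = overlay_bboxes(predictions, image, options={"bbox_style": "t-label"})
result.save("input_with_bboxes.jpg")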

overlay_colored_masks(predictions, image, options=None)

Draw colored masks on the input image and return the image with colored masks drawn.

NOTE:
- The image is converted to grayscale first, and then the colored masks are drawn on top of it.
- The colored masks are drawn using the segmentation-mask-overlay package.

Parameters

predictions
    A list of SegmentationPrediction, each of which contains the segmentation mask and the predicted class.
image
    The source image to draw the colored masks on.
options
    Options to customize the drawing. Currently, it supports the following options:
    1. color_map: dict, default empty. A map of label names to colors. Any label that doesn't have a color will be assigned one. The color is any value acceptable by PIL. The label names are case insensitive. Example:

        {
            "label1": "red",
            "label2": "#add8e6",
        }

    2. mask_alpha: float, default 0.5. The alpha value of the colored masks. The value should be between 0 and 1.

Returns

Image.Image The image with segmented masks drawn.

Source code in landingai/visualize.py
def overlay_colored_masks(
    predictions: List[SegmentationPrediction],
    image: Union[np.ndarray, Image.Image],
    options: Optional[Dict[str, Any]] = None,
) -> Image.Image:
    """Draw colored masks on the input image and return the image with colored masks drawn.

    NOTE:
    - The image is converted to grayscale first, and then the colored masks are drawn on top of it.
    - The colored masks are drawn using the segmentation-mask-overlay package.

    Parameters
    ----------
    predictions
        A list of SegmentationPrediction, each of which contains the segmentation mask and the predicted class.
    image
        The source image to draw the colored masks on.
    options
        Options to customize the drawing. Currently, it supports the following options:
        1. `color_map`: dict, default empty. A map of label names to colors. For any labels that don't have a color, a color will be assigned to it.
        The color is any value acceptable by PIL. The label names are case insensitive.
        Example:
            ```
            {
                "label1": "red",
                "label2": "#add8e6",
            }
            ```
        2. `mask_alpha`: float, default 0.5. The alpha value of the colored masks. The value should be between 0 and 1.
    Returns
    -------
    Image.Image
        The image with segmented masks drawn.
    """
    from segmentation_mask_overlay import overlay_masks

    if options is None:
        options = {}
    if isinstance(image, np.ndarray):
        image = Image.fromarray(image).convert(mode="L")
    masks = [pred.decoded_boolean_mask.astype(np.bool_) for pred in predictions]
    labels = [pred.label_name for pred in predictions]
    mask_alpha = options.get("mask_alpha", 0.5)
    cmap = _populate_missing_colors(
        options.get("color_map", {}), set(labels), mask_alpha
    )
    colors = [cmap[label.upper()] for label in labels]
    result = overlay_masks(
        image,
        masks,
        labels=labels,
        colors=colors,
        mask_alpha=mask_alpha,
        return_pil_image=True,
    )
    return cast(Image.Image, result)
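
A minimal usage sketch, assuming the segmentation predictions are produced elsewhere (run_segmentation and the file paths are placeholders; the label names mirror the color_map example above):

from PIL import Image
from landingai.visualize import overlay_colored_masks

image = Image.open("input.jpg")            # hypothetical input path
predictions = run_segmentation(image)      # placeholder: any List[SegmentationPrediction]

options = {
    "color_map": {"label1": "red", "label2": "#add8e6"},  # illustrative label names
    "mask_alpha": 0.4,
}
result = overlay_colored_masks(predictions, image, options=options)
result.save("input_with_masks.jpg")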

overlay_ocr_predictions(predictions, image, options=None)

Draw the predicted texts and boxes on the input image with a side-by-side view.

Parameters

predictions
    A list of OcrPrediction, each of which contains the polygon and the predicted text and score.
image
    The source image to draw the polygon on.
options
    Options to customize the drawing. Currently, no options are supported.

Returns

Image The image with the polygon and text drawn.

Source code in landingai/visualize.py
def overlay_ocr_predictions(
    predictions: List[OcrPrediction],
    image: Union[np.ndarray, Image.Image],
    options: Optional[Dict[str, Any]] = None,
) -> Image.Image:
    """Draw the predicted texts and boxes on the input image with a side-by-side view.

    Parameters
    ----------
    predictions
        A list of OcrPrediction, each of which contains the polygon and the predicted text and score.
    image
        The source image to draw the polygon on.
    options
        Options to customize the drawing. Currently, no options are supported.

    Returns
    -------
    Image
        The image with the polygon and text drawn.
    """
    if isinstance(image, np.ndarray):
        image = Image.fromarray(image)
    texts = [pred.text for pred in predictions]
    boxes = [pred.location for pred in predictions]
    h, w = image.height, image.width
    img_left = image.copy()
    img_right = np.ones((h, w, 3), dtype=np.uint8) * 255
    random.seed(0)
    draw_left = ImageDraw.Draw(img_left)
    for _, (box, txt) in enumerate(zip(boxes, texts)):
        color = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))
        draw_left.polygon(box, fill=color)
        img_right_text = _draw_box_text((w, h), box, txt)
        pts = np.array(box, np.int32).reshape((-1, 1, 2))
        cv2.polylines(img_right_text, [pts], True, color, 1)
        img_right = np.array(cv2.bitwise_and(img_right, img_right_text), dtype=np.uint8)
    img_left = Image.blend(image, img_left, 0.5)
    img_show = Image.new("RGB", (w * 2, h), (255, 255, 255))
    img_show.paste(img_left, (0, 0, w, h))
    img_show.paste(Image.fromarray(img_right), (w, 0, w * 2, h))
    return img_show
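
A minimal usage sketch, assuming OCR predictions are obtained elsewhere (run_ocr and the file paths are placeholders). The output is twice as wide as the input: the source image with colored polygons on the left and the recognized text on the right.

from PIL import Image
from landingai.visualize import overlay_ocr_predictions

image = Image.open("receipt.jpg")        # hypothetical input path
predictions = run_ocr(image)             # placeholder: any List[OcrPrediction]

# No options are supported; the side-by-side layout is fixed.
result = overlay_ocr_predictions(predictions, image)
result.save("receipt_ocr_overlay.jpg")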

overlay_predicted_class(predictions, image, options=None)

Draw the predicted class on the input image and return the image with the predicted class drawn.

Parameters

predictions
    A list of ClassificationPrediction, each of which contains the predicted class and the score.
image
    The source image to draw the predicted class on.
options
    Options to customize the drawing. Currently, it supports the following options:
    1. text_position: tuple[int, int]. The position of the text relative to the bottom-left of the image. The default value is (10, 25).

Returns

Image.Image The image with the predicted class drawn.

Source code in landingai/visualize.py
def overlay_predicted_class(
    predictions: List[ClassificationPrediction],
    image: Union[np.ndarray, Image.Image],
    options: Optional[Dict[str, Any]] = None,
) -> Image.Image:
    """Draw the predicted class on the input image and return the image with the predicted class drawn.

    Parameters
    ----------
    predictions
        A list of ClassificationPrediction, each of which contains the predicted class and the score.
    image
        The source image to draw the predicted class on.
    options
        Options to customize the drawing. Currently, it supports the following options:
        1. `text_position`: tuple[int, int]. The position of the text relative to the bottom-left of the image. The default value is (10, 25).

    Returns
    -------
    Image.Image
        The image with the predicted class drawn.
    """
    if options is None:
        options = {}
    if isinstance(image, np.ndarray):
        image = Image.fromarray(image)
    assert len(predictions) == 1
    text_position = options.get("text_position", (10, 25))
    prediction = predictions[0]
    text = f"{prediction.label_name} {prediction.score:.2f}"
    draw = ImageDraw.Draw(image)
    font = _get_pil_font()
    xy = (text_position[0], image.size[1] - text_position[1])
    box = draw.textbbox(xy=xy, text=text, font=font)
    box = (box[0] - 10, box[1] - 5, box[2] + 10, box[3] + 5)
    draw.rounded_rectangle(box, radius=15, fill="#333333")
    draw.text(xy=xy, text=text, fill="white", font=font)
    return image
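
A minimal usage sketch. The function expects exactly one ClassificationPrediction (enforced by the assert above); run_classification and the file paths are placeholders.

from PIL import Image
from landingai.visualize import overlay_predicted_class

image = Image.open("input.jpg")                  # hypothetical input path
predictions = run_classification(image)          # placeholder: a single-element List[ClassificationPrediction]

# Place the label 10 px from the left edge and 40 px up from the bottom edge.
result = overlay_predicted_class(predictions, image, options={"text_position": (10, 40)})
result.save("input_with_class.jpg")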

overlay_predictions(predictions, image, options=None)

Overlay the prediction results on the input image and return the image with the overlay.

Source code in landingai/visualize.py
def overlay_predictions(
    predictions: List[Prediction],
    image: Union[np.ndarray, Image.Image],
    options: Optional[Dict[str, Any]] = None,
) -> Image.Image:
    """Overlay the prediction results on the input image and return the image with the overlay."""
    if len(predictions) == 0:
        _LOGGER.warning("No predictions to overlay, returning original image")
        if isinstance(image, Image.Image):
            return image
        else:
            return Image.fromarray(image)
    types = {type(pred) for pred in predictions}
    assert len(types) == 1, f"Expecting only one type of prediction, got {types}"
    pred_type = types.pop()
    overlay_func: Callable[
        [List[Prediction], Union[np.ndarray, Image.Image], Optional[Dict]], Image.Image
    ] = _OVERLAY_FUNC_MAP[pred_type]
    return overlay_func(predictions, image, options)
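
A minimal usage sketch of this dispatcher: the prediction type determines which overlay function is called, so the same call works for detection, segmentation, classification, and OCR results as long as the list is homogeneous (run_inference and the file paths are placeholders).

from PIL import Image
from landingai.visualize import overlay_predictions

image = Image.open("input.jpg")          # hypothetical input path
predictions = run_inference(image)       # placeholder: a homogeneous list of Prediction objects

# Options, if any, are forwarded to the type-specific overlay function.
result = overlay_predictions(predictions, image)
result.save("input_overlay.jpg")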