A note on the method I used to remove label (bounding) boxes from images, so I can find it again later.

The general approach follows this blog post:

python 去除图像中的框_python去掉矩形边框 数字图像处理

Function definitions

Imports:

from PIL import Image
import numpy as np
from sklearn.ensemble import IsolationForest
from collections import Counter
import cv2, os
from tqdm import tqdm

Loading the image

First, load the image. This helper allows reading images whose file paths contain Chinese characters, which plain cv2.imread cannot handle.

def cv_imread(filePath):
    # np.fromfile + imdecode works for paths with non-ASCII (e.g. Chinese) characters
    cv_img = cv2.imdecode(np.fromfile(filePath, dtype=np.uint8), -1)
    ## imdecode, like cv2.imread, returns the image in BGR channel order, so it can be fed
    ## to other OpenCV functions directly; uncomment the next line only if an RGB array is
    ## needed (the conversion simply swaps the channel order)
    ## cv_img = cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB)
    return cv_img
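
A minimal usage sketch (the path below is just a made-up example of a file name containing Chinese characters):

img = cv_imread(r"D:\数据\示例图片.jpg")  # hypothetical path with non-ASCII characters
print(img.shape)                          # e.g. (height, width, 3) for a color image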

Getting the label boxes

Get the positions of the colored boxes in the image. The HSV color space is used here: a color threshold for the label box is defined and used to segment it out.

def get_coordinate(img):
    # Convert to the HSV color space
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

    # HSV range of the box color (adjust these bounds to match your own label box color)
    color1_lower = np.array([30, 80, 255])
    color1_upper = np.array([40, 200, 255])

    # Apply the color-range mask to the HSV image
    mask1 = cv2.inRange(hsv, color1_lower, color1_upper)

    # Masked copy of the original image (only useful for visual inspection, not used below)
    result1 = cv2.bitwise_and(img, img, mask=mask1)

    # Find the contours of the rectangular boxes
    contours1, hierarchy1 = cv2.findContours(mask1, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    # Convert each contour into bounding-box coordinates
    labels = []
    for contour1 in contours1:
        x, y, w, h = cv2.boundingRect(contour1)
        labels.append((x, y, x + w, y + h))
    return labels
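
To sanity-check the HSV range, one option is to draw the detected coordinates back onto a copy of the image and look at the result (the input path is again a made-up example):

img = cv_imread(r"D:\数据\示例图片.jpg")  # hypothetical path
for (x1, y1, x2, y2) in get_coordinate(img):
    cv2.rectangle(img, (x1, y1), (x2, y2), (0, 0, 255), 2)  # red outline in BGR
cv2.imwrite("check_boxes.jpg", img)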

Removing the label box body

# Check whether two colors are similar (mean absolute channel difference below threhold)
def my_similar(a, b):
    if sum([abs(x - y) for (x, y) in zip(list(a), list(b))]) / 3 < threhold:
        return True
    return False


# Check whether a color is similar to any of the (color, count) pairs in b_s
def my_similar_border(a, b_s):
    for b in b_s:
        if sum([abs(x - y) for (x, y) in zip(list(a), list(b)[0])]) / 3 < threhold_border:
            return True
    return False
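
A quick sanity check of the two helpers, assuming the globals threhold = 1 and threhold_border = 3 from the run script further below:

threhold, threhold_border = 1, 3
print(my_similar((120, 80, 60), (120, 81, 60)))  # mean abs diff 0.33 < 1 -> True
print(my_similar((120, 80, 60), (125, 80, 60)))  # mean abs diff 1.67 -> False
# my_similar_border expects (color, count) pairs, as produced by Counter.most_common()
print(my_similar_border((120, 80, 60), [((121, 82, 61), 5)]))  # mean abs diff 1.33 < 3 -> True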


# Collect outliers, used later to clean up the edges of the box
def get_outlier(tmp, model):
    normal_point = []
    unnormal_point = []
    # Fit the model on the data
    model.fit(tmp)

    # Predict a label for each data point: -1 means outlier, 1 means normal
    labels = model.predict(tmp)

    # Split the points into normal and abnormal according to their labels
    for i, label in enumerate(labels):
        if label == -1:
            unnormal_point.append(tmp[i])
        if label == 1:
            normal_point.append(tmp[i])
    return normal_point, unnormal_point
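
A toy illustration of what get_outlier returns, using the IsolationForest import from the top of the script (the colors below are made up):

toy_colors = [(200, 200, 200)] * 20 + [(10, 240, 250)]  # one clearly different color
toy_model = IsolationForest(contamination=0.05)
normal, outliers = get_outlier(toy_colors, toy_model)
print(len(normal), len(outliers))  # the odd color typically ends up in the outlier list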


# Pick the horizontal or vertical neighbors for color replacement: the direction whose
# neighbors differ more from the box color is assumed to be background
def get_right_neighborhood(target_color, neighborhood_x, neighborhood_y):
    diff_x = 0
    diff_y = 0

    for neighborhood in neighborhood_x:
        diff_x += abs(sum([x - y for x, y in zip(list(neighborhood), list(target_color))]) / 3)
    for neighborhood in neighborhood_y:
        diff_y += abs(sum([x - y for x, y in zip(list(neighborhood), list(target_color))]) / 3)
    if diff_x > diff_y:
        return neighborhood_x, 1
    return neighborhood_y, 2
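
A toy example of the direction choice: the neighbors that differ more from the box color are treated as background, so they are the ones returned for replacement (all colors are made up):

box_color = (30, 200, 255)                  # pretend this is the box color
horiz = [(30, 200, 255), (31, 199, 254)]    # horizontal neighbors: still mostly box color
vert = [(120, 120, 120), (118, 122, 119)]   # vertical neighbors: background-like
neigh, direction = get_right_neighborhood(box_color, horiz, vert)
print(direction)  # 2 -> the vertical neighbors are returned, since they differ more from the box color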


def replace_color_around_point(image, x, y, x_1, y_1, x_2, y_2, radius):
    # Sampling-step counter
    step = 0

    # Access the pixel data of the PIL image
    pixels = image.load()

    # Image width and height
    width, height = image.size

    ############################ 1. Process the interior of the box ############################

    # Color of the target pixel (a point on the box)
    target_color = pixels[x, y]
    # Loop over every pixel inside the box region
    for i in range(x_1, x_2):
        for j in range(y_1, y_2):
            # If the pixel color is similar to the target color, replace it with the
            # average color of the surrounding region
            if my_similar(pixels[i, j], target_color):
                # Collect the horizontal and vertical neighbors
                neighborhood_x = []
                neighborhood_y = []

                for m in range(i - radius, i + radius + 1):
                    if 0 <= m < width:
                        neighborhood_x.append(pixels[m, j])

                for n in range(j - radius, j + radius + 1):
                    if 0 <= n < height:
                        neighborhood_y.append(pixels[i, n])

                neighborhood, direction = get_right_neighborhood(target_color, neighborhood_x, neighborhood_y)
                # Drop neighbors that are themselves part of the box
                neighborhood = [n for n in neighborhood if not my_similar(n, target_color)]
                neighborhood = np.array(neighborhood)
                average_color = tuple(np.mean(neighborhood, axis=0, dtype=int))
                average_color_part_1 = tuple(np.mean(neighborhood[0:int(len(neighborhood) / 2)], axis=0, dtype=int))
                average_color_part_2 = tuple(np.mean(neighborhood[int(len(neighborhood) / 2) + 1:], axis=0, dtype=int))

                # Replace the pixel color
                pixels[i, j] = average_color

                # Horizontal direction: also repaint a short strip to the left and right
                if direction == 1:
                    for m in range(i - int(redius_border / 3), i):
                        if 0 <= m < width:
                            pixels[m, j] = average_color_part_1

                    for m in range(i, i + int(redius_border / 3) + 1):
                        if 0 <= m < width:
                            pixels[m, j] = average_color_part_2

                # Vertical direction: also repaint a short strip above and below
                if direction == 2:
                    for n in range(j - int(redius_border / 3), j):
                        if 0 <= n < height:
                            pixels[i, n] = average_color_part_1

                    for n in range(j, j + int(redius_border / 3) + 1):
                        if 0 <= n < height:
                            pixels[i, n] = average_color_part_2

                step += 1

                # Every step_add replacements (up to step_max), sample outliers from the
                # neighborhood; these tend to be the anti-aliased edge colors of the box
                if step % step_add == 0 and step < step_max:
                    normal_point, unnormal_point = get_outlier(neighborhood, model)
                    unnormal_point = [tuple(x) for x in unnormal_point]
                    all_outliers.extend(unnormal_point)

    ############################ 2. Process the edges of the box ############################

    # Count how often each outlier color was seen
    all_outlier_counts = Counter(all_outliers)

    # Keep the most frequent outlier colors; these are used to remove the box edges
    top_outliers = all_outlier_counts.most_common(top_outlier_num)
    top_outlier_colors = [c for c, _ in top_outliers]

    # Loop over every pixel inside the box region again
    for i in range(x_1, x_2):
        for j in range(y_1, y_2):

            # If the pixel is similar to one of the frequent outlier colors, replace it
            if my_similar_border(pixels[i, j], top_outliers):
                # Collect the horizontal and vertical neighbors (smaller radius here)
                neighborhood_x = []
                neighborhood_y = []

                for m in range(i - int(radius / 3), i + int(radius / 3) + 1):
                    if 0 <= m < width:
                        neighborhood_x.append(pixels[m, j])

                for n in range(j - int(radius / 3), j + int(radius / 3) + 1):
                    if 0 <= n < height:
                        neighborhood_y.append(pixels[i, n])

                neighborhood, direction = get_right_neighborhood(target_color, neighborhood_x, neighborhood_y)
                # Drop neighbors that are themselves edge-outlier colors
                neighborhood = [n for n in neighborhood if n not in top_outlier_colors]
                neighborhood = np.array(neighborhood)
                average_color = tuple(np.mean(neighborhood, axis=0, dtype=int))

                # Replace the pixel color
                pixels[i, j] = average_color
    return image

My run script is as follows:

When I ran this myself I used threhold = 1, which worked reasonably well. redius must be a multiple of 3 (the code divides it by 3), and the same goes for redius_border. When saving the image, call .convert('RGB') on it first.

if __name__ == '__main__':
    # Thresholds for judging RGB similarity; these can be fairly small
    threhold = 1
    threhold_border = 3

    # Sampling radii; keep them fairly small and choose them according to the thickness of the box
    redius = 9
    redius_border = 9

    # Parameters for the box-edge pass
    step_add = 37
    step_max = 2000
    step = 0
    all_outliers = []
    top_outlier_num = 10

    # Create the Isolation Forest model
    model = IsolationForest(contamination=0.05)  # expected proportion of outliers

    image_file = r""  # replace with the path to your input image folder
    for jpg in tqdm(os.listdir(image_file)):
        if ".jpg" in jpg:
            jpg_path = os.path.join(image_file, jpg)  # full path of the image
            image = Image.open(jpg_path)  # read the image with PIL

            # Locate the label boxes
            imag = cv_imread(jpg_path)
            labels = get_coordinate(imag)
            for i in labels:
                x_1, y_1, x_2, y_2 = i
                x, y = x_1, y_1
                try:
                    image = replace_color_around_point(image, x, y, x_1, y_1, x_2, y_2, redius)
                except Exception:
                    # Leave the image as-is for this box and record which file failed
                    print(jpg)

            image.show()
            # image.convert('RGB').save(jpg_path)

Finally, here is the full code in one piece, in case I ever need it again (laughs):

from PIL import Image
import numpy as np
from sklearn.ensemble import IsolationForest
from collections import Counter
import cv2, os
from tqdm import tqdm


def cv_imread(filePath):
    # np.fromfile + imdecode works for paths with non-ASCII (e.g. Chinese) characters
    cv_img = cv2.imdecode(np.fromfile(filePath, dtype=np.uint8), -1)
    ## imdecode, like cv2.imread, returns the image in BGR channel order, so it can be fed
    ## to other OpenCV functions directly; uncomment the next line only if an RGB array is
    ## needed (the conversion simply swaps the channel order)
    ## cv_img = cv2.cvtColor(cv_img, cv2.COLOR_BGR2RGB)
    return cv_img


def get_coordinate(img):
    # Convert to the HSV color space
    hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

    # HSV range of the box color (adjust these bounds to match your own label box color)
    color1_lower = np.array([30, 80, 255])
    color1_upper = np.array([40, 200, 255])

    # Apply the color-range mask to the HSV image
    mask1 = cv2.inRange(hsv, color1_lower, color1_upper)

    # Masked copy of the original image (only useful for visual inspection, not used below)
    result1 = cv2.bitwise_and(img, img, mask=mask1)

    # Find the contours of the rectangular boxes
    contours1, hierarchy1 = cv2.findContours(mask1, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    # Convert each contour into bounding-box coordinates
    labels = []
    for contour1 in contours1:
        x, y, w, h = cv2.boundingRect(contour1)
        labels.append((x, y, x + w, y + h))
    return labels


# Check whether two colors are similar (mean absolute channel difference below threhold)
def my_similar(a, b):
    if sum([abs(x - y) for (x, y) in zip(list(a), list(b))]) / 3 < threhold:
        return True
    return False


# Check whether a color is similar to any of the (color, count) pairs in b_s
def my_similar_border(a, b_s):
    for b in b_s:
        if sum([abs(x - y) for (x, y) in zip(list(a), list(b)[0])]) / 3 < threhold_border:
            return True
    return False


# Collect outliers, used later to clean up the edges of the box
def get_outlier(tmp, model):
    normal_point = []
    unnormal_point = []
    # Fit the model on the data
    model.fit(tmp)

    # Predict a label for each data point: -1 means outlier, 1 means normal
    labels = model.predict(tmp)

    # Split the points into normal and abnormal according to their labels
    for i, label in enumerate(labels):
        if label == -1:
            unnormal_point.append(tmp[i])
        if label == 1:
            normal_point.append(tmp[i])
    return normal_point, unnormal_point


# Pick the horizontal or vertical neighbors for color replacement: the direction whose
# neighbors differ more from the box color is assumed to be background
def get_right_neighborhood(target_color, neighborhood_x, neighborhood_y):
    diff_x = 0
    diff_y = 0

    for neighborhood in neighborhood_x:
        diff_x += abs(sum([x - y for x, y in zip(list(neighborhood), list(target_color))]) / 3)
    for neighborhood in neighborhood_y:
        diff_y += abs(sum([x - y for x, y in zip(list(neighborhood), list(target_color))]) / 3)
    if diff_x > diff_y:
        return neighborhood_x, 1
    return neighborhood_y, 2


def replace_color_around_point(image, x, y, x_1, y_1, x_2, y_2, radius):
    # Sampling-step counter
    step = 0

    # Access the pixel data of the PIL image
    pixels = image.load()

    # Image width and height
    width, height = image.size

    ############################ 1. Process the interior of the box ############################

    # Color of the target pixel (a point on the box)
    target_color = pixels[x, y]
    # Loop over every pixel inside the box region
    for i in range(x_1, x_2):
        for j in range(y_1, y_2):
            # If the pixel color is similar to the target color, replace it with the
            # average color of the surrounding region
            if my_similar(pixels[i, j], target_color):
                # Collect the horizontal and vertical neighbors
                neighborhood_x = []
                neighborhood_y = []

                for m in range(i - radius, i + radius + 1):
                    if 0 <= m < width:
                        neighborhood_x.append(pixels[m, j])

                for n in range(j - radius, j + radius + 1):
                    if 0 <= n < height:
                        neighborhood_y.append(pixels[i, n])

                neighborhood, direction = get_right_neighborhood(target_color, neighborhood_x, neighborhood_y)
                # Drop neighbors that are themselves part of the box
                neighborhood = [n for n in neighborhood if not my_similar(n, target_color)]
                neighborhood = np.array(neighborhood)
                average_color = tuple(np.mean(neighborhood, axis=0, dtype=int))
                average_color_part_1 = tuple(np.mean(neighborhood[0:int(len(neighborhood) / 2)], axis=0, dtype=int))
                average_color_part_2 = tuple(np.mean(neighborhood[int(len(neighborhood) / 2) + 1:], axis=0, dtype=int))

                # Replace the pixel color
                pixels[i, j] = average_color

                # Horizontal direction: also repaint a short strip to the left and right
                if direction == 1:
                    for m in range(i - int(redius_border / 3), i):
                        if 0 <= m < width:
                            pixels[m, j] = average_color_part_1

                    for m in range(i, i + int(redius_border / 3) + 1):
                        if 0 <= m < width:
                            pixels[m, j] = average_color_part_2

                # Vertical direction: also repaint a short strip above and below
                if direction == 2:
                    for n in range(j - int(redius_border / 3), j):
                        if 0 <= n < height:
                            pixels[i, n] = average_color_part_1

                    for n in range(j, j + int(redius_border / 3) + 1):
                        if 0 <= n < height:
                            pixels[i, n] = average_color_part_2

                step += 1

                # Every step_add replacements (up to step_max), sample outliers from the
                # neighborhood; these tend to be the anti-aliased edge colors of the box
                if step % step_add == 0 and step < step_max:
                    normal_point, unnormal_point = get_outlier(neighborhood, model)
                    unnormal_point = [tuple(x) for x in unnormal_point]
                    all_outliers.extend(unnormal_point)

    ############################ 2. Process the edges of the box ############################

    # Count how often each outlier color was seen
    all_outlier_counts = Counter(all_outliers)

    # Keep the most frequent outlier colors; these are used to remove the box edges
    top_outliers = all_outlier_counts.most_common(top_outlier_num)
    top_outlier_colors = [c for c, _ in top_outliers]

    # Loop over every pixel inside the box region again
    for i in range(x_1, x_2):
        for j in range(y_1, y_2):

            # If the pixel is similar to one of the frequent outlier colors, replace it
            if my_similar_border(pixels[i, j], top_outliers):
                # Collect the horizontal and vertical neighbors (smaller radius here)
                neighborhood_x = []
                neighborhood_y = []

                for m in range(i - int(radius / 3), i + int(radius / 3) + 1):
                    if 0 <= m < width:
                        neighborhood_x.append(pixels[m, j])

                for n in range(j - int(radius / 3), j + int(radius / 3) + 1):
                    if 0 <= n < height:
                        neighborhood_y.append(pixels[i, n])

                neighborhood, direction = get_right_neighborhood(target_color, neighborhood_x, neighborhood_y)
                # Drop neighbors that are themselves edge-outlier colors
                neighborhood = [n for n in neighborhood if n not in top_outlier_colors]
                neighborhood = np.array(neighborhood)
                average_color = tuple(np.mean(neighborhood, axis=0, dtype=int))

                # Replace the pixel color
                pixels[i, j] = average_color
    return image


if __name__ == '__main__':
    # Thresholds for judging RGB similarity; these can be fairly small
    threhold = 1
    threhold_border = 3

    # Sampling radii; keep them fairly small and choose them according to the thickness of the box
    redius = 9
    redius_border = 9

    # Parameters for the box-edge pass
    step_add = 37
    step_max = 2000
    step = 0
    all_outliers = []
    top_outlier_num = 10

    # Create the Isolation Forest model
    model = IsolationForest(contamination=0.05)  # expected proportion of outliers

    image_file = r""  # replace with the path to your input image folder
    for jpg in tqdm(os.listdir(image_file)):
        if ".jpg" in jpg:
            jpg_path = os.path.join(image_file, jpg)  # full path of the image
            image = Image.open(jpg_path)  # read the image with PIL

            # Locate the label boxes
            imag = cv_imread(jpg_path)
            labels = get_coordinate(imag)
            for i in labels:
                x_1, y_1, x_2, y_2 = i
                x, y = x_1, y_1
                try:
                    image = replace_color_around_point(image, x, y, x_1, y_1, x_2, y_2, redius)
                except Exception:
                    # Leave the image as-is for this box and record which file failed
                    print(jpg)

            image.show()
            # image.convert('RGB').save(jpg_path)