diff --git a/plugins/ai_method/basic_cd.py b/plugins/ai_method/basic_cd.py
index d772c85..07049af 100644
--- a/plugins/ai_method/basic_cd.py
+++ b/plugins/ai_method/basic_cd.py
@@ -125,16 +125,24 @@ class BasicAICD(AlgFrontend):
                 block_size = (xsize, cell_size[1])
             if block_xy[1] + block_size[1] > ysize:
                 block_size = (xsize, ysize - block_xy[1])
+
+            np.seterr(divide='ignore',invalid='ignore')
             block_data = temp_in_ds.ReadAsArray(*block_xy, *block_size)
+            max_diff=block_data.max()
+            min_diff=block_data.min()
+            block_data = (block_data - min_diff) / (max_diff - min_diff) * 255
+            block_data = block_data.astype(np.uint8)
             out_normal_ds.GetRasterBand(1).WriteArray(block_data, *block_xy)
+            # os.system('pause')
             # hist_t, _ = np.histogram(block_data, bins=256, range=(0, 256))
             # hist += hist_t
         # print(hist)
         del temp_in_ds
         del out_normal_ds
         try:
+            # os.system('pause')
             os.remove(out_tif)
         except:
             pass
diff --git a/plugins/evaluation/main.py b/plugins/evaluation/main.py
index 1a772b1..3193e41 100644
--- a/plugins/evaluation/main.py
+++ b/plugins/evaluation/main.py
@@ -144,8 +144,9 @@ class EvaluationPlugin(BasicPlugin):
             for k in range(2):
                 for l in range(2):
-                    cfm[k,l] += np.sum((pred_block == k) & (gt_block >= l))
-
+                    # cfm[k,l] += np.sum((pred_block == k) & (gt_block >= l))
+                    cfm[k,l] += np.sum((pred_block == k) & (gt_block == l))
+
         result_path = os.path.join(Project().other_path, f'{layer.name}_{os.path.basename(gt)}_evaluation.txt')
         with open(result_path, 'w', encoding='utf-8') as f:
             f.write(f'预测结果:{layer.path}\n')
@@ -169,7 +170,7 @@ class EvaluationPlugin(BasicPlugin):
 Kappa: {kappa(cfm)}
 ''')
             f.flush()
-
+            # os.system('pause')
         txt=f'''
 预测结果:{layer.path}\n
@@ -193,12 +194,13 @@ class EvaluationPlugin(BasicPlugin):
 F1: {cfm[1,1] / (np.sum(cfm[1,:]) + np.sum(cfm[:,1]) - cfm[1,1])}
 Kappa: {kappa(cfm)}
 '''
+
+        # self.msgbox = Win(txt)
+        # self.msgbox.show()
-        self.msgbox = Win(txt)
-        self.msgbox.show()
         # os.system(f'c:/windows/notepad.exe "{result_path}"')
-        # self.send_message.emit('精度评估完成')
+        self.send_message.emit('精度评估完成,结果保存在'+result_path)

     def show_dialog(self):
         dialog = EvalutationDialog(self.mainwindow)
@@ -212,10 +214,13 @@ class EvaluationPlugin(BasicPlugin):
         t.start()

 from .window import Ui_Form
-class Win(QWidget,Ui_Form):
+# class Win(QWidget,Ui_Form):
+
+#     def __init__(self,txt):
+#         super().__init__()
+
+#         self.setupUi(self)
+#         self.label.setText(txt)
+
-    def __init__(self,txt):
-        super().__init__()
-        self.setupUi(self)
-        self.label.setText(txt)
\ No newline at end of file
diff --git a/plugins/export_to/main.py b/plugins/export_to/main.py
index 13637d6..e6b02eb 100644
--- a/plugins/export_to/main.py
+++ b/plugins/export_to/main.py
@@ -138,7 +138,7 @@ class ExportPlugin(BasicPlugin):
             [1, 255, 0, 0]
         ]

-        if self.style_path is '':
+        if self.style_path == '':
             style = default_style
         else:
             style = np.loadtxt(self.style_path, comments='#', delimiter=',')
@@ -160,27 +160,42 @@
         # TODO: check data range is 0-1
         def get_color(v):
-            first_color = []
-            second_color = []
-            first_value = 0
-            second_value = 1
-            for s in style:
-                if s[0] <= v:
-                    first_value = s[0]
-                    first_color = s[1:]
-                else:
-                    second_value = s[0]
-                    second_color = s[1:]
-                    break
-            if second_value == 1:
-                return np.array(style[-1][1:])
+            # first_color = []
+            # second_color = []
+            # first_value = 0
+            # second_value = 1
+            # for s in style:
+            #     if s[0] <= v:
+            #         first_value = s[0]
+            #         first_color = s[1:]
+            #     else:
+            #         second_value = s[0]
+            #         second_color = s[1:]
+            #         break
+            # if second_value == 1:
+            #     return np.array(style[-1][1:])
+
+            # first_dis = (v - first_value) / (second_value - first_value)
+            # second_dis = (second_value - v) / (second_value - first_value)
+            # first_color = np.array(first_color)
+            # second_color = np.array(second_color)
+            # color = first_color* first_dis + second_color * second_dis
+            # return np.floor(color)
+
+            if v==0:
+                return np.array(style[-1][1:])
+            elif v==1:
+                return np.array(style[-2][1:])
+            else:
+                first_color =style[-1][1:]
+                second_color=style[-2][1:]
+
+                first_color = np.array(first_color)
+                second_color=np.array(second_color)
+                color_inter=first_color*(1-v)+second_color*v
+                return color_inter
+
-            first_dis = (v - first_value) / (second_value - first_value)
-            second_dis = (second_value - v) / (second_value - first_value)
-            first_color = np.array(first_color)
-            second_color = np.array(second_color)
-            color = first_color* first_dis + second_color * second_dis
-            return np.floor(color)

         get_color = np.frompyfunc(get_color, nin=1, nout=1)
diff --git a/plugins/filter_collection/lms_filter.py b/plugins/filter_collection/lms_filter.py
index 704ed77..777eef7 100644
--- a/plugins/filter_collection/lms_filter.py
+++ b/plugins/filter_collection/lms_filter.py
@@ -13,46 +13,37 @@
 from datetime import datetime
 import numpy as np
 import torch
 import torch.nn.functional as F
-def adaptiveMedianDeNoise(count, original):
-    # 初始窗口大小
-    startWindow = 3
-    # 卷积范围
-    c = count // 2
-    rows, cols = original.shape
-    newI = np.zeros(original.shape)
-    # median =
-
-    for i in range(c, rows - c):
-        for j in range(c, cols - c):
-            startWindow = 3
-            k = int(startWindow / 2)
-            median = np.median(original[i - k:i + k + 1, j - k:j + k + 1])
-            mi = np.min(original[i - k:i + k + 1, j - k:j + k + 1])
-            ma = np.max(original[i - k:i + k + 1, j - k:j + k + 1])
-            if mi < median < ma:
-                if mi < original[i, j] < ma:
-                    newI[i, j] = original[i, j]
-                else:
-                    newI[i, j] = median
-
-            else:
-                while True:
-                    startWindow = startWindow + 2
-                    k = int(startWindow / 2)
-                    median = np.median(original[i - k:i + k + 1, j - k:j + k + 1])
-                    mi = np.min(original[i - k:i + k + 1, j - k:j + k + 1])
-                    ma = np.max(original[i - k:i + k + 1, j - k:j + k + 1])
-
-                    if mi < median < ma or startWindow >= count:
-                        break
-
-                if mi < median < ma or startWindow > count:
-                    if mi < original[i, j] < ma:
-                        newI[i, j] = original[i, j]
+import cv2
+from tqdm import tqdm
+def adaptiveMedianDeNoise(count, image):
+    origen = 3 # 初始窗口大小
+    board = origen//2 # 初始应扩充的边界
+    # max_board = max_size//2 # 最大可扩充的边界
+    copy = cv2.copyMakeBorder(image, *[board]*4, borderType=cv2.BORDER_DEFAULT) # 扩充边界
+    out_img = np.zeros(image.shape)
+    for i in tqdm(range(image.shape[0])):
+        for j in range(image.shape[1]):
+            def sub_func(src, size): # 两个层次的子函数
+                kernel = src[i:i+size, j:j+size]
+                # print(kernel)
+                z_med = np.median(kernel)
+                z_max = np.max(kernel)
+                z_min = np.min(kernel)
+                if z_min < z_med < z_max: # 层次A
+                    if z_min < image[i][j] < z_max: # 层次B
+                        return image[i][j]
                     else:
-                        newI[i, j] = median
+                        return z_med
+                else:
+                    next_size = cv2.copyMakeBorder(src, *[1]*4, borderType=cv2.BORDER_DEFAULT) # 增尺寸
+                    size = size+2 # 奇数的核找中值才准确
+                    if size <= count:
+                        return sub_func(next_size, size) # 重复层次A
+                    else:
+                        return z_med
+            out_img[i][j] = sub_func(copy, origen)
-    return newI
+    return out_img

 @FILTER.register
 class AdaptiveFilter(AlgFrontend):
diff --git a/plugins/filter_collection/lmsnp_filter.py b/plugins/filter_collection/lmsnp_filter.py
index 8d773d8..8927bbe 100644
--- a/plugins/filter_collection/lmsnp_filter.py
+++ b/plugins/filter_collection/lmsnp_filter.py
@@ -13,45 +13,38 @@
 from datetime import datetime
 import numpy as np
 import torch
 import torch.nn.functional as F
-def adaptiveMedianDeNoise(count, original):
-    # 初始窗口大小
-    startWindow = 3
-    # 卷积范围
-    c = count // 2
-    rows, cols = original.shape
-    newI = np.zeros(original.shape)
-    # median =
-
-    for i in range(c, rows - c):
-        for j in range(c, cols - c):
-            k = int(startWindow / 2)
-            median = np.median(original[i - k:i + k + 1, j - k:j + k + 1])
-            mi = np.min(original[i - k:i + k + 1, j - k:j + k + 1])
-            ma = np.max(original[i - k:i + k + 1, j - k:j + k + 1])
-            if mi < median < ma:
-                if mi < original[i, j] < ma:
-                    newI[i, j] = original[i, j]
-                else:
-                    newI[i, j] = median
-
-            else:
-                while True:
-                    startWindow = startWindow + 2
-                    k = int(startWindow / 2)
-                    median = np.median(original[i - k:i + k + 1, j - k:j + k + 1])
-                    mi = np.min(original[i - k:i + k + 1, j - k:j + k + 1])
-                    ma = np.max(original[i - k:i + k + 1, j - k:j + k + 1])
-
-                    if mi < median < ma or startWindow >= count:
-                        break
-
-                if mi < median < ma or startWindow >= count:
-                    if mi < original[i, j] < ma:
-                        newI[i, j] = original[i, j]
+import pandas as pd
+import cv2
+from tqdm import tqdm
+def adaptiveMedianDeNoise(count, image):
+    origen = 3 # 初始窗口大小
+    board = origen//2 # 初始应扩充的边界
+    # max_board = max_size//2 # 最大可扩充的边界
+    copy = cv2.copyMakeBorder(image, *[board]*4, borderType=cv2.BORDER_DEFAULT) # 扩充边界
+    out_img = np.zeros(image.shape)
+    for i in tqdm(range(image.shape[0])):
+        for j in range(image.shape[1]):
+            def sub_func(src, size): # 两个层次的子函数
+                kernel = src[i:i+size, j:j+size]
+                # print(kernel)
+                z_med = np.median(kernel)
+                z_max = np.max(kernel)
+                z_min = np.min(kernel)
+                if z_min < z_med < z_max: # 层次A
+                    if z_min < image[i][j] < z_max: # 层次B
+                        return image[i][j]
                     else:
-                        newI[i, j] = median
+                        return z_med
+                else:
+                    next_size = cv2.copyMakeBorder(src, *[1]*4, borderType=cv2.BORDER_DEFAULT) # 增尺寸
+                    size = size+2 # 奇数的核找中值才准确
+                    if size <= count:
+                        return sub_func(next_size, size) # 重复层次A
+                    else:
+                        return z_med
+            out_img[i][j] = sub_func(copy, origen)
-    return newI
+    return out_img

 @FILTER.register
 class AdaptiveNPFilter(AlgFrontend):
diff --git a/plugins/thres/main.py b/plugins/thres/main.py
index 3dc7d73..e1ea94b 100644
--- a/plugins/thres/main.py
+++ b/plugins/thres/main.py
@@ -138,7 +138,7 @@ class OTSUAlg(AlgFrontend):
         band = ds.GetRasterBand(1)
         # band_count = ds.RasterCount
-        hist = np.zeros(256, dtype=np.int)
+        hist = np.zeros(256, dtype=np.int_)

         xsize = ds.RasterXSize
         ysize = ds.RasterYSize