fixed bugs mentioned in update-0503.xlsx

parent 5c2e6f4e73 · commit fde841c037
@@ -125,16 +125,24 @@ class BasicAICD(AlgFrontend):
 block_size = (xsize, cell_size[1])
 if block_xy[1] + block_size[1] > ysize:
     block_size = (xsize, ysize - block_xy[1])
+
+np.seterr(divide='ignore',invalid='ignore')
 block_data = temp_in_ds.ReadAsArray(*block_xy, *block_size)
+max_diff=block_data.max()
+min_diff=block_data.min()
+
 block_data = (block_data - min_diff) / (max_diff - min_diff) * 255
+
 block_data = block_data.astype(np.uint8)
 out_normal_ds.GetRasterBand(1).WriteArray(block_data, *block_xy)
+# os.system('pause')
 # hist_t, _ = np.histogram(block_data, bins=256, range=(0, 256))
 # hist += hist_t
 # print(hist)
 del temp_in_ds
 del out_normal_ds
 try:
+    # os.system('pause')
     os.remove(out_tif)
 except:
     pass
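Note: the added lines rescale each block to 0–255 before the uint8 cast; np.seterr only silences the divide-by-zero warning for a constant block (max_diff == min_diff), it does not avoid the resulting NaN/inf values. A minimal standalone sketch of the same normalization with an explicit guard (names are illustrative, not the project's):

    import numpy as np

    def normalize_block_to_uint8(block):
        """Min-max rescale a 2-D array to 0-255; constant blocks map to 0."""
        block = block.astype(np.float64)
        min_diff, max_diff = block.min(), block.max()
        if max_diff == min_diff:          # guard instead of silencing the warning
            return np.zeros(block.shape, dtype=np.uint8)
        scaled = (block - min_diff) / (max_diff - min_diff) * 255
        return scaled.astype(np.uint8)

    block = np.array([[0.2, 0.8], [0.5, 0.5]])
    print(normalize_block_to_uint8(block))   # [[  0 255] [127 127]]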
@@ -144,7 +144,8 @@ class EvaluationPlugin(BasicPlugin):
 
 for k in range(2):
     for l in range(2):
-        cfm[k,l] += np.sum((pred_block == k) & (gt_block >= l))
+        # cfm[k,l] += np.sum((pred_block == k) & (gt_block >= l))
+        cfm[k,l] += np.sum((pred_block == k) & (gt_block == l))
 
 result_path = os.path.join(Project().other_path, f'{layer.name}_{os.path.basename(gt)}_evaluation.txt')
 with open(result_path, 'w', encoding='utf-8') as f:
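Note: the old `gt_block >= l` test counted every ground-truth pixel toward column 0 (everything is >= 0), so the confusion matrix summed to more than the number of pixels; `== l` assigns each pixel to exactly one cell. A small self-contained check (array values made up for illustration):

    import numpy as np

    pred_block = np.array([0, 0, 1, 1, 1, 0])
    gt_block   = np.array([0, 1, 1, 1, 0, 0])

    cfm = np.zeros((2, 2), dtype=np.int64)
    for k in range(2):
        for l in range(2):
            cfm[k, l] += np.sum((pred_block == k) & (gt_block == l))

    # rows: predicted class, columns: ground-truth class
    print(cfm)                            # [[2 1]
                                          #  [1 2]]
    print(cfm.sum() == pred_block.size)   # True: each pixel counted exactly once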
@@ -169,7 +170,7 @@ class EvaluationPlugin(BasicPlugin):
 Kappa: {kappa(cfm)}
 ''')
 f.flush()
-
+# os.system('pause')
 txt=f'''
 预测结果:{layer.path}\n
 
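Note: `kappa(cfm)` is the project's own helper. For reference, Cohen's kappa follows directly from a square confusion matrix; the sketch below is a standalone illustration, not necessarily the project's implementation:

    import numpy as np

    def cohen_kappa(cfm):
        """Cohen's kappa from a confusion matrix (rows: predicted, cols: truth)."""
        cfm = np.asarray(cfm, dtype=np.float64)
        total = cfm.sum()
        po = np.trace(cfm) / total                                   # observed agreement
        pe = np.sum(cfm.sum(axis=0) * cfm.sum(axis=1)) / total**2    # chance agreement
        return (po - pe) / (1 - pe)

    print(cohen_kappa([[40, 10], [5, 45]]))   # 0.7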
@@ -194,11 +195,12 @@ class EvaluationPlugin(BasicPlugin):
 Kappa: {kappa(cfm)}
 '''
 
-self.msgbox = Win(txt)
-self.msgbox.show()
+# self.msgbox = Win(txt)
+# self.msgbox.show()
+
 
 # os.system(f'c:/windows/notepad.exe "{result_path}"')
 
-# self.send_message.emit('精度评估完成')
+self.send_message.emit('精度评估完成,结果保存在'+result_path)
 
 def show_dialog(self):
     dialog = EvalutationDialog(self.mainwindow)
@@ -212,10 +214,13 @@ class EvaluationPlugin(BasicPlugin):
 t.start()
 
 from .window import Ui_Form
-class Win(QWidget,Ui_Form):
-
-    def __init__(self,txt):
-        super().__init__()
-
-        self.setupUi(self)
-        self.label.setText(txt)
+# class Win(QWidget,Ui_Form):
+
+# def __init__(self,txt):
+#     super().__init__()
+
+#     self.setupUi(self)
+#     self.label.setText(txt)
+
+
@@ -138,7 +138,7 @@ class ExportPlugin(BasicPlugin):
 [1, 255, 0, 0]
 ]
 
-if self.style_path is '':
+if self.style_path == '':
     style = default_style
 else:
     style = np.loadtxt(self.style_path, comments='#', delimiter=',')
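Note: `is ''` compares object identity rather than value and only happened to work because CPython reuses some string objects; Python 3.8+ also emits a SyntaxWarning for `is` against a literal. A quick illustration of why `==` is the right comparison (the strings below are arbitrary):

    prefix = 'sty'
    path = prefix + 'le.csv'      # built at runtime: a distinct str object
    same_text = 'style.csv'
    print(path == same_text)      # True  -- compares the characters
    print(path is same_text)      # False on CPython -- identity is not guaranteed either way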
@@ -160,27 +160,42 @@ class ExportPlugin(BasicPlugin):
 # TODO: check data range is 0-1
 
 def get_color(v):
-    first_color = []
-    second_color = []
-    first_value = 0
-    second_value = 1
-    for s in style:
-        if s[0] <= v:
-            first_value = s[0]
-            first_color = s[1:]
-        else:
-            second_value = s[0]
-            second_color = s[1:]
-            break
-    if second_value == 1:
-        return np.array(style[-1][1:])
-
-    first_dis = (v - first_value) / (second_value - first_value)
-    second_dis = (second_value - v) / (second_value - first_value)
-    first_color = np.array(first_color)
-    second_color = np.array(second_color)
-    color = first_color* first_dis + second_color * second_dis
-    return np.floor(color)
+    # first_color = []
+    # second_color = []
+    # first_value = 0
+    # second_value = 1
+    # for s in style:
+    #     if s[0] <= v:
+    #         first_value = s[0]
+    #         first_color = s[1:]
+    #     else:
+    #         second_value = s[0]
+    #         second_color = s[1:]
+    #         break
+    # if second_value == 1:
+    #     return np.array(style[-1][1:])
+
+    # first_dis = (v - first_value) / (second_value - first_value)
+    # second_dis = (second_value - v) / (second_value - first_value)
+    # first_color = np.array(first_color)
+    # second_color = np.array(second_color)
+    # color = first_color* first_dis + second_color * second_dis
+    # return np.floor(color)
+
+    if v==0:
+        return np.array(style[-1][1:])
+    elif v==1:
+        return np.array(style[-2][1:])
+    else:
+        first_color =style[-1][1:]
+        second_color=style[-2][1:]
+
+        first_color = np.array(first_color)
+        second_color=np.array(second_color)
+        color_inter=first_color*(1-v)+second_color*v
+        return color_inter
 
 
 get_color = np.frompyfunc(get_color, nin=1, nout=1)
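Note: with only two style entries used, the new branch reduces to a straight linear blend between two colors, weighted by the 0–1 value v. A self-contained sketch of that blend (the RGB values here are illustrative, not the project's default style):

    import numpy as np

    def blend_two_colors(v, color_at_0, color_at_1):
        """Linearly interpolate RGB colors for a value v in [0, 1]."""
        c0 = np.asarray(color_at_0, dtype=np.float64)
        c1 = np.asarray(color_at_1, dtype=np.float64)
        return c0 * (1 - v) + c1 * v

    # 0 -> white background, 1 -> red change pixels (illustrative colors)
    print(blend_two_colors(0.0, [255, 255, 255], [255, 0, 0]))   # [255. 255. 255.]
    print(blend_two_colors(0.5, [255, 255, 255], [255, 0, 0]))   # [255. 127.5 127.5]
    print(blend_two_colors(1.0, [255, 255, 255], [255, 0, 0]))   # [255.   0.   0.]

np.frompyfunc then broadcasts get_color over the whole prediction array, yielding an object-dtype array of per-pixel color vectors.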
@@ -13,46 +13,37 @@ from datetime import datetime
 import numpy as np
 import torch
 import torch.nn.functional as F
-def adaptiveMedianDeNoise(count, original):
-    # initial window size
-    startWindow = 3
-    # convolution extent
-    c = count // 2
-    rows, cols = original.shape
-    newI = np.zeros(original.shape)
-    # median =
-
-    for i in range(c, rows - c):
-        for j in range(c, cols - c):
-            startWindow = 3
-            k = int(startWindow / 2)
-            median = np.median(original[i - k:i + k + 1, j - k:j + k + 1])
-            mi = np.min(original[i - k:i + k + 1, j - k:j + k + 1])
-            ma = np.max(original[i - k:i + k + 1, j - k:j + k + 1])
-            if mi < median < ma:
-                if mi < original[i, j] < ma:
-                    newI[i, j] = original[i, j]
-                else:
-                    newI[i, j] = median
-            else:
-                while True:
-                    startWindow = startWindow + 2
-                    k = int(startWindow / 2)
-                    median = np.median(original[i - k:i + k + 1, j - k:j + k + 1])
-                    mi = np.min(original[i - k:i + k + 1, j - k:j + k + 1])
-                    ma = np.max(original[i - k:i + k + 1, j - k:j + k + 1])
-                    if mi < median < ma or startWindow >= count:
-                        break
-                if mi < median < ma or startWindow > count:
-                    if mi < original[i, j] < ma:
-                        newI[i, j] = original[i, j]
-                    else:
-                        newI[i, j] = median
-    return newI
+import cv2
+from tqdm import tqdm
+def adaptiveMedianDeNoise(count, image):
+    origen = 3  # initial window size
+    board = origen//2  # initial border padding
+    # max_board = max_size//2  # maximum border padding
+    copy = cv2.copyMakeBorder(image, *[board]*4, borderType=cv2.BORDER_DEFAULT)  # pad the borders
+    out_img = np.zeros(image.shape)
+    for i in tqdm(range(image.shape[0])):
+        for j in range(image.shape[1]):
+            def sub_func(src, size):  # helper for the two decision levels
+                kernel = src[i:i+size, j:j+size]
+                # print(kernel)
+                z_med = np.median(kernel)
+                z_max = np.max(kernel)
+                z_min = np.min(kernel)
+                if z_min < z_med < z_max:  # level A
+                    if z_min < image[i][j] < z_max:  # level B
+                        return image[i][j]
+                    else:
+                        return z_med
+                else:
+                    next_size = cv2.copyMakeBorder(src, *[1]*4, borderType=cv2.BORDER_DEFAULT)  # enlarge the padded source
+                    size = size+2  # keep the kernel size odd so the median is well-defined
+                    if size <= count:
+                        return sub_func(next_size, size)  # repeat level A
+                    else:
+                        return z_med
+            out_img[i][j] = sub_func(copy, origen)
+
+    return out_img
 
 @FILTER.register
 class AdaptiveFilter(AlgFrontend):
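Note: the rewritten filter follows the classic adaptive median scheme: start from a 3×3 window; if the window median is itself an impulse (level A fails), grow the window up to `count` and retry, otherwise keep the centre pixel unless it is an extreme value (level B), falling back to the median. A compact, self-contained reference version of that scheme (textbook formulation, independent of the project's code; names are illustrative):

    import numpy as np

    def adaptive_median_reference(img, max_size=7):
        """Textbook adaptive median filter; max_size plays the role of `count` above."""
        pad = max_size // 2
        padded = np.pad(img, pad, mode='reflect')
        out = img.copy()
        rows, cols = img.shape
        for i in range(rows):
            for j in range(cols):
                size = 3
                while True:
                    k = size // 2
                    win = padded[i + pad - k:i + pad + k + 1, j + pad - k:j + pad + k + 1]
                    z_min, z_med, z_max = win.min(), np.median(win), win.max()
                    if z_min < z_med < z_max:                # level A: median is not an impulse
                        if not (z_min < img[i, j] < z_max):  # level B: centre pixel is an impulse
                            out[i, j] = z_med
                        break
                    size += 2                                 # grow the window and retry
                    if size > max_size:
                        out[i, j] = z_med                     # window exhausted: fall back to median
                        break
        return out

    # small salt-and-pepper demo on a synthetic flat image
    rng = np.random.default_rng(0)
    img = np.full((64, 64), 128, dtype=np.uint8)
    mask = rng.random(img.shape) < 0.05
    img[mask] = rng.choice(np.array([0, 255], dtype=np.uint8), size=int(mask.sum()))
    print((img != 128).sum(), (adaptive_median_reference(img) != 128).sum())  # impulse count drops sharply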
@@ -13,45 +13,38 @@ from datetime import datetime
 import numpy as np
 import torch
 import torch.nn.functional as F
-def adaptiveMedianDeNoise(count, original):
-    # initial window size
-    startWindow = 3
-    # convolution extent
-    c = count // 2
-    rows, cols = original.shape
-    newI = np.zeros(original.shape)
-    # median =
-
-    for i in range(c, rows - c):
-        for j in range(c, cols - c):
-            k = int(startWindow / 2)
-            median = np.median(original[i - k:i + k + 1, j - k:j + k + 1])
-            mi = np.min(original[i - k:i + k + 1, j - k:j + k + 1])
-            ma = np.max(original[i - k:i + k + 1, j - k:j + k + 1])
-            if mi < median < ma:
-                if mi < original[i, j] < ma:
-                    newI[i, j] = original[i, j]
-                else:
-                    newI[i, j] = median
-            else:
-                while True:
-                    startWindow = startWindow + 2
-                    k = int(startWindow / 2)
-                    median = np.median(original[i - k:i + k + 1, j - k:j + k + 1])
-                    mi = np.min(original[i - k:i + k + 1, j - k:j + k + 1])
-                    ma = np.max(original[i - k:i + k + 1, j - k:j + k + 1])
-                    if mi < median < ma or startWindow >= count:
-                        break
-                if mi < median < ma or startWindow >= count:
-                    if mi < original[i, j] < ma:
-                        newI[i, j] = original[i, j]
-                    else:
-                        newI[i, j] = median
-    return newI
+import pandas as pd
+import cv2
+from tqdm import tqdm
+def adaptiveMedianDeNoise(count, image):
+    origen = 3  # initial window size
+    board = origen//2  # initial border padding
+    # max_board = max_size//2  # maximum border padding
+    copy = cv2.copyMakeBorder(image, *[board]*4, borderType=cv2.BORDER_DEFAULT)  # pad the borders
+    out_img = np.zeros(image.shape)
+    for i in tqdm(range(image.shape[0])):
+        for j in range(image.shape[1]):
+            def sub_func(src, size):  # helper for the two decision levels
+                kernel = src[i:i+size, j:j+size]
+                # print(kernel)
+                z_med = np.median(kernel)
+                z_max = np.max(kernel)
+                z_min = np.min(kernel)
+                if z_min < z_med < z_max:  # level A
+                    if z_min < image[i][j] < z_max:  # level B
+                        return image[i][j]
+                    else:
+                        return z_med
+                else:
+                    next_size = cv2.copyMakeBorder(src, *[1]*4, borderType=cv2.BORDER_DEFAULT)  # enlarge the padded source
+                    size = size+2  # keep the kernel size odd so the median is well-defined
+                    if size <= count:
+                        return sub_func(next_size, size)  # repeat level A
+                    else:
+                        return z_med
+            out_img[i][j] = sub_func(copy, origen)
+
+    return out_img
 
 @FILTER.register
 class AdaptiveNPFilter(AlgFrontend):
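Note: AdaptiveFilter and AdaptiveNPFilter now share this routine, with `count` acting as the maximum window size. A quick way to exercise it on a small noisy array is sketched below; the import path is a guess for illustration only, and cv2.medianBlur provides a fixed-window baseline for comparison:

    import numpy as np
    import cv2

    # from filters.adaptive import adaptiveMedianDeNoise   # hypothetical import path

    rng = np.random.default_rng(42)
    img = np.full((32, 32), 100, dtype=np.uint8)
    mask = rng.random(img.shape) < 0.1
    img[mask] = rng.choice(np.array([0, 255], dtype=np.uint8), size=int(mask.sum()))  # salt-and-pepper

    baseline = cv2.medianBlur(img, 3)                # fixed 3x3 median for comparison
    # denoised = adaptiveMedianDeNoise(7, img)       # count=7 caps the adaptive window
    print(img.mean(), baseline.mean())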
@@ -138,7 +138,7 @@ class OTSUAlg(AlgFrontend):
 band = ds.GetRasterBand(1)
 # band_count = ds.RasterCount
 
-hist = np.zeros(256, dtype=np.int)
+hist = np.zeros(256, dtype=np.int_)
 xsize = ds.RasterXSize
 ysize = ds.RasterYSize
 
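Note: `np.int` was only an alias for the built-in `int`; it was deprecated in NumPy 1.20 and removed in 1.24, so `np.zeros(256, dtype=np.int)` raises AttributeError on current NumPy, while `np.int_` (or plain `int`) keeps the old behaviour. A minimal sketch of the block-wise histogram accumulation this buffer feeds (block data here is synthetic):

    import numpy as np

    hist = np.zeros(256, dtype=np.int_)          # np.int_ is the platform default integer

    # accumulate a 256-bin histogram block by block
    for _ in range(4):
        block = np.random.randint(0, 256, size=(64, 64), dtype=np.uint8)
        hist_t, _ = np.histogram(block, bins=256, range=(0, 256))
        hist += hist_t

    print(hist.sum())                             # 4 * 64 * 64 = 16384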