Homomorphic Filtering in Python
import cv2           # For OpenCV modules (For Image I/O and Contour Finding)
import numpy as np   # For general purpose array manipulation
import scipy.fftpack # For FFT2

#### imclearborder definition

def imclearborder(imgBW, radius):

    # Given a black and white image, first find all of its contours
    imgBWcopy = imgBW.copy()
    contours, hierarchy = cv2.findContours(imgBWcopy.copy(), cv2.RETR_LIST,
        cv2.CHAIN_APPROX_SIMPLE)

    # Get dimensions of image
    imgRows = imgBW.shape[0]
    imgCols = imgBW.shape[1]

    contourList = []  # ID list of contours that touch the border

    # For each contour...
    for idx in np.arange(len(contours)):
        # Get the i'th contour
        cnt = contours[idx]

        # Look at each point in the contour
        for pt in cnt:
            rowCnt = pt[0][1]
            colCnt = pt[0][0]

            # If this is within the radius of th...
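The listing above is cut off before it reaches the homomorphic filtering step itself. For orientation, here is a minimal sketch of that step under common assumptions: a 2-D grayscale float image, a Gaussian-shaped high-emphasis transfer function, and illustrative parameter names (homomorphic_filter, gamma_l, gamma_h, d0, c) that are not from the original post.

import numpy as np
import scipy.fftpack

def homomorphic_filter(img, gamma_l=0.5, gamma_h=2.0, d0=30.0, c=1.0):
    # Work in the log domain so illumination (low frequency) and
    # reflectance (high frequency) become additive components
    img_log = np.log1p(np.array(img, dtype="float64"))

    # 2-D FFT of the log image, shifted so DC sits at the center
    img_fft = scipy.fftpack.fftshift(scipy.fftpack.fft2(img_log))

    # Gaussian high-emphasis transfer function: attenuates low
    # frequencies toward gamma_l and boosts high ones toward gamma_h
    rows, cols = img_log.shape
    u = np.arange(rows) - rows / 2
    v = np.arange(cols) - cols / 2
    V, U = np.meshgrid(v, u)
    D2 = U ** 2 + V ** 2
    H = (gamma_h - gamma_l) * (1.0 - np.exp(-c * D2 / (d0 ** 2))) + gamma_l

    # Filter, invert the FFT, and undo the log transform
    img_filt = scipy.fftpack.ifft2(scipy.fftpack.ifftshift(H * img_fft))
    img_out = np.expm1(np.real(img_filt))

    # Rescale to 0..255 for display
    img_out = (img_out - img_out.min()) / (img_out.max() - img_out.min())
    return np.array(255 * img_out, dtype="uint8")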

# Package loading
import argparse
import os
# Torch-based solver
from solver import Solver
# For data loading
from data_loader import get_loader
# Torch backend
from torch.backends import cudnn

# Main function; takes the parsed config as its parameter
def main(config):
    cudnn.benchmark = False

    # Load the images
    data_loader = get_loader(image_path=config.image_path,
                             image_size=config.image_size,
                             batch_size=config.batch_size,
                             num_workers=config.num_workers)
    # Set up the solver
    solver = Solver(config, data_loader)

    # Create directories if they do not exist
    # (folders for saving the models and the samples)
    if not os.path.exists(config.model_path):
        os.makedirs(config.model_path)
    if not os.path.exists(config.sample_path):
        os.makedirs(config.sample_path)

    # Train or sample the images depending on the mode; sampling means generation
    if config.mode == 'train':
        solver.train()
    elif config.mode == 'sample':
        solver.sample()
# Main routine
if __name__ == '__main__':
    # Argument parser
    parser = argparse.ArgumentParser()

    # model hyper-parameters
    parser.add_argument('--image_size', type=int, default=64)
    parser.add_argument('--z_dim', type=int, default=100)
    parser.add_argument('--g_conv_dim', type=int, default=64)
    parser.add_argument('--d_conv_dim', type=int, default=64)

    # training hyper-parameters
    parser.add_argument('--num_epochs', type=int, default=20)
    parser.add_argument('--batch_size', type=int, default=6)
    parser.add_argument('--sample_size', type=int, default=10)
    parser.add_argument('--num_workers', type=int, default=4)
    parser.add_argument('--lr', type=float, default=0.0002)
    parser.add_argument('--beta1', type=float, default=0.5)    # momentum1 in Adam
    parser.add_argument('--beta2', type=float, default=0.999)  # momentum2 in Adam

    # misc
    parser.add_argument('--mode', type=str, default='train')
    parser.add_argument('--model_path', type=str, default='./models')
    parser.add_argument('--sample_path', type=str, default='./samples')
    parser.add_argument('--image_path', type=str, default='./data')
    parser.add_argument('--log_step', type=int, default=10)
    parser.add_argument('--sample_step', type=int, default=50)

    # Parse the arguments into the config object
    config = parser.parse_args()
    # Print the parameters
    print(config)
    # Call the main routine
    main(config)
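main() does not have to be driven by the command line; any object with the same attributes as the parsed arguments works. A hedged example that mirrors the add_argument defaults above and only switches the mode to sampling (it assumes trained checkpoints already exist under model_path):

import argparse

# Build a config equivalent to the parser defaults above, overriding only the mode
config = argparse.Namespace(
    image_size=64, z_dim=100, g_conv_dim=64, d_conv_dim=64,
    num_epochs=20, batch_size=6, sample_size=10, num_workers=4,
    lr=0.0002, beta1=0.5, beta2=0.999,
    mode='sample', model_path='./models', sample_path='./samples',
    image_path='./data', log_step=10, sample_step=50)

main(config)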
def sample(self):
    # Load trained parameters
    # Build the checkpoint file paths for the generator and the discriminator
    g_path = os.path.join(self.model_path, 'generator-%d.pkl' % (self.num_epochs))
    d_path = os.path.join(self.model_path, 'discriminator-%d.pkl' % (self.num_epochs))
    # Load the generator and discriminator weights
    self.generator.load_state_dict(torch.load(g_path))
    self.discriminator.load_state_dict(torch.load(d_path))
    # Switch both models to evaluation mode
    self.generator.eval()
    self.discriminator.eval()

    # Sample the images
    # Draw a batch of noise vectors
    noise = self.to_variable(torch.randn(self.sample_size, self.z_dim))
    # Generate fake images from the noise with the generator
    fake_images = self.generator(noise)
    # Build the output path
    sample_path = os.path.join(self.sample_path, 'fake_samples-final.png')
    # Save the image grid with torchvision
    torchvision.utils.save_image(self.denorm(fake_images.data), sample_path, nrow=12)
    # Report the result
    print("Saved sampled images to '%s'" % sample_path)
def sample(self):
    # Load trained parameters
    g_path = os.path.join(self.model_path, 'generator-%d.pkl' % (self.num_epochs))
    d_path = os.path.join(self.model_path, 'discriminator-%d.pkl' % (self.num_epochs))
    self.generator.load_state_dict(torch.load(g_path))
    self.discriminator.load_state_dict(torch.load(d_path))
    self.generator.eval()
    self.discriminator.eval()

    # Sample the images
    noise = self.to_variable(torch.randn(self.sample_size, self.z_dim))
    fake_images = self.generator(noise)
    sample_path = os.path.join(self.sample_path, 'fake_samples-final.png')
    torchvision.utils.save_image(self.denorm(fake_images.data), sample_path, nrow=12)
    print("Saved sampled images to '%s'" % sample_path)

    # Calculate the discriminator loss on the real images
    for i, images in enumerate(self.data_loader):
        images = self.to_variable(images)
        outputs = self.discriminator(images)
        fake_loss = torch.mean(outputs ** 2)
        print(fake_loss)

    # Calc Anomaly Scores ------------------------------------------------
    # added by PWW, 171024 for iscx2012 Anomaly Detection
    sumASM = 0.0
    k = 0
    for i, images in enumerate(self.data_loader):
        # X OK
        X = self.to_variable(images)
        # print("X = ", X)
        # f(X) OK
        f_X = self.discriminator(X)
        # print("f_X = ", f_X)
        # G(z) OK
        G_Z = self.generator(noise)
        # print("G_z = ", G_Z)
        # f(G(z)) OK
        f_G = self.discriminator(G_Z)
        # print("f_G = ", f_G)
        # R(x) = abs(x - G(z))
        R_X = torch.abs(X - G_Z)
        # print("R_X = ", R_X)
        # D(x) = abs(f(x) - f(G(z)))
        D_X = torch.abs(f_X - f_G)
        # print("D_X = ", D_X)
        # L(z) = (1 - lambda) * R(x) + lambda * D(x), with lambda = 0.5
        anomalyScore = (1 - 0.5) * R_X + 0.5 * D_X
        aScoreMean = torch.mean(anomalyScore)
        # print("anomalyScore = ", aScoreMean)
        sumASM += aScoreMean
        k += 1
    meanASM = sumASM / k
    print("mean of anomalyScoreMean : ", meanASM)