영상처리 Chap07_영상 특징과 서술자 추출
Scipy 라이브러리의 signal 모듈을 이용한 컨볼루션. SciPy API : https://docs.scipy.org/doc/scipy/reference/index.html — signal.convolve2d() API : https://docs.scipy.org/doc/scipy/reference/generated/scipy.signal.convolve2d.html
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 |
# Convolution demo: apply a spatial filter kernel to a grayscale image with
# scipy.signal.convolve2d and display the original next to the filtered result.
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
from scipy import signal

# Kernel choices — uncomment exactly one.  Named `kernel` instead of `filter`
# to avoid shadowing the Python builtin.
# kernel = [[1/9., 1/9., 1/9.], [1/9., 1/9., 1/9.], [1/9., 1/9., 1/9.]]  # 3x3 smoothing
kernel = np.ones((11, 11)) / 121                             # 11x11 box smoothing
# kernel = [[0., -1., 0.], [-1., 4., -1.], [0., -1., 0.]]    # Laplacian
# kernel = [[-1., 0., 1.], [-1., 0., 1.], [-1., 0., 1.]]     # Prewitt 1 (vertical edges)
# kernel = [[1., 1., 1.], [0., 0., 0.], [-1., -1., -1.]]     # Prewitt 2 (horizontal edges)
# kernel = [[-1., 0., 1.], [-2., 0., 2.], [-1., 0., 1.]]     # Sobel 1 (vertical edges)
# kernel = [[1., 2., 1.], [0., 0., 0.], [-1., -2., -1.]]     # Sobel 2 (horizontal edges)
# kernel = [[0., 1.], [-1., 0.]]                             # Roberts 1
# kernel = [[1., 0.], [0., -1.]]                             # Roberts 2

im = Image.open('./images/chess.png')
# im = Image.open('./images/chess_football.png')
im = im.convert('L')  # convert to grayscale intensities
im = np.array(im)

# mode='same' keeps the output the same size as the input image.
im2 = signal.convolve2d(im, kernel, mode='same')

plt.figure(figsize=(10, 5))
plt.subplot(1, 2, 1); plt.axis('off'); plt.imshow(im, cmap='gray')
plt.subplot(1, 2, 2); plt.axis('off'); plt.imshow(im2, cmap='gray')
skimage의 corner_harris() 함수를 사용하여 feature 찾기
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 |
# Harris corner demo: detect corner responses on a grayscale copy of the
# image and paint the strong responses red onto the original (RGBA) image.
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
from skimage.feature import corner_harris

# im = Image.open('./images/chess.png')
im = Image.open('./images/chess_football.png')

# Harris operates on intensity values, so work on a grayscale copy.
gray = np.array(im.copy().convert('L'))
coordinates = corner_harris(gray, k=0.001)  # k=0.2

im = np.array(im)
# Mark every pixel whose response exceeds 1% of the maximum response.
# NOTE(review): the [255, 0, 0, 255] assignment assumes an RGBA image.
strong = coordinates > 0.01 * coordinates.max()
im[strong] = [255, 0, 0, 255]

plt.figure(figsize=(10, 5))
plt.axis('off')
plt.imshow(im, cmap='gray')
corner_harris() API : https://scikit-image.org/docs/stable/api/skimage.feature.html#corner-harris

RANSAC 알고리즘을 사용한 영상 매칭 — 영상1을 불러와서 affine 변환한 영상2를 생성한 후, 두 영상에서 Harris corner 특징 찾기
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 |
# Build a synthetic color image from a grayscale photo, warp it with a known
# affine transform, then extract sub-pixel Harris corners from both images —
# the input data for RANSAC-based image matching.
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
from skimage.io import imread
from skimage.util import img_as_float
from skimage.color import rgb2gray
from skimage.feature import corner_harris, corner_peaks, corner_subpix
from skimage.transform import AffineTransform, warp
from skimage.exposure import rescale_intensity
from skimage.measure import ransac

temple = rgb2gray(img_as_float(imread('./images/temple.JPG')))

# Red channel holds the photo; green/blue hold row/column position gradients.
image_original = np.zeros(list(temple.shape) + [3])
image_original[..., 0] = temple
mgrid = np.mgrid[0:image_original.shape[0], 0:image_original.shape[1]]
gradient_row, gradient_col = (mgrid / float(image_original.shape[0]))
image_original[..., 1] = gradient_row
image_original[..., 2] = gradient_col
image_original = rescale_intensity(image_original)  # brightness normalization

# Create the warped image: scale change, rotation and translation.
affine_trans = AffineTransform(scale=(0.8, 0.9), rotation=0.1,
                               translation=(120, -20))
image_warped = warp(image_original, affine_trans.inverse,
                    output_shape=image_original.shape)

image_original_gray = rgb2gray(image_original)  # intensity image
image_warped_gray = rgb2gray(image_warped)

# Harris corner measure followed by peak picking, for each image.
coordinates = corner_harris(image_original_gray)
coordinates[coordinates > 0.01 * coordinates.max()] = 1
coordinates_original = corner_peaks(coordinates, threshold_rel=0.0001,
                                    min_distance=5)

coordinates = corner_harris(image_warped_gray)
coordinates[coordinates > 0.01 * coordinates.max()] = 1
coordinates_warped = corner_peaks(coordinates, threshold_rel=0.0001,
                                  min_distance=5)

# Sub-pixel corner localization.
coordinates_original_subpix = corner_subpix(image_original_gray,
                                            coordinates_original,
                                            window_size=9)
coordinates_warped_subpix = corner_subpix(image_warped_gray,
                                          coordinates_warped,
                                          window_size=9)
coordinates[coordinates < 0] = 0

# Debug output for inspection.
print(temple.shape, image_original.shape, mgrid.shape)
# print(temple)
# print(image_original[..., 0])
# print(coordinates.shape, coordinates_original.shape)
print(coordinates_original[:5])
print(coordinates_original_subpix[:5])
# np.savetxt('./a.txt', coordinates, fmt='%.1f')
Ransac 알고리즘 함수 구현
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 |
def gaussian_weights(window_ext, sigma=1):
    """Return per-pixel weights that fall off with distance from the window center.

    An isotropic 2-D Gaussian evaluated on the integer grid
    [-window_ext, window_ext] x [-window_ext, window_ext].

    window_ext : half-width of the window (output is (2*window_ext+1)**2).
    sigma      : standard deviation of the Gaussian.
    """
    y, x = np.mgrid[-window_ext:window_ext + 1, -window_ext:window_ext + 1]
    g_w = np.exp(-0.5 * (x**2 / sigma**2 + y**2 / sigma**2))
    g_w /= 2 * np.pi * sigma * sigma  # 2-D Gaussian normalization constant
    return g_w


def match_corner(coordinates, window_ext=3):
    """Find the warped-image corner that best matches a corner of the original.

    Compares the (2*window_ext+1)**2 color window around `coordinates` in
    `image_original` against the window around every corner in
    `coordinates_warped` (module globals) using a Gaussian-weighted sum of
    squared differences (SSD).

    Returns the sub-pixel position from `coordinates_warped_subpix` of the
    best match, or [None] when no window could be compared.
    """
    row, col = np.round(coordinates).astype(np.intp)
    window_original = image_original[row - window_ext:row + window_ext + 1,
                                     col - window_ext:col + window_ext + 1, :]

    # Weight pixels by their distance from the window center.
    weights = gaussian_weights(window_ext, 3)
    weights = np.dstack((weights, weights, weights))

    # Weighted SSD against every warped-image corner.  Windows clipped at the
    # image border are skipped, so we must remember which warped corner each
    # SSD came from — otherwise argmin over the compacted SSD list indexes
    # the wrong entry of coordinates_warped_subpix (bug in the original).
    SSDs = []
    candidate_idx = []
    for i, (row, col) in enumerate(coordinates_warped):
        window_warped = image_warped[row - window_ext:row + window_ext + 1,
                                     col - window_ext:col + window_ext + 1, :]
        if window_original.shape == window_warped.shape:
            SSD = np.sum(weights * (window_original - window_warped)**2)
            SSDs.append(SSD)
            candidate_idx.append(i)

    # Use the corner with the minimum SSD as the match.
    if SSDs:
        return coordinates_warped_subpix[candidate_idx[np.argmin(SSDs)]]
    return [None]