통합 코드: RGB+IR 기반 FDTD 시뮬레이션 및 센서 예측 컬러 이미지 복원
이제 네 가지 기능을 하나의 코드로 통합하여,
✅ Object Image 로드 및 Bayer 필터 적용
✅ Angular Spectrum Method 기반 센서 입사 Color Image 계산
✅ Pixel Array 기반 MTF 계산 및 Interpolation
✅ 센서에서 예측된 컬러 이미지 복원
각 기능을 독립적인 함수로 구현하여, 코드 실행 시 단계별 결과를 얻을 수 있도록 설계.
🔹 최종 통합 코드
import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import griddata
from scipy.fft import fft2, ifft2, fftshift, ifftshift
from scipy.ndimage import gaussian_filter
from skimage import io, transform
# === [1] Parameter definitions ===
dx = 0.1e-6 # spatial sampling pitch (0.1 um); NOTE(review): not referenced in the visible code -- confirm intended use
sensor_size_x = 5.76e-3 # sensor width (5.76 mm)
sensor_size_y = 4.29e-3 # sensor height (4.29 mm)
pixel_size = 0.8e-6 # pixel pitch (0.8 um)
pixel_array_size = (5, 5) # 5x5 pixel array
focal_length = 5.0e-3 # lens focal length (5 mm)
f_number = 2.8 # F-number; NOTE(review): not referenced in the visible code -- confirm intended use
object_size = 10e-3 # object image size (10 mm); NOTE(review): not referenced in the visible code -- confirm intended use
# Wavelengths for each channel (R, G, B, IR), in meters
wavelengths = {
"R": 650e-9, # Red
"G": 550e-9, # Green
"B": 450e-9, # Blue
"IR": 850e-9 # Infrared
}
# 11 field positions (x, y) across the sensor: 5 along the horizontal axis,
# 2 diagonal, 4 along the vertical axis
field_positions = {
"field_1": (-sensor_size_x/2, 0),
"field_2": (-sensor_size_x/4, 0),
"field_3": (0, 0),
"field_4": (sensor_size_x/4, 0),
"field_5": (sensor_size_x/2, 0),
"field_6": (-sensor_size_x/4, -sensor_size_y/4),
"field_7": (sensor_size_x/4, sensor_size_y/4),
"field_8": (0, sensor_size_y/2),
"field_9": (0, -sensor_size_y/2),
"field_10": (0, sensor_size_y/4),
"field_11": (0, -sensor_size_y/4)
}
# === [2] Load the object image and define the Bayer filter ===
def load_and_resize_image(image_path, target_size=512):
    """Read an image from disk and resample it onto a target_size x target_size grid."""
    raw = io.imread(image_path)
    return transform.resize(raw, (target_size, target_size), anti_aliasing=True)
def create_bayer_filter(array_size):
    """Build a Bayer mosaic of channel indices with the requested shape.

    Parameters
    ----------
    array_size : tuple[int, int]
        (rows, cols) of the desired filter array.

    Returns
    -------
    2-D int ndarray of shape ``array_size`` where 0 = Red, 1 = Green, 2 = Blue.
    """
    pattern = np.array([[0, 1], [1, 2]])  # 0: Red, 1: Green, 2: Blue
    # Tile with ceiling division and crop to the exact size.  The original used
    # floor division (array_size // 2), which silently returned a 4x4 mosaic
    # for the 5x5 pixel array configured above.
    reps = (-(-array_size[0] // 2), -(-array_size[1] // 2))
    return np.tile(pattern, reps)[:array_size[0], :array_size[1]]
bayer_filter = create_bayer_filter(pixel_array_size)
# === [3] Generate the sensor-incident color image via the Angular Spectrum Method ===
def angular_spectrum_method(E0, wavelength, z, x_pos, y_pos,
                            pixel_size=0.8e-6, focal_length=5.0e-3):
    """Propagate a complex field by distance ``z`` with the Angular Spectrum Method.

    Parameters
    ----------
    E0 : 2-D ndarray
        Input complex field sampled on a square grid.
    wavelength : float
        Wavelength in meters.
    z : float
        Propagation distance in meters (``z = 0`` returns the input field).
    x_pos, y_pos : float
        Field position on the sensor, used to derive a chief-ray tilt angle.
    pixel_size : float, optional
        Sampling pitch of the grid in meters (defaults to the module value).
    focal_length : float, optional
        Lens focal length in meters (defaults to the module value).

    Returns
    -------
    2-D complex ndarray with the same shape as ``E0``.
    """
    k = 2 * np.pi / wavelength
    grid_size = E0.shape[0]
    # Centered (fftshift-ed) spectrum of the input field.
    E0_spectrum = fftshift(fft2(ifftshift(E0)))
    # Shift the frequency axes so they line up with the centered spectrum;
    # the original multiplied a centered spectrum by an uncentered H.
    fx = fftshift(np.fft.fftfreq(grid_size, d=pixel_size))
    fy = fftshift(np.fft.fftfreq(grid_size, d=pixel_size))
    FX, FY = np.meshgrid(fx, fy)
    # Chief-ray angle for this off-axis field position.
    theta = np.arctan(np.sqrt(x_pos**2 + y_pos**2) / focal_length)
    kx = k * np.sin(theta)
    # NOTE(review): a linear phase over frequency produces a spatial shift, but
    # kx * FX has units of rad/m^2, so this tilt term looks dimensionally
    # suspect -- confirm the intended physics with the original author.
    phase_tilt = np.exp(1j * (kx * FX))
    # Transfer function H = exp(i k z sqrt(1 - (lambda fx)^2 - (lambda fy)^2)).
    k_z_squared = 1 - (wavelength * FX)**2 - (wavelength * FY)**2
    evanescent = k_z_squared < 0
    k_z_squared[evanescent] = 0
    # Bug fixes vs. the original: (1) the propagation distance z was ignored,
    # so every z-plane produced an identical field; (2) evanescent components
    # were passed through with unit magnitude instead of being suppressed.
    H = np.exp(1j * k * z * np.sqrt(k_z_squared))
    H[evanescent] = 0
    E_propagated_spectrum = E0_spectrum * H * phase_tilt
    return ifftshift(ifft2(fftshift(E_propagated_spectrum)))
# Propagate a uniform 256x256 test field to every (color, field position,
# z-plane) combination, cache the complex result, and dump its real part
# to a CSV-style .dat file.
sensor_fields = {color: {} for color in wavelengths}
z_planes = [0, 1e-6]
for color, wavelength in wavelengths.items():
    for field_name, (fx0, fy0) in field_positions.items():
        for z in z_planes:
            propagated = angular_spectrum_method(np.ones((256, 256)), wavelength, z, fx0, fy0)
            sensor_fields[color][f"{field_name}_z_{z}"] = propagated
            np.savetxt(f"{color}_{field_name}_z_{z}.dat", np.real(propagated), delimiter=",")
# === [4] Compute the MTF from the pixel array ===
def extract_pixel_signals(sensor_data, bayer_filter, stride=5):
    """Average |sensor_data| over each Bayer phase's subsampled grid.

    Parameters
    ----------
    sensor_data : 2-D ndarray (real or complex)
        Field or intensity samples on the sensor grid.
    bayer_filter : 2-D ndarray
        Bayer mosaic; only its shape drives the iteration.
    stride : int, optional
        Subsampling stride between same-phase pixels (was hard-coded to 5).

    Returns
    -------
    float32 ndarray with the same shape as ``bayer_filter``.
    """
    signals = np.zeros_like(bayer_filter, dtype=np.float32)
    for i in range(bayer_filter.shape[0]):
        for j in range(bayer_filter.shape[1]):
            # Mean magnitude of all pixels sharing this mosaic phase.
            signals[i, j] = np.mean(np.abs(sensor_data[i::stride, j::stride]))
    return signals
def compute_mtf(signals):
    """Estimate an MTF curve: smooth the pixel signals, then take |FFT| along rows."""
    smoothed = gaussian_filter(signals, sigma=1)
    spectrum = np.fft.fft(smoothed)
    return np.abs(spectrum)
def interpolate_mtf(mtf_data, sensor_size_x, sensor_size_y):
    """Map MTF samples onto physical sensor coordinates.

    NOTE(review): despite its name, this performs no interpolation -- it only
    builds the coordinate meshgrid and returns ``mtf_data`` unchanged.
    """
    xs = np.linspace(0, sensor_size_x, mtf_data.shape[1])
    ys = np.linspace(0, sensor_size_y, mtf_data.shape[0])
    grid_x, grid_y = np.meshgrid(xs, ys)
    return grid_x, grid_y, mtf_data
# Example: MTF of the on-axis green field at the in-focus plane (z = 0),
# mapped onto physical sensor coordinates.
sensor_mtf = compute_mtf(extract_pixel_signals(sensor_fields["G"]["field_3_z_0"], bayer_filter))
X, Y, interpolated_mtf = interpolate_mtf(sensor_mtf, sensor_size_x, sensor_size_y)
# === [5] Reconstruct the sensor-predicted color image ===
def reconstruct_bayer_image(sensor_data, bayer_filter, stride=5):
    """Scatter per-phase signals back onto an RGB canvas following the mosaic.

    Parameters
    ----------
    sensor_data : 2-D ndarray
        One signal value per Bayer phase (same shape as ``bayer_filter``).
    bayer_filter : 2-D int ndarray
        Mosaic of channel indices (0 = R, 1 = G, 2 = B).
    stride : int, optional
        Upsampling factor between same-phase pixels (was hard-coded to 5).

    Returns
    -------
    float32 ndarray of shape ``(rows * stride, cols * stride, 3)``; positions
    not covered by the mosaic remain zero.
    """
    img_shape = (sensor_data.shape[0] * stride, sensor_data.shape[1] * stride, 3)
    reconstructed_image = np.zeros(img_shape, dtype=np.float32)
    for i in range(bayer_filter.shape[0]):
        for j in range(bayer_filter.shape[1]):
            color_channel = bayer_filter[i, j]
            # Broadcast this phase's value to every pixel sharing the phase.
            reconstructed_image[i::stride, j::stride, color_channel] = sensor_data[i, j]
    return reconstructed_image
def demosaic_bayer_image(bayer_image):
    """Fill unsampled (zero) pixels of each channel with that channel's mean.

    Parameters
    ----------
    bayer_image : ndarray of shape (H, W, 3)
        Sparse RGB image where unsampled positions are exactly 0.

    Returns
    -------
    New (H, W, 3) array.  Fixes vs. the original: the input is no longer
    mutated in place (the original wrote through channel views), and an
    all-zero channel no longer produces NaN from ``np.mean`` over an empty
    selection (it is left as zeros instead).
    """
    channels = []
    for c in range(3):
        # Copy so we never write back into the caller's array.
        channel = bayer_image[:, :, c].copy()
        positives = channel > 0
        if positives.any():
            channel[channel == 0] = np.mean(channel[positives])
        channels.append(channel)
    return np.stack(channels, axis=2)
# End-to-end example: rebuild an RGB image from the on-axis (field_3, z = 0)
# green-channel sensor signals.
sensor_data_green = extract_pixel_signals(sensor_fields["G"]["field_3_z_0"], bayer_filter)
bayer_image = reconstruct_bayer_image(sensor_data_green, bayer_filter)
reconstructed_rgb_image = demosaic_bayer_image(bayer_image)
# === [6] Visualize the results ===
plt.figure(figsize=(12, 6))
# Left panel: RGB image reconstructed from the Bayer-sampled sensor signals.
plt.subplot(1, 2, 1)
plt.imshow(reconstructed_rgb_image)
plt.title("Demosaiced RGB Image")
plt.axis("off")
# Right panel: MTF samples mapped over the sensor area.
plt.subplot(1, 2, 2)
plt.imshow(interpolated_mtf, cmap='hot')
plt.title("MTF Distribution over Sensor")
plt.axis("off")
plt.show()
'Camera' 카테고리의 다른 글
Diffractsim 수동 설치 방법 (0) | 2025.02.12 |
---|---|
Python 에서 CUDA 사용법 (0) | 2025.02.12 |
ASM 기반 Pixel Array Center/Edge 이미지 시뮬레이션 (1) | 2025.02.10 |
Lens PSF Complex Vectorial Field Data (0) | 2025.02.10 |
[Optics] Diffraction 현상에 대한 Python 모듈 (1) | 2023.08.18 |