import numpy as np
import matplotlib.pyplot as plt
import cv2
# 1. Load the input image and convert to grayscale
gray_image = cv2.imread('./data/test2.png', 0)  # Flag 0 = cv2.IMREAD_GRAYSCALE
if gray_image is None:  # cv2.imread returns None instead of raising when the file cannot be read
    raise FileNotFoundError("Could not read './data/test2.png'")
image_height, image_width = gray_image.shape  # Dimensions taken from the loaded image
# 2. Multi-Scale Feature Extraction
def downsample(image, scale):
    """Reduces the size of an image by the given scale factor.
    Args:
        image (ndarray): Input image.
        scale (int): Scale factor for downsampling.
    Returns:
        ndarray: Downsampled image.
    """
    # Strided slicing keeps every `scale`-th pixel along each axis
    return image[::scale, ::scale]
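# A quick worked example of the strided downsampling above (hypothetical array, not part of
# the pipeline): downsample(a, 2) keeps rows/columns 0 and 2 of a 4x4 input, giving a 2x2 result.
# >>> a = np.arange(16).reshape(4, 4)
# >>> downsample(a, 2)
# array([[ 0,  2],
#        [ 8, 10]])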
def upsample(image, original_shape):
    """Enlarges an image to match the original shape.
    Args:
        image (ndarray): Input image.
        original_shape (tuple): Shape to upsample to (height, width).
    Returns:
        ndarray: Upsampled image.
    """
    # Use ceiling division so the repeated array is at least as large as original_shape,
    # then crop; this keeps the output shape exact even for non-integer scale ratios
    scale_h = (original_shape[0] + image.shape[0] - 1) // image.shape[0]
    scale_w = (original_shape[1] + image.shape[1] - 1) // image.shape[1]
    upsampled = np.repeat(np.repeat(image, scale_h, axis=0), scale_w, axis=1)
    return upsampled[:original_shape[0], :original_shape[1]]
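# An alternative sketch using OpenCV's resize: nearest-neighbour interpolation gives the same
# blocky enlargement as np.repeat and always returns exactly the requested shape. Optional
# drop-in for upsample(); note that cv2.resize expects dsize as (width, height).
def upsample_cv(image, original_shape):
    """Nearest-neighbour upsample to original_shape using cv2.resize."""
    return cv2.resize(image, (original_shape[1], original_shape[0]),
                      interpolation=cv2.INTER_NEAREST)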
# Define scales for multi-scale features (scale 1 keeps the original resolution,
# so the original image does not need to be added to the list separately)
scales = [1, 2, 4]
multi_scale_features = [downsample(gray_image, scale) for scale in scales]
# Initialize combined feature map with float64 type
combined_feature = np.zeros_like(gray_image, dtype=np.float64)
# Combine multi-scale features
for feature in multi_scale_features:
    if feature.shape != gray_image.shape:
        # Upsample smaller-scale images to match original image size
        feature = upsample(feature, gray_image.shape)
    combined_feature += feature
# Average across scales to normalize the combined feature map
combined_feature /= len(multi_scale_features)
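# Aside: strided subsampling can alias fine detail. A common alternative (not what the code
# above does) is a Gaussian pyramid, which blurs before each 2x reduction. A minimal sketch
# with OpenCV, assuming two pyramid levels are enough for illustration:
def gaussian_pyramid(image, levels=2):
    """Return [image, pyrDown(image), ...] with `levels` successive 2x reductions."""
    pyramid = [image]
    for _ in range(levels):
        pyramid.append(cv2.pyrDown(pyramid[-1]))  # Gaussian blur + downsample by 2
    return pyramid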
# 3. Visualize the input and the combined feature map
plt.figure(figsize=(10, 5))
# Display the input grayscale image
plt.subplot(1, 2, 1)
plt.title("Input Image")
plt.imshow(gray_image, cmap="inferno")
plt.axis("off")
# Display the combined feature map
plt.subplot(1, 2, 2)
plt.title("Combined Feature")
plt.imshow(combined_feature, cmap="inferno")
plt.axis("off")
plt.show()
# 4. Sobel Edge Detection (Simple depth cue)
sobel_x = np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]])  # X-direction filter
sobel_y = np.array([[-1, -2, -1], [0, 0, 0], [1, 2, 1]])  # Y-direction filter
# Initialize gradient maps for X and Y directions
depth_x = np.zeros_like(combined_feature)
depth_y = np.zeros_like(combined_feature)
# Apply Sobel filters to calculate gradients
for i in range(1, combined_feature.shape[0] - 1):
    for j in range(1, combined_feature.shape[1] - 1):
        region = combined_feature[i-1:i+2, j-1:j+2]  # Extract 3x3 region
        depth_x[i, j] = np.sum(region * sobel_x)  # Convolve with Sobel X
        depth_y[i, j] = np.sum(region * sobel_y)  # Convolve with Sobel Y
# Compute final depth map as the magnitude of gradients
depth_map = np.sqrt(depth_x**2 + depth_y**2)
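# The nested Python loops above are easy to read but slow on large images. A vectorized
# alternative with OpenCV gives the same interior values (the loops leave a 1-pixel border
# at zero, while cv2.Sobel extrapolates the border); offered here as an optional sketch:
def sobel_depth_cv(feature_map):
    """Gradient-magnitude map computed with cv2.Sobel instead of explicit loops."""
    gx = cv2.Sobel(feature_map, cv2.CV_64F, 1, 0, ksize=3)  # Horizontal gradient
    gy = cv2.Sobel(feature_map, cv2.CV_64F, 0, 1, ksize=3)  # Vertical gradient
    return np.sqrt(gx**2 + gy**2)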
# 5. Normalize depth map to the range 0-1 (assumes the gradient map is not constant)
depth_map = (depth_map - depth_map.min()) / (depth_map.max() - depth_map.min())
# 6. Visualize the edge-based depth map
plt.title("Edge Detection")
plt.imshow(depth_map, cmap="inferno")
plt.axis("off")
plt.show()