Tags: python, image-processing, signal-processing, fft, deconvolution

How to remove motion blur from a given image in the frequency domain (deconvolution)?


I have read that if we have the PSF of a blurred image, we can reverse the distortion using deconvolution and recover the original image. As I understand it, blurring is a convolution with the PSF, and convolution in the spatial domain is multiplication in the frequency domain, so dividing the blurred image's spectrum by the PSF's spectrum should undo the blur in the ideal, noise-free case. Here is a minimal 1-D sketch of that idea:
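import numpy as np

x = np.random.rand(64)                        # the "original" signal
h = np.zeros(64); h[:5] = 1 / 5               # a 5-tap box blur (the PSF)
blurred = np.fft.ifft(np.fft.fft(x) * np.fft.fft(h)).real
# divide the spectra to invert the blur (this PSF's spectrum has no exact zeros)
restored = np.fft.ifft(np.fft.fft(blurred) / np.fft.fft(h)).real
print(np.allclose(restored, x))               # True in this idealized setting

I tried to do the same thing on a real image; however, I'm getting an image that looks like complete noise: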

import cv2
import numpy as np

def add_motion_blur(img, size=31, angle=11):
    kernel = np.zeros((size,size), dtype=np.float32)
    # set the middle row of the kernel array to be all ones. 
    kernel[size//2,:] = np.ones((size,),dtype=np.float32)
    # now rotate the kernel
    m = cv2.getRotationMatrix2D((size//2, size//2), angle=angle, scale=1.0)
    kernel = cv2.warpAffine(kernel, m, dsize=(size,size))
    kernel /= np.sum(kernel)
    return cv2.filter2D(img, ddepth=-1, kernel=kernel), kernel

img = cv2.imread('./img/peppers.jpg')
# adding motion blur is akin to creating the PSF because 
# it's a distorting operator, and we are distorting the image using it!
img_blurred, kernel = add_motion_blur(img, size=31, angle=60)
# now we should be able to deconvolve the image with the PSF and get the deblurred version 
# for this we can use the Fourier transform and divide our blurred image's spectrum by the PSF's
fft = np.fft.fftn(img_blurred[...,0])
# pad the kernel so it's the same shape as our image so the division goes on without an issue
fft_psf = np.fft.fftn(kernel, s=fft.shape)
fft_result = fft/fft_psf
ifft_result = np.fft.ifft2(fft_result)
deblurred_image = np.abs(ifft_result).astype(np.uint8)

cv2.imshow('deblurred image',deblurred_image)
cv2.waitKey(0)
cv2.destroyAllWindows()

This seems to result in pure noise:

[image: deconvolution output (pure noise)]

I also tried Wiener deconvolution to no avail:

# Wiener deconvolution
fft_psf_mag = np.conj(fft_psf) / (np.abs(fft_psf)**2 + 10)
fft_result = fft / fft_psf_mag

It seems to produce the same result:

[image: Wiener attempt output (still noise)]

What am I missing here? I also tried fft/(fft_psf+1e-3) as a way of regularizing away the small values in the PSF's spectrum, but this does not result in any improvement either.

Side note

For reference, here are the original image, the blurred version, and the kernel used:

[images: original image / blurred image / blur kernel]

Update:

Here is the updated version which:

  1. pads the kernel properly on all sides
  2. checks that the imaginary values are close to 0

However, the problem still persists and I cannot get anything meaningful! The outputs are given at the end:
def add_motion_blur(img, size=31, angle=11):
    kernel = np.zeros((size,size), dtype=np.float32)
    # set the middle row of the kernel array to be all ones. 
    kernel[size//2,:] = np.ones((size,),dtype=np.float32)
    # now rotate the kernel
    m = cv2.getRotationMatrix2D((size//2, size//2), angle=angle, scale=1.0)
    kernel = cv2.warpAffine(kernel, m, dsize=(size,size))
    kernel /= np.sum(kernel)
    return cv2.filter2D(img, ddepth=-1, kernel=kernel), kernel

img = cv2.imread('./img/peppers.jpg')
# adding motion blur is akin to creating the PSF because 
# it's a distorting operator, and we are distorting the image using it!
img_blurred, kernel = add_motion_blur(img, size=31, angle=60)
# now we should be able to deconvolve the image with the PSF and get the deblurred version 
# for this we can use the Fourier transform and divide our blurred image's spectrum by the PSF's
fft = np.fft.fftn(img_blurred[...,0])
# pad the kernel equally on all sides, so that it sits at the center!
pad_height = img.shape[0] - kernel.shape[0]
pad_width = img.shape[1] - kernel.shape[1]
pad_top = pad_height // 2
pad_bottom = pad_height - pad_top
pad_left = pad_width // 2
pad_right = pad_width - pad_left
kernel_padded = np.pad(kernel, [(pad_top, pad_bottom), (pad_left, pad_right)], 'constant')
fft_psf = np.fft.fft2(kernel_padded)
# the normal way (plain division) doesn't work!
# fft_result = fft/fft_psf
# Wiener deconvolution (0.8 is the best epsilon I could find to get something that can be visualized somewhat properly)
fft_psf_mag = np.conj(fft_psf) / (np.abs(fft_psf)**2 + 0.8)
fft_result = fft * fft_psf_mag
# get back the image
ifft_result = np.fft.ifft2(fft_result)
# Check if the imaginary part is close to zero
assert np.all(np.isclose(np.imag(ifft_result), 0, atol=1e-8)), 'imaginary values must be close to 0 otherwise something is wrong!'
# grab the final image
img_abs = np.abs(ifft_result).astype(np.uint8)
img_real = ifft_result.real.astype(np.uint8)

cv2.imshow('padded kernel',cv2.normalize(kernel_padded,None,0,255,cv2.NORM_MINMAX))
cv2.imshow('deblurred(img.real)', img_real)
cv2.imshow('deblurred(np.abs(img))', img_abs)
cv2.waitKey(0)
cv2.destroyAllWindows()

Here are the outputs (note that using the .real values for visualization looks inferior to the magnitude-based version):

[images: padded kernel / deblurred (img.real) / deblurred (np.abs(img))]

Update 2:

Here is the updated version which:

  1. pads the kernel properly according to this answer
  2. images are clipped properly using the real values and not their magnitudes based on the explanations here
  3. used a much lower epsilon and got a much better output
  4. instead of selecting a single channel from the BGR image, I used the grayscale version, which resulted in a much clearer output (different channels have different brightness, so they lead to different outcomes; using the grayscale version removes this discrepancy).

However, while playing with epsilon results in a better output compared to the previous attempts, it introduces some artifacts/patterns into the final deblurred result which are not intended or wanted.

Is this the best I can hope for, or is there still space for improvements?

The outputs are given at the end:


def add_motion_blur(img, size=31, angle=11):
    kernel = np.zeros((size,size), dtype=np.float32)
    # Set the middle row of the kernel array to be all ones. 
    kernel[size//2,:] = np.ones((size,),dtype=np.float32)
    # Now rotate the kernel
    m = cv2.getRotationMatrix2D((size//2, size//2), angle=angle, scale=1.0)
    kernel = cv2.warpAffine(kernel, m, dsize=(size,size))
    kernel /= np.sum(kernel)
    return cv2.filter2D(img, ddepth=-1, kernel=kernel), kernel

img = cv2.imread('./img/peppers.jpg')
img_blurred, kernel = add_motion_blur(img, size=31, angle=60)
# Use the grayscale image instead
img_blurred_gray = cv2.cvtColor(img_blurred,cv2.COLOR_BGR2GRAY)
fft = np.fft.fftn(img_blurred_gray)
# Pad the kernel properly, and then shift it
pad_height = img.shape[0] - kernel.shape[0]
pad_width = img.shape[1] - kernel.shape[1]
pad_top = pad_height // 2
pad_bottom = pad_height - pad_top
pad_left = pad_width // 2
pad_right = pad_width - pad_left
kernel_padded = np.pad(kernel, [(pad_top, pad_bottom), (pad_left, pad_right)], 'constant')
# shift the kernel
kernel_padded = np.fft.ifftshift(kernel_padded)
fft_psf = np.fft.fft2(kernel_padded)
# The normal way doesn't work very well, but after correcting the kernel it works much better than the previous attempt!
# (2 as the regularizer/epsilon seems to give a somewhat good result compared to other values.)
# fft_result = fft/(fft_psf+2)
# Wiener deconvolution works much better, 0.01 seems like a good eps, but it introduces weird artifacts in the image. 
fft_psf_mag = np.conj(fft_psf) / (np.abs(fft_psf)**2 + 0.01)
fft_result = fft * fft_psf_mag
# Get back the image; no need to shift back the result!
ifft_result = np.fft.ifft2(fft_result)
# Check if the imaginary part is close to zero
assert np.all(np.isclose(np.imag(ifft_result), 0, atol=1e-8)), 'imaginary values must be close to 0 otherwise something is wrong!'
# Grab the final image
# Clip the values for more accurate visualization
img_real = ifft_result.real.clip(0,255).astype(np.uint8)

cv2.imshow('padded kernel',cv2.normalize(kernel_padded,None,0,255,cv2.NORM_MINMAX))
cv2.imshow('deblurred(img.real)', img_real)
cv2.waitKey(0)
cv2.destroyAllWindows()

[images: padded kernel / deblurred (img.real)]

Update 3:

Here is the updated version which:

  1. fixes the artifacts caused by deblurring by expanding the input image, followed by cropping the final result to the original image dimensions, according to Cris Luengo's explanations here
  2. individual channels are operated on and later aggregated to allow for color image deblurring (Cris Luengo's explanations here).

After these changes, the results look very good. The outputs are given at the end:


def add_motion_blur(img, size=31, angle=11):
    kernel = np.zeros((size,size), dtype=np.float32)
    # Set the middle row of the kernel array to be all ones. 
    kernel[size//2,:] = np.ones((size,),dtype=np.float32)
    # Now rotate the kernel
    m = cv2.getRotationMatrix2D((size//2, size//2), angle=angle, scale=1.0)
    kernel = cv2.warpAffine(kernel, m, dsize=(size,size))
    kernel /= np.sum(kernel)
    return cv2.filter2D(img, ddepth=-1, kernel=kernel), kernel

img = cv2.imread('./img/peppers.jpg')
# let's add 50px of padding to each side of our image
pad=50
img = cv2.copyMakeBorder(img, pad,pad,pad,pad,borderType=cv2.BORDER_REFLECT)

img_blurred, kernel = add_motion_blur(img,size=31,angle=60)

pad_height = img.shape[0] - kernel.shape[0] 
pad_width = img.shape[1] - kernel.shape[1]
pad_top = (pad_height+1)//2
pad_bottom = pad_height//2
pad_left = (pad_width+1)//2
pad_right = pad_width//2
kernel_padded = np.pad(kernel, [(pad_top, pad_bottom),(pad_left, pad_right)],mode='constant')
kernel_padded = np.fft.fftshift(kernel_padded)
fft_psf = np.fft.fft2(kernel_padded)

# now let's take the fft of our image
ffts = np.fft.fftn(img_blurred)

# Wiener deconvolution
eps = 0.01
fft_psf_mag = np.conj(fft_psf)/(np.abs(fft_psf)**2 + eps)
# individually multiply each channel and then aggregate them
fft_result = np.array([ffts[...,i] * fft_psf_mag for i in range(3)]).transpose(1,2,0)

# now let's get back the image.
iffts = np.fft.ifftn(fft_result)

# before we continue, let's make sure the imaginary components are close to zero
assert np.all(np.isclose(iffts.imag, 0, atol=1e-8)), 'imaginary values must be close to zero! or something is wrong'

# take the image 
img_deblurred = iffts.real.clip(0,255).astype(np.uint8)
# crop the final image
img_deblurred_cropped = img_deblurred[pad:img.shape[0]-pad, pad:img.shape[1]-pad]

cv2.imshow('img',img)
cv2.imshow('kernel_padded', cv2.normalize(kernel_padded,None,0,255,cv2.NORM_MINMAX))
cv2.imshow('img-blurred', img_blurred)
cv2.imshow('img-deblurred-uncropped', img_deblurred)
cv2.imshow('img-deblurred-cropped', img_deblurred_cropped)
cv2.waitKey(0)
cv2.destroyAllWindows()

The results:

[images: padded input / padded kernel / blurred image / deblurred (uncropped) / deblurred (cropped)]


Solution

  • Side note:
    Thanks to @Cris Luengo, the issues were identified and corrected. Here is a summary of all the points addressed in the comment section which led to the final solution:

    Long Answer:
    There are several issues at play here, which are as follows:

    1. The Wiener deconvolution algorithm is implemented incorrectly; the correct form needs a multiplication, not a division:
       # Wiener deconvolution
       fft_psf_mag = np.conj(fft_psf) / (np.abs(fft_psf)**2 + 10)
       fft_result = fft * fft_psf_mag
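       # i.e. the standard Wiener filter: F_hat = conj(H) / (|H|^2 + 1/SNR) * G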
    
    
    2. The +10 component, i.e. the epsilon value, is far too large; it represents 1/SNR. Once the multiplication fix is applied, a smaller epsilon yields a sharper result, and the larger the value, the blurrier the outcome. Values around 0.01 or 0.001 are good starting points. (Remember you can always use trackbars and watch the changes in real time to find the best value; see the sketch below.)
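
    For instance, a minimal sketch (assuming the single-channel fft and the fft_psf from the snippets above; the window name and the x1000 slider scale are my own arbitrary choices) of tuning epsilon interactively with an OpenCV trackbar:

    def on_change(pos):
        eps = max(pos, 1) / 1000.0  # map the slider position to eps in (0, 1]
        wiener = np.conj(fft_psf) / (np.abs(fft_psf)**2 + eps)
        result = np.fft.ifft2(fft * wiener).real.clip(0, 255).astype(np.uint8)
        cv2.imshow('deblurred', result)

    cv2.namedWindow('deblurred')
    cv2.createTrackbar('eps x1000', 'deblurred', 10, 1000, on_change)
    on_change(10)  # render once with the initial slider value
    cv2.waitKey(0)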

    3. The way the kernel is being padded is wrong. fft_psf = np.fft.fftn(kernel, s=fft.shape) will put the kernel at the top-left corner. This doesn't cause any obvious change in the output, but as @Cris Luengo explains:

    "If you pad only to the right and bottom, then the kernel is shifted by some small amount from the true origin. The larger the kernel, the larger this shift is. You might not have noticed the small shift in the output, but it's there, and if you overlay the result and original images you will see it."

    Note that we're dealing with the spatial domain here (i.e. it's about the position of the kernel in the image before applying the FFT).

    To cut a long story short, the proper way is to pad the kernel so that it sits at the center of the image, then shift it so that it wraps around to the corners of the image, i.e. do:

    
    pad_height = img.shape[0] - kernel.shape[0] 
    pad_width = img.shape[1] - kernel.shape[1]
    pad_top = (pad_height+1)//2
    pad_bottom = pad_height//2
    pad_left = (pad_width+1)//2
    pad_right = pad_width//2
    kernel_padded = np.pad(kernel, [(pad_top, pad_bottom),(pad_left, pad_right)],mode='constant')
    kernel_padded = np.fft.fftshift(kernel_padded)
    fft_psf = np.fft.fft2(kernel_padded)
    

    Here's how the output looks when the wrong vs. the right kernel is used. Note that the outputs seem identical at first glance, but when overlaid on the original image, the wrong one reveals its shifted nature! (We can also see artifacts in this case, especially towards the bottom and the bottom-right corner.)
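
    A tiny 1-D illustration (my own sketch, not from the post) of the shift: the same 3-tap box blur is applied via the FFT once with the kernel wrapped around the origin (correct) and once padded only on one side (wrong); the centre of mass of the result moves by one sample.

    import numpy as np

    signal = np.zeros(16); signal[8] = 1.0                  # a single impulse
    kernel = np.ones(3) / 3.0                               # a 3-tap box blur
    centered = np.zeros(16); centered[[15, 0, 1]] = kernel  # wrapped around the origin (correct)
    corner = np.zeros(16); corner[:3] = kernel              # naive one-sided padding (wrong)
    blur_ok = np.fft.ifft(np.fft.fft(signal) * np.fft.fft(centered)).real
    blur_off = np.fft.ifft(np.fft.fft(signal) * np.fft.fft(corner)).real
    idx = np.arange(16)
    print((blur_ok * idx).sum(), (blur_off * idx).sum())    # 8.0 vs 9.0: shifted by one sample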

    4. To get rid of the artifacts in the final image, one can expand the image by padding all 4 sides, run the operation, and finally crop the deblurred result back to the original image size. By avoiding sharp high-frequency drop-offs at the borders, we avoid the artifacts. (edgetaper can be used to deal with ringing artifacts as well; here's another implementation in C++, and a Python implementation is provided below.)

    pad = 50
    img = cv2.copyMakeBorder(img, pad, pad, pad, pad, borderType=cv2.BORDER_REFLECT)
    ...
    # crop the final image
    img_deblurred_cropped = img_deblurred[pad:img.shape[0]-pad, pad:img.shape[1]-pad]
    

    edgetaper in Python:

    def edgetaper(img, gamma=5, beta=0.2):
        height, width = img.shape[:2]  # note: img.shape is (rows, cols) = (height, width)
        dx = 2 * np.pi / width
        dy = 2 * np.pi / height
        # subtract dx and dy to match original function's range
        x = np.linspace(-np.pi, np.pi-dx, width)
        y = np.linspace(-np.pi, np.pi-dy, height)
        w1 = 0.5 * (np.tanh((x + gamma / 2) / beta) - np.tanh((x - gamma / 2) / beta))
        w2 = 0.5 * (np.tanh((y + gamma / 2) / beta) - np.tanh((y - gamma / 2) / beta))
        w = np.dot(w2.reshape(-1, 1), w1.reshape(1, -1))    
        if img.ndim>2:
            w = w[:, :, np.newaxis].repeat(img.shape[2], axis=2)
        return cv2.multiply(img.astype(np.float32), w.astype(np.float32)).clip(0,255).astype(np.uint8)
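
    Hypothetical usage (a sketch; gamma and beta are just the defaults above): taper the blurred image before taking its FFT, so the borders fall off smoothly instead of wrapping around with a sharp discontinuity:

    img_blurred_tapered = edgetaper(img_blurred, gamma=5, beta=0.2)
    ffts = np.fft.fftn(img_blurred_tapered)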
    

    Side Notes:

    1. Use the real components instead of their magnitudes, and remember to do proper clipping (i.e. clip(0, 255)) before casting to uint8. From Cris Luengo:

    Taking the absolute value, though common, is incorrect. For example, you might want to apply a filter to an image that contains negative values, or apply a filter that produces negative values. Taking the absolute value here would create artefacts. If the output of the inverse FFT contains imaginary values significantly different from zero, then there is an error in the way that the filtering kernel was padded.
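
    A tiny numeric sketch (my own example) of that failure mode when casting to uint8:

    import numpy as np

    vals = np.array([-20.0, 100.0, 300.0])     # a signed, out-of-range filter output
    print(np.abs(vals).astype(np.uint8))       # [ 20 100  44]: -20 flips sign, 300 wraps (platform-dependent)
    print(vals.clip(0, 255).astype(np.uint8))  # [  0 100 255]: clipping behaves as expected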

    2. To deblur a color image, simply apply the Wiener deconvolution to each channel separately, then aggregate them. That is:
    ffts = np.fft.fftn(img_blurred)
    ...
    fft_psf_mag = np.conj(fft_psf)/(np.abs(fft_psf)**2 + 1/snr)
    # individually multiply each channel and then aggregate them
    fft_result = np.array([ffts[...,i] * fft_psf_mag for i in range(3)]).transpose(1,2,0)
    # now lets get back the image.
    iffts = np.fft.ifftn(fft_result)
    # clip and crop to get the final image
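    (Note that np.fft.fftn on an (H, W, 3) array also transforms along the channel axis; since the very same fft_psf_mag multiplies every channel slice, that extra transform is undone exactly by np.fft.ifftn, so the result matches filtering each channel with a 2-D FFT separately.)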
    
    3. Always make sure the imaginary components of your final IFFT are close to zero; otherwise something is wrong!
    assert np.all(np.isclose(iffts.imag, 0, atol=1e-8)), 'imaginary values must be close to zero! or something is wrong'
    
    

    Here is the complete code for deblurring color images using Wiener deconvolution:

    import cv2
    import numpy as np

    def add_motion_blur(img, size=31, angle=11):
        kernel = np.zeros((size,size), dtype=np.float32)
        # Set the middle row of the kernel array to be all ones. 
        kernel[size//2,:] = np.ones((size,),dtype=np.float32)
        # Now rotate the kernel
        m = cv2.getRotationMatrix2D((size//2, size//2), angle=angle, scale=1.0)
        kernel = cv2.warpAffine(kernel, m, dsize=(size,size))
        kernel /= np.sum(kernel)
        return cv2.filter2D(img, ddepth=-1, kernel=kernel), kernel
    
    img_org = cv2.imread('./img/peppers.jpg')
    # let's add 50px of padding to each side of our image
    pad=50
    img = cv2.copyMakeBorder(img_org, pad,pad,pad,pad,borderType=cv2.BORDER_REFLECT)
    
    img_blurred, kernel = add_motion_blur(img, size=31, angle=60)
    
    pad_height = img.shape[0] - kernel.shape[0] 
    pad_width = img.shape[1] - kernel.shape[1]
    pad_top = (pad_height+1)//2
    pad_bottom = pad_height//2
    pad_left = (pad_width+1)//2
    pad_right = pad_width//2
    kernel_padded = np.pad(kernel, [(pad_top, pad_bottom),(pad_left, pad_right)],mode='constant')
    kernel_padded = np.fft.fftshift(kernel_padded)
    fft_psf = np.fft.fft2(kernel_padded)
    
    # now let's take the fft of our image
    ffts = np.fft.fftn(img_blurred)
    
    # Wiener deconvolution
    # instead of eps, use snr
    snr = 100
    fft_psf_mag = np.conj(fft_psf)/(np.abs(fft_psf)**2 + 1/snr)
    # individually multiply each channel and then aggregate them
    fft_result = np.array([ffts[...,i] * fft_psf_mag for i in range(3)]).transpose(1,2,0)
    
    # now let's get back the image.
    iffts = np.fft.ifftn(fft_result)
    
    # make sure the imaginary components are close to zero
    assert np.all(np.isclose(iffts.imag, 0, atol=1e-8)), 'imaginary values must be close to zero! or something is wrong'
    
    # take the image 
    img_deblurred = iffts.real.clip(0,255).astype(np.uint8)
    # crop the final image
    img_deblurred_cropped = img_deblurred[pad:img.shape[0]-pad, pad:img.shape[1]-pad]
    
    # When the right kernel with proper padding and shifting is used, this looks identical to the original image;
    # otherwise you can see that one image is shifted, or worse, the whole image looks corrupted because the wrong kernel was used!
    overlay = cv2.addWeighted(img_org, 0.5, img_deblurred_cropped, 0.5, 0)
    
    cv2.imshow('img',img)
    cv2.imshow('kernel_padded', cv2.normalize(kernel_padded,None,0,255,cv2.NORM_MINMAX))
    cv2.imshow('img-blurred', img_blurred)
    cv2.imshow('img-deblurred-uncropped', img_deblurred)
    cv2.imshow('img-deblurred-cropped', img_deblurred_cropped)
    cv2.imshow('overlay',overlay)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
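
    Optionally (a sketch of mine; cv2.PSNR exists in recent OpenCV builds), the quality of the restoration can be quantified instead of only eyeballing the overlay:

    print('PSNR blurred   :', cv2.PSNR(img_org, img_blurred[pad:-pad, pad:-pad]))
    print('PSNR deblurred :', cv2.PSNR(img_org, img_deblurred_cropped))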
    

    The outputs:

    [images: padded input / padded kernel / blurred image / deblurred (uncropped) / deblurred (cropped) / overlay]