
Improving accuracy of Homography + SIFT method

I'm using Homography + SIFT to find, for each object pixel in the source image, its position in the destination image. I get very good matching with SIFT, but when I tested the homography result, some points of the source image are placed at the wrong position in the destination image. For example: source image, destination image, matching image.
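
To make clear what I expect from the homography: cv.perspectiveTransform maps each pixel (x, y) by multiplying with the 3x3 matrix M and dividing by the third homogeneous coordinate. A minimal sketch of that mapping, with a made-up matrix used purely for illustration:

import numpy as np

# Hypothetical 3x3 homography, only for illustration.
M = np.array([[1.0,  0.02,  5.0],
              [0.01, 1.0,  -3.0],
              [1e-4, 0.0,   1.0]])

def apply_homography(M, x, y):
    # Lift (x, y) to homogeneous coordinates, multiply by M,
    # then divide by the third component to get back to pixel coordinates.
    p = M @ np.array([x, y, 1.0])
    return p[0] / p[2], p[1] / p[2]

print(apply_homography(M, 100, 200))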

This is my code:

import cv2 as cv
import numpy as np
from matplotlib import pyplot as plt

# Collect the (x, y) coordinates of every nonzero pixel of the object mask.
image1_points = []
img = cv.imread('../dataset/masks-crop/7.png', cv.IMREAD_ANYDEPTH)   # object mask (query image)

plt.imshow(img)
for i in range(img.shape[0]):
    for j in range(img.shape[1]):
        if img[i, j] > 0:
            image1_points.append(j)   # x coordinate (column)
            image1_points.append(i)   # y coordinate (row)
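
As an aside, the same nonzero-pixel collection can be done without the Python double loop. A sketch assuming the same mask file (not what I used above):

mask = cv.imread('../dataset/masks-crop/7.png', cv.IMREAD_ANYDEPTH)
ys, xs = np.nonzero(mask)   # row (y) and column (x) indices of nonzero pixels
pts_vec = np.stack([xs, ys], axis=1).astype(np.float32).reshape(-1, 1, 2)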
            
#-------------------------------------------------------------------------------


MIN_MATCH_COUNT = 10
img1 = cv.imread('../dataset/views-crop/view_7/016.png', cv.IMREAD_ANYDEPTH)   # source image
img2 = cv.imread('../dataset/views-crop/view_8/024.png', cv.IMREAD_ANYDEPTH)   # destination image
# Initiate SIFT detector
sift = cv.SIFT_create()
# SIFT needs 8-bit input, so rescale the 16-bit images to 0-255.
img1 = cv.normalize(img1, None, 0, 255, cv.NORM_MINMAX, dtype=cv.CV_8U)
img2 = cv.normalize(img2, None, 0, 255, cv.NORM_MINMAX, dtype=cv.CV_8U)
# find the keypoints and descriptors with SIFT
kp1, des1 = sift.detectAndCompute(img1, None)
kp2, des2 = sift.detectAndCompute(img2, None)
# FLANN-based matcher with a KD-tree index, 2 nearest neighbours per descriptor
FLANN_INDEX_KDTREE = 1
index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
search_params = dict(checks=50)
flann = cv.FlannBasedMatcher(index_params, search_params)
matches = flann.knnMatch(des1, des2, k=2)
# store all the good matches as per Lowe's ratio test
good = []
for m, n in matches:
    if m.distance < 0.75 * n.distance:
        good.append(m)
        
#-------------------------------------------------------------------------------
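
As an alternative to the FLANN + ratio-test step, a brute-force matcher with cross-checking could be used. This is only a sketch of that variant (reusing des1/des2 from above), not what produced the images shown here:

# Mutual-nearest-neighbour matching; with crossCheck=True Lowe's ratio test is not applied.
bf = cv.BFMatcher(cv.NORM_L2, crossCheck=True)
good_bf = sorted(bf.match(des1, des2), key=lambda m: m.distance)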
    
if len(good) >= MIN_MATCH_COUNT:
    src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
    dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
    # 50.0 is the RANSAC reprojection threshold in pixels.
    M, mask = cv.findHomography(src_pts, dst_pts, cv.RANSAC, 50.0)

    matchesMask = mask.ravel().tolist()
    h, w = img1.shape
    print(len(image1_points))
    # Map every object pixel of the source mask into the destination image.
    pts = np.float32(image1_points).reshape(-1, 1, 2)
    dst = cv.perspectiveTransform(pts, M)

else:
    print("Not enough matches are found - {}/{}".format(len(good), MIN_MATCH_COUNT))
    matchesMask = None
    
#-------------------------------------------------------------------------------
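
To check how well the estimated homography actually fits, I can measure the reprojection error of the RANSAC inliers. A small sketch reusing M, mask, src_pts and dst_pts from above:

inliers = mask.ravel().astype(bool)
proj = cv.perspectiveTransform(src_pts[inliers], M)            # where the inlier keypoints land
err = np.linalg.norm(proj - dst_pts[inliers], axis=2).ravel()  # per-point distance in pixels
print('inliers: {}/{}  mean err: {:.2f}px  max err: {:.2f}px'.format(
    inliers.sum(), len(good), err.mean(), err.max()))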

    
draw_params = dict(matchColor = (0,255,0), # draw matches in green color
                   singlePointColor = None,
                   matchesMask = matchesMask, # draw only inliers
                   flags = 2)
img3 = cv.drawMatches(img1,kp1,img2,kp2,good,None,**draw_params)
plt.imshow(img3, 'gray')
plt.show()
cv.imwrite('./matching.png', img3)

#-------------------------------------------------------------------------------


i1 = cv.imread('../dataset/views-crop/view_7/016.png', cv.IMREAD_ANYDEPTH)   # source image
i2 = cv.imread('../dataset/views-crop/view_8/024.png', cv.IMREAD_ANYDEPTH)   # destination image

# Spot-check a handful of mask pixels: draw each one on the source image and
# draw its transformed position on the destination image.
sample_indices = [4000, 10000, 30000, 20000, 500, 22000, 21600, 21550, 21690,
                  23690, 23990, 25690, 12000, 12800, 12900, 28200]

for idx in sample_indices:
    i1 = cv.circle(i1, (int(pts[idx][0][0]), int(pts[idx][0][1])), 1, [255, 0, 0], 8)
    i2 = cv.circle(i2, (int(round(dst[idx][0][0])), int(round(dst[idx][0][1]))), 1, [255, 0, 0], 8)

plt.imshow(i1)
plt.show()
plt.imshow(i2)
plt.show()

cv.imwrite('./testing/i1.png',i1)
cv.imwrite('./testing/i2.png',i2)
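
Instead of spot-checking individual pixel indices, the whole mask could also be warped into the destination view in one call and inspected as an image. A sketch assuming the same mask file and the M and i2 from above:

mask_src = cv.imread('../dataset/masks-crop/7.png', cv.IMREAD_ANYDEPTH)
h2, w2 = i2.shape[:2]
# Warp the full object mask with the estimated homography into the destination view.
mask_warped = cv.warpPerspective(mask_src, M, (w2, h2))
cv.imwrite('./testing/mask_warped.png', mask_warped)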

Edit

After I increased the number of matches: source image, destination image, matching image.

Tags: python, opencv, computer-vision, sift
