DLluop 2019-05-29 10:42 acceptance rate: 0%
339 views

Python + SIFT question: how do I produce an image where only arrows are drawn out of the feature points?

import cv2

img = cv2.imread('111.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
sift = cv2.xfeatures2d.SIFT_create()
kp = sift.detect(gray, None)
print(type(kp), type(kp[0]))
# <class 'list'> <class 'cv2.KeyPoint'>
print(len(kp), kp[0].pt)
# 1525 (20.279071807861328, 251.05355834960938)
# There are 1525 SIFT keypoints in total; this is the coordinate of the first one
des = sift.compute(gray, kp)
print(type(kp), type(des))
# <class 'list'> <class 'tuple'>
print(type(des[0]), type(des[1]))
# <class 'list'> <class 'numpy.ndarray'>
# des[0] is the list of keypoints, des[1] is the matrix of descriptors
print(des[1].shape)
# (1525, 128): 1525 SIFT features, each a 128-dimensional vector
kp, des = sift.detectAndCompute(gray, None)
print(type(kp), des[1].shape)
# <class 'list'> (128,)  -- here des is already the descriptor array, so des[1] is a single 128-dim row

img = cv2.drawKeypoints(gray, kp, outImage=None, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)

cv2.imshow('dst',img)
if cv2.waitKey(0) & 0xff == 27:
    cv2.destroyAllWindows()
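
For reference, each cv2.KeyPoint already exposes the position, scale, and orientation that an arrow would need (using kp from the code above):

k = kp[0]
print(k.pt)     # (x, y) position, e.g. (20.279..., 251.053...)
print(k.size)   # keypoint diameter (scale)
print(k.angle)  # orientation in degrees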

[image: output of cv2.drawKeypoints with DRAW_RICH_KEYPOINTS: each keypoint drawn as a circle with an orientation line]

An image like that shows the feature points together with their scale and orientation.
What I want is the image below:

[image: desired output: only an arrow drawn out of each feature point]

How do I write Python code to get an image where only arrows are drawn from the feature points?


1 answer

  • 你知我知皆知 2024-08-09 22:29

    The following answer was written with reference to content from free WeChat mini-programs such as 皆我百晓生 and 券券喵儿, and organized and posted by me.

    To get an image that shows only the feature points with an arrow coming out of each one, you do not need cv2.drawKeypoints at all: every cv2.KeyPoint stores its position (pt), scale (size), and orientation in degrees (angle), so you can loop over the keypoints and draw one arrow per point with cv2.arrowedLine. Here is an example:

    import cv2
    import numpy as np

    # Read the image and detect SIFT keypoints, as in your code
    img = cv2.imread('111.jpg')
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

    sift = cv2.xfeatures2d.SIFT_create()
    kp = sift.detect(gray, None)

    # Draw one arrow per keypoint instead of the circles produced by drawKeypoints
    out = img.copy()
    for k in kp:
        x, y = k.pt                     # keypoint position
        angle = np.deg2rad(k.angle)     # orientation is stored in degrees
        length = k.size                 # use the keypoint scale as the arrow length; adjust to taste
        x2 = int(round(x + length * np.cos(angle)))
        y2 = int(round(y + length * np.sin(angle)))
        cv2.arrowedLine(out, (int(round(x)), int(round(y))), (x2, y2),
                        color=(0, 255, 0), thickness=1, tipLength=0.3)

    cv2.imshow('keypoint arrows', out)
    if cv2.waitKey(0) & 0xff == 27:
        cv2.destroyAllWindows()
    

    In this example the SIFT keypoints are detected on the grayscale image exactly as in your code, but instead of calling cv2.drawKeypoints the arrows are drawn manually. For each keypoint, kp.pt gives the starting point, kp.angle gives the direction (in degrees, converted to radians first), and kp.size gives a natural arrow length; cv2.arrowedLine then draws the arrow onto a copy of the original image.

    Because nothing else is drawn, the result contains only the arrows, without the circles that the DRAW_RICH_KEYPOINTS flag adds. You can change the color, thickness, and tipLength, or multiply k.size by a scale factor, to adjust how the output looks.
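
    If you prefer to do the plotting with matplotlib, a quiver plot gives a similar result. This is only a minimal sketch under the same assumptions as above (the image is 111.jpg and SIFT comes from cv2.xfeatures2d):

    import cv2
    import numpy as np
    import matplotlib.pyplot as plt

    img = cv2.imread('111.jpg')
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    kp = cv2.xfeatures2d.SIFT_create().detect(gray, None)

    # Collect position, orientation, and scale of every keypoint
    x = np.array([k.pt[0] for k in kp])
    y = np.array([k.pt[1] for k in kp])
    angles = np.deg2rad([k.angle for k in kp])
    lengths = np.array([k.size for k in kp])

    # quiver draws one arrow per keypoint; image coordinates have y pointing down,
    # which matches OpenCV's angle convention once the image is shown with imshow
    plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    plt.quiver(x, y, lengths * np.cos(angles), lengths * np.sin(angles),
               angles='xy', scale_units='xy', scale=1, color='lime', width=0.002)
    plt.axis('off')
    plt.show()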

