Compared to feature point detection and description, detecting and matching
line segments offer additional challenges. Yet, line features represent a
promising complement to points for multi-view tasks. Lines are indeed
well-defined by the image gradient, frequently appear even in poorly textured
areas and offer robust structural cues. We thus hereby introduce the first
joint detection and description of line segments in a single deep network.
Thanks to a self-supervised training, our method does not require any annotated
line labels and can therefore generalize to any dataset. Our detector offers
repeatable and accurate localization of line segments in images, departing from
the wireframe parsing approach. Leveraging the recent progress in descriptor
learning, our proposed line descriptor is highly discriminative, while
remaining robust to viewpoint changes and occlusions. We evaluate our approach
against previous line detection and description methods on several multi-view
datasets created with homographic warps as well as real-world viewpoint
changes. Our full pipeline yields higher repeatability, localization accuracy
and matching metrics, and thus represents a first step to bridge the gap with
learned feature point methods. Code and trained weights are available at
https://github.com/cvg/SOLD2.
%0 Generic
%1 pautrat2021sold2
%A Pautrat, Rémi
%A Lin, Juan-Ting
%A Larsson, Viktor
%A Oswald, Martin R.
%A Pollefeys, Marc
%D 2021
%K cvpr21 detection line matching selfsupervised
%T SOLD2: Self-supervised Occlusion-aware Line Description and Detection
%U http://arxiv.org/abs/2104.03362
%X Compared to feature point detection and description, detecting and matching
line segments offer additional challenges. Yet, line features represent a
promising complement to points for multi-view tasks. Lines are indeed
well-defined by the image gradient, frequently appear even in poorly textured
areas and offer robust structural cues. We thus hereby introduce the first
joint detection and description of line segments in a single deep network.
Thanks to a self-supervised training, our method does not require any annotated
line labels and can therefore generalize to any dataset. Our detector offers
repeatable and accurate localization of line segments in images, departing from
the wireframe parsing approach. Leveraging the recent progresses in descriptor
learning, our proposed line descriptor is highly discriminative, while
remaining robust to viewpoint changes and occlusions. We evaluate our approach
against previous line detection and description methods on several multi-view
datasets created with homographic warps as well as real-world viewpoint
changes. Our full pipeline yields higher repeatability, localization accuracy
and matching metrics, and thus represents a first step to bridge the gap with
learned feature point methods. Code and trained weights are available at
https://github.com/cvg/SOLD2.
@misc{pautrat2021sold2,
abstract = {Compared to feature point detection and description, detecting and matching
line segments offer additional challenges. Yet, line features represent a
promising complement to points for multi-view tasks. Lines are indeed
well-defined by the image gradient, frequently appear even in poorly textured
areas and offer robust structural cues. We thus hereby introduce the first
joint detection and description of line segments in a single deep network.
Thanks to a self-supervised training, our method does not require any annotated
line labels and can therefore generalize to any dataset. Our detector offers
repeatable and accurate localization of line segments in images, departing from
the wireframe parsing approach. Leveraging the recent progresses in descriptor
learning, our proposed line descriptor is highly discriminative, while
remaining robust to viewpoint changes and occlusions. We evaluate our approach
against previous line detection and description methods on several multi-view
datasets created with homographic warps as well as real-world viewpoint
changes. Our full pipeline yields higher repeatability, localization accuracy
and matching metrics, and thus represents a first step to bridge the gap with
learned feature points methods. Code and trained weights are available at
https://github.com/cvg/SOLD2.},
added-at = {2021-12-16T13:41:24.000+0100},
author = {Pautrat, Rémi and Lin, Juan-Ting and Larsson, Viktor and Oswald, Martin R. and Pollefeys, Marc},
biburl = {https://www.bibsonomy.org/bibtex/2f092a0f073fbeeffd140e9b6365ec518/shuncheng.wu},
description = {https://arxiv.org/pdf/2104.03362.pdf},
interhash = {3ea41be846e2889641ea88ea62f7d16c},
intrahash = {f092a0f073fbeeffd140e9b6365ec518},
keywords = {cvpr21 detection line matching selfsupervised},
note = {cite arxiv:2104.03362Comment: 17 pages, Accepted at CVPR 2021 (Oral)},
timestamp = {2021-12-16T13:41:24.000+0100},
title = {SOLD2: Self-supervised Occlusion-aware Line Description and Detection},
url = {http://arxiv.org/abs/2104.03362},
year = 2021
}