feat: early draft PR for SLAM #611

Draft · wants to merge 34 commits into base: main

Commits (34)
6ab08b7
chore: reformat code and remove comment
fabianschenk Jun 4, 2020
bbf250a
Merge branch 'feat-datastructures-opensfm' into feat-datastructures-slam
fabianschenk Jun 4, 2020
3786a07
Merge branch 'feat-datastructures-opensfm' into feat-datastructures-slam
fabianschenk Jun 4, 2020
7f04c1d
Merge branch 'master' into feat-datastructures-slam
fabianschenk Jun 4, 2020
170423d
feat: slam draft, wip
fabianschenk Jun 6, 2020
f06cf2a
fix: projection error in matching
fabianschenk Jun 8, 2020
d7c8bff
fix: merge new data structures with SLAM
fabianschenk Jun 9, 2020
5a16764
feat: SLAM runs mostly but still tracking loss after a bit, wip
fabianschenk Jun 10, 2020
4d01a20
fix: replace landmarks correctly
fabianschenk Jun 11, 2020
7c0b12f
feat: running SLAM version
fabianschenk Jun 15, 2020
aa9e540
Merge branch 'feat-datastructures-opensfm' into feat-datastructures-slam
fabianschenk Jun 15, 2020
104eb1f
feat: early draft PR for SLAM
fabianschenk Jun 15, 2020
fbf8495
remove: remove unused slam_map
fabianschenk Jun 15, 2020
d39bc22
feat: track with local map, remove outliers correctly
fabianschenk Jun 16, 2020
cb094f2
chore: add missing include
paulinus Jun 16, 2020
dfbc23b
feat: add slam run scripts
fabianschenk Jun 16, 2020
d8b56d4
chore: code clean-up, add comments
fabianschenk Jun 16, 2020
3aa8fd7
chore: Removed unused includes
fabianschenk Jun 16, 2020
0eaa680
feat: pypangolin visualization
fabianschenk Jun 17, 2020
8d3f464
feat: pypangolin visualization
fabianschenk Jun 17, 2020
8a2ef80
feat: add boolean for turn on/off visualization
fabianschenk Jun 18, 2020
13ba3a2
feat: buffer points for better visualization
fabianschenk Jun 18, 2020
c9ec4e8
feat: improve robustness, add visualzation as parameter
fabianschenk Jun 22, 2020
1c0adf0
wip: slam debug
fabianschenk Jun 25, 2020
60c302d
merge pose
fabianschenk Jul 1, 2020
ad6f26f
fix: scale constant velocity model correctly
fabianschenk Jul 1, 2020
44f4a12
Merge branch 'feat-datastructures-opensfm' into feat-datastructures-slam
fabianschenk Jul 1, 2020
dc281d4
merge analytical derivatives
fabianschenk Jul 1, 2020
54eede2
merge opensfm and slam
fabianschenk Jul 1, 2020
b30aabf
chore: remove some leftovers from unclean merge
fabianschenk Jul 1, 2020
ead6b2e
wip: fix mapillary_sfm test
fabianschenk Jul 2, 2020
eb5325e
feat: data structure PR in slam
fabianschenk Jul 13, 2020
7333103
merge data structures
fabianschenk Jul 15, 2020
5f0101a
wip: slam robustness
fabianschenk Jul 15, 2020
8 changes: 8 additions & 0 deletions bin/slam_run
@@ -0,0 +1,8 @@
#!/usr/bin/env bash

set -e

DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
PYTHON=${3:-python3}
echo "Running using Python command: $PYTHON"
$PYTHON $DIR/../opensfm/slam/run_slam.py $1 $2
11 changes: 11 additions & 0 deletions bin/slam_run_all
@@ -0,0 +1,11 @@
#!/usr/bin/env bash

set -e

DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )
PYTHON=${2:-python3}

echo "Running using Python command: $PYTHON"

$PYTHON $DIR/opensfm extract_metadata $1
$PYTHON $DIR/../opensfm/slam/run_slam.py $1
1 change: 0 additions & 1 deletion opensfm/features.py
@@ -297,7 +297,6 @@ def build_flann_index(features, config):
FLANN_INDEX_KDTREE_SINGLE = 4
FLANN_INDEX_HIERARCHICAL = 5
FLANN_INDEX_LSH = 6

if features.dtype.type is np.float32:
algorithm_type = config['flann_algorithm'].upper()
if algorithm_type == 'KMEANS':
15 changes: 11 additions & 4 deletions opensfm/reconstruction.py
@@ -612,7 +612,6 @@ def two_view_reconstruction_general(p1, p2, camera1, camera2,
"""
R_5p, t_5p, inliers_5p = two_view_reconstruction(
p1, p2, camera1, camera2, threshold, iterations)

R_plane, t_plane, inliers_plane = two_view_reconstruction_plane_based(
p1, p2, camera1, camera2, threshold)

@@ -621,6 +620,11 @@
'plane_based_inliers': len(inliers_plane),
}

# A = np.load('general_init.np.npz')
# R_5p = A['R_5p']
# t_5p = A['t_5p']
# inliers_5p = A['inliers_5p']

if len(inliers_5p) > len(inliers_plane):
report['method'] = '5_point'
return R_5p, t_5p, inliers_5p, report
@@ -953,7 +957,6 @@ def triangulate_shot_features(tracks_manager, reconstruction, shot_id, config):
min_ray_angle = config['triangulation_min_ray_angle']

triangulator = TrackTriangulator(tracks_manager, reconstruction)

for track in tracks_manager.get_shot_observations(shot_id):
if track not in reconstruction.points:
triangulator.triangulate(track, reproj_threshold, min_ray_angle)
@@ -1121,8 +1124,12 @@ def merge_reconstructions(reconstructions, config):

def paint_reconstruction(data, tracks_manager, reconstruction):
"""Set the color of the points from the color of the tracks."""
for k, point in reconstruction.points.items():
point.color = list(map(float, next(iter(tracks_manager.get_track_observations(str(k)).values())).color))
if tracks_manager is None: # in the SLAM case
reconstruction.map.color_map()
else:
for k, point in reconstruction.points.items():
point.color =\
list(map(float, next(iter(tracks_manager.get_track_observations(str(k)).values())).color))


class ShouldBundle:
71 changes: 71 additions & 0 deletions opensfm/slam/run_slam.py
@@ -0,0 +1,71 @@
import os.path, sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
import slam_debug
from slam_system import SlamSystem
import argparse
import logging
logging.basicConfig(format='%(asctime)s,%(msecs)d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',
datefmt='%Y-%m-%d:%H:%M:%S',
level=logging.DEBUG)
from opensfm import dataset
from opensfm import io

logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)

parser = argparse.ArgumentParser()
parser.add_argument('dataset', help='dataset to process')
parser.add_argument('--visualize', dest='visualize', action='store_const',
const=True, default=False, help='run SLAM visualization')
args = parser.parse_args()
slam_system = SlamSystem(args)
data = dataset.DataSet(args.dataset)

def run_slam(data, slam_vis=None):
    # Process the dataset images in order, optionally skipping the first start_id frames
start_id = 0
n_kfs = 0
for idx, im_name in enumerate(sorted(data.image_list)):
if idx < start_id:
continue
# if (idx == 2):
# im_name = "000001.png"
gray_scale_img = io.imread(data._image_file(im_name), grayscale=True) # The gray-scale image

ret = slam_system.process_frame(im_name, gray_scale_img)

if slam_vis is not None and RUN_VISUALIZATION:
slam_vis.update_image(gray_scale_img)
# Update map only after KF insertion
if n_kfs != len(slam_system.slam_mapper.keyframes):
slam_system.reconstruction.map.color_map()
slam_vis.update_reconstruction(slam_system.reconstruction, slam_system.slam_mapper.keyframes)
n_kfs = len(slam_system.slam_mapper.keyframes)
if not slam_vis.is_running:
break

slam_debug.avg_timings.printAvgTimings()
if ret:
logger.info("Successfully tracked {}".format(im_name))
else:
logger.info("Trying to init with {}".format(im_name))

slam_system.slam_mapper.save_reconstruction(im_name + "_new_slam.json")


if __name__ == "__main__":
# For visualization
RUN_VISUALIZATION = args.visualize
if RUN_VISUALIZATION:
from opensfm import visualization
import numpy as np
import threading

if RUN_VISUALIZATION:
vis = visualization.Visualization(
data.load_image(data.image_list[0]).shape)
th = threading.Thread(target=run_slam, args=(data, vis,))
th.start()
vis.run_visualization()
else:
run_slam(data)
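
run_slam.py is the command-line driver; with visualization disabled it reduces to constructing a SlamSystem and feeding it gray-scale frames in order. A minimal headless sketch of that loop, reusing only calls that appear in the script above (the dataset path and output file name are hypothetical placeholders):

import argparse

from opensfm import dataset, io
from slam_system import SlamSystem  # assumes opensfm/slam is on sys.path, as run_slam.py arranges

args = argparse.Namespace(dataset="data/my_sequence", visualize=False)  # hypothetical path
slam_system = SlamSystem(args)
data = dataset.DataSet(args.dataset)

for im_name in sorted(data.image_list):
    gray = io.imread(data._image_file(im_name), grayscale=True)
    slam_system.process_frame(im_name, gray)  # returns True once the frame is tracked

slam_system.slam_mapper.save_reconstruction("my_sequence_new_slam.json")
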
32 changes: 32 additions & 0 deletions opensfm/slam/slam_config.py
@@ -0,0 +1,32 @@
import yaml
default_config_yaml = '''
# Metadata
extract_features: True # False = load from disk

refine_with_local_map: True
tracker_lk: False
match_symm: True

run_local_ba_every_nth: 1

# Feature parameters
feat_cell_size: 64
feat_cell_overlap: 6
feat_pyr_levels: 8
feat_scale: 1.2
feat_max_number: 4000

# FAST
feat_fast_ini_th: 20
feat_fast_min_th: 7

# grid parameters (from OpenVSlam)
grid_n_cols: 64
grid_n_rows: 48


'''

def default_config():
"""Return default configuration"""
return yaml.safe_load(default_config_yaml)
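
The SLAM settings above live in their own YAML block rather than in the main OpenSfM config. A minimal sketch of how they could be consumed; the dict-update merge for per-dataset overrides is an assumption, since the PR itself only defines default_config():

import slam_config  # importable the same way run_slam.py imports its slam modules

config = slam_config.default_config()   # parsed from default_config_yaml above
overrides = {"feat_max_number": 2000,   # fewer features per frame
             "tracker_lk": True}        # illustrative per-dataset choice
config.update(overrides)                # assumed merge strategy: plain dict update

assert config["feat_pyr_levels"] == 8   # untouched keys keep their defaults
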
212 changes: 212 additions & 0 deletions opensfm/slam/slam_debug.py
@@ -0,0 +1,212 @@
import logging
import matplotlib.pyplot as plt
import numpy as np
import cv2

from timeit import default_timer as timer
from collections import defaultdict
from opensfm import features
from opensfm import pygeometry
from opensfm import pyslam
logger = logging.getLogger(__name__)

disable_debug = True


class AvgTimings(object):
def __init__(self):
self.times = defaultdict(float)
self.n_mean = defaultdict(int)

def addTimes(self, timings):
for (_, (k, v, _)) in timings.items():
self.times[k] += v
self.n_mean[k] += 1

def printAvgTimings(self):
for (k, v) in self.n_mean.items():
print("{} with {} runs: {}s".format(k, v, self.times[k]/v))


avg_timings = AvgTimings()


class Chronometer:
def __init__(self):
self.start()

def start(self):
t = timer()
lap = ('start', 0, t)
self.laps = [lap]
self.laps_dict = {'start': lap}

def lap(self, key):
t = timer()
dt = t - self.laps[-1][2]
lap = (key, dt, t)
self.laps.append(lap)
self.laps_dict[key] = lap

def lap_time(self, key):
return self.laps_dict[key][1]

def lap_times(self):
return [(k, dt) for k, dt, t in self.laps[1:]]

def total_time(self):
return self.laps[-1][2] - self.laps[0][2]


def check_shot_for_double_entries(shot):
added_lms = {}
for lm, idx in shot.get_valid_landmarks_and_indices():
if lm in added_lms:
print("double!!!", lm.id, idx, added_lms[lm])
exit(0)
else:
added_lms[lm] = idx


def visualize_graph(graph, frame1: str, frame2: str, data, do_show=True):
if disable_debug:
return
print("visualize_graph: ", frame1, frame2)
lms = graph[frame1]
pts2D_1 = []
pts2D_2 = []
for lm_id in lms:
obs2 = \
graph.get_edge_data(str(frame2), str(lm_id))
if obs2 is not None:
obs1 = \
graph.get_edge_data(str(frame1), str(lm_id))
pts2D_1.append(obs1['feature'])
pts2D_2.append(obs2['feature'])
if len(pts2D_1) == 0:
return
im1 = data.load_image(frame1)
im2 = data.load_image(frame2)
h1, w1, c = im1.shape
fig, ax = plt.subplots(1)

obs_d1 = features.\
denormalized_image_coordinates(np.asarray(pts2D_1), w1, h1)
obs_d2 = features.\
denormalized_image_coordinates(np.asarray(pts2D_2), w1, h1)
print("len(obs_d1): ", len(obs_d1), "len(obs_d2): ", len(obs_d2))
im = np.hstack((im1, im2))
ax.imshow(im)
ax.scatter(obs_d1[:, 0], obs_d1[:, 1], c=[[0, 1, 0]])
ax.scatter(w1+obs_d2[:, 0], obs_d2[:, 1], c=[[0, 1, 0]])
ax.set_title(frame1 + "<->" + frame2)

if do_show:
plt.show()


def reproject_landmarks(points3D, observations, T_world_to_cam,
im, camera, title="", obs_normalized=True, do_show=True):
"""Draw observations and reprojects observations into image"""
if disable_debug:
return
if points3D is None: # or observations is None:
return
if len(points3D) == 0: # or len(observations) == 0:
return
pose_world_to_cam = pygeometry.Pose()
pose_world_to_cam.set_rotation_matrix(T_world_to_cam[0:3, 0:3])
pose_world_to_cam.translation = T_world_to_cam[0:3, 3]
legend = ['reproj']
camera_point = pose_world_to_cam.transform_many(points3D)
points2D = camera.project_many(camera_point)
fig, ax = plt.subplots(1)
if len(im.shape) == 3:
h1, w1, c = im.shape
else:
h1, w1 = im.shape
pt = features.denormalized_image_coordinates(points2D, w1, h1)
ax.imshow(im)
ax.scatter(pt[:, 0], pt[:, 1], c=[[1, 0, 0]])
if observations is not None:
if obs_normalized:
obs = features.denormalized_image_coordinates(observations, w1, h1)
else:
obs = observations
ax.scatter(obs[:, 0], obs[:, 1], c=[[0, 1, 0]])
legend.append('observation')
ax.set_title(title)
ax.legend(legend)
if do_show:
plt.show()


def visualize_matches_pts(pts1, pts2, matches, im1, im2, is_normalized=True, do_show=True, title=""):
if disable_debug:
return
if matches is None:
matches = np.column_stack((np.arange(len(pts1)), np.arange(len(pts1))))
if len(matches) == 0:
return
if len(im1.shape) == 3:
h1, w1, c = im1.shape
else:
h1, w1 = im1.shape

pts1 = np.asarray(pts1)
pts2 = np.asarray(pts2)
fig, ax = plt.subplots(1)
im = np.hstack((im1, im2))
if is_normalized:
obs_d1 = features.\
denormalized_image_coordinates(pts1[matches[:, 0]], w1, h1)
obs_d2 = features.\
denormalized_image_coordinates(pts2[matches[:, 1]], w1, h1)
else:
obs_d1, obs_d2 = pts1[matches[:, 0]], pts2[matches[:, 1]]
ax.imshow(im)
skip = 1
ax.scatter(obs_d1[:, 0], obs_d1[:, 1], c=[[0, 1, 0]])
ax.scatter(w1+obs_d2[:, 0], obs_d2[:, 1], c=[[0, 1, 0]])
# for i1, i2 in matches:
# ax.text(w1+obs_d2[i2, 0], obs_d2[i2, 1], str(i2))
for a, b in zip(obs_d1[::skip, :], obs_d2[::skip, :]):
ax.plot([a[0], b[0] + w1], [a[1], b[1]])
ax.set_title(title)
if do_show:
plt.show()


def visualize_tracked_lms(points2D, shot, data, is_normalized=False):
# if disable_debug:
# return
im1 = data.load_image(shot.id)
h1, w1, c = im1.shape
im1 = cv2.cvtColor(im1, cv2.COLOR_RGB2BGR)
if is_normalized:
p1d = np.array(features.denormalized_image_coordinates(
points2D, w1, h1), dtype=int)
for x, y in p1d:
cv2.drawMarker(im1, (x, y), (255, 0, 0),
markerType=cv2.MARKER_SQUARE, markerSize=10)
else:
        for x, y, _ in points2D:
cv2.drawMarker(im1, (x, y), (255, 0, 0),
markerType=cv2.MARKER_SQUARE, markerSize=10)
cv2.imwrite("./debug/track_" + shot.id, im1)


def visualize_lms_shot(shot, im, title="reproj", show=True):
    if disable_debug is False:
        pose = shot.get_pose()
        lms = shot.get_valid_landmarks()
        points2D = pyslam.SlamUtilities.get_valid_kpts_from_shot(shot)
        points3D = np.zeros((len(lms), 3), dtype=np.float64)
        for idx, lm in enumerate(lms):
            points3D[idx, :] = lm.get_global_pos()
        reproject_landmarks(points3D, points2D,
                            pose.get_world_to_cam(),
                            im,
                            shot.camera, title=title,
                            obs_normalized=True, do_show=show)
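
The Chronometer and AvgTimings helpers are meant to be used together: each lap stores a (key, dt, t) tuple in laps_dict, which is exactly the shape addTimes() unpacks. A minimal usage sketch, with illustrative lap names:

chrono = Chronometer()
# ... feature extraction for one frame ...
chrono.lap("extract")
# ... tracking against the local map ...
chrono.lap("track")

avg_timings.addTimes(chrono.laps_dict)  # accumulate per-key totals and run counts
avg_timings.printAvgTimings()           # mean seconds per key over all frames so far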