diff --git a/src/Dockerfile b/src/Dockerfile
new file mode 100644
index 0000000..58b7ac4
--- /dev/null
+++ b/src/Dockerfile
@@ -0,0 +1,20 @@
+FROM python:3
+
+RUN apt-get update && \
+    apt-get install -y \
+    build-essential \
+    python-opencv \
+    libpcl-dev
+
+RUN pip install -U pip && \
+    pip install -U \
+    cython \
+    numpy \
+    scipy \
+    matplotlib \
+    nltk \
+    setuptools \
+    pylint \
+    pickle-mixin
+
+CMD ["/bin/bash"]
diff --git a/src/dataset/LICENSE.md b/src/dataset/LICENSE.md
new file mode 100644
index 0000000..39a0f1f
--- /dev/null
+++ b/src/dataset/LICENSE.md
@@ -0,0 +1,9 @@
+The NCLT Dataset is made available under the Open Database License [available here](https://opendatacommons.org/licenses/odbl/1.0/). Any rights in individual contents of the database are licensed under the Database Contents License [available here](https://opendatacommons.org/licenses/dbcl/1.0/).
+
+In short, this means that you are free to use, share, adapt, and create derivative works from this dataset, as long as you credit our work, offer any publicly used adapted version of this dataset under the same license, and keep any redistribution of this dataset open.
+
+# Citation
+Nicholas Carlevaris-Bianco, Arash K. Ushani, and Ryan M. Eustice, "University of Michigan North Campus Long-Term Vision and Lidar Dataset," International Journal of Robotics Research, 2016.
+
+# Website
+http://robots.engin.umich.edu/nclt/
\ No newline at end of file
diff --git a/src/dataset/ManageDataset/project_vel_to_cam.py b/src/dataset/ManageDataset/project_vel_to_cam.py
new file mode 100644
index 0000000..0356d4e
--- /dev/null
+++ b/src/dataset/ManageDataset/project_vel_to_cam.py
@@ -0,0 +1,166 @@
+#!/usr/bin/python
+#
+# Demonstrates how to project velodyne points into camera imagery. Requires a
+# binary velodyne sync file, an undistorted image, and assumes that the
+# calibration files are in the current directory.
+#
+# To use:
+#
+#   python project_vel_to_cam.py vel img cam_num
+#
+#   vel:     The velodyne binary file (timestamp.bin)
+#   img:     The undistorted image (timestamp.tiff)
+#   cam_num: The index (0 through 5) of the camera
+#
+
+import sys
+import struct
+import matplotlib.pyplot as plt
+import matplotlib.image as mpimg
+import numpy as np
+
+#from undistort import *
+
+def convert(x_s, y_s, z_s):
+
+    scaling = 0.005 # 5 mm
+    offset = -100.0
+
+    x = x_s * scaling + offset
+    y = y_s * scaling + offset
+    z = z_s * scaling + offset
+
+    return x, y, z
+
+def load_vel_hits(filename):
+
+    f_bin = open(filename, "rb")
+
+    hits = []
+
+    while True:
+
+        x_str = f_bin.read(2)
+        if x_str == b'': # eof
+            break
+
+        x = struct.unpack('<H', x_str)[0]
+
+    idx_infront = z_im > 0
+    x_im = x_im[idx_infront]
+    y_im = y_im[idx_infront]
+    z_im = z_im[idx_infront]
+
+    plt.figure(1)
+    plt.imshow(image)
+    plt.scatter(x_im, y_im, c=z_im, s=5, linewidths=0)
+    plt.xlim(0, 1616)
+    plt.ylim(0, 1232)
+    plt.show()
+
+    return 0
+
+if __name__ == '__main__':
+    sys.exit(main(sys.argv))
diff --git a/src/dataset/ManageDataset/read_gps.py b/src/dataset/ManageDataset/read_gps.py
new file mode 100644
index 0000000..169ab79
--- /dev/null
+++ b/src/dataset/ManageDataset/read_gps.py
@@ -0,0 +1,55 @@
+#!/usr/bin/python
+#
+# Example code to read and plot the gps data.
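+# The first GPS fix is used as the local origin, and latitude/longitude offsets
+# are converted to approximate metric x/y coordinates on a spherical Earth;
+# positions are then plotted colored by altitude and by number of satellites.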
+#
+# To call:
+#
+#   python read_gps.py gps.csv
+#
+
+import sys
+import matplotlib.pyplot as plt
+import numpy as np
+
+def main(args):
+
+    if len(sys.argv) < 2:
+        print('Please specify gps file')
+        return 1
+
+    gps = np.loadtxt(sys.argv[1], delimiter=",")
+
+    num_sats = gps[:, 2]
+    lat = gps[:, 3]
+    lng = gps[:, 4]
+    alt = gps[:, 5]
+
+    lat0 = lat[0]
+    lng0 = lng[0]
+
+    dLat = lat - lat0
+    dLng = lng - lng0
+
+    r = 6400000 # approx. radius of earth (m)
+    x = r * np.cos(lat0) * np.sin(dLng)
+    y = r * np.sin(dLat)
+
+    plt.figure()
+    plt.subplot(1, 2, 1)
+    plt.scatter(x, y, 1, c=alt, linewidth=0)
+    plt.axis('equal')
+    plt.title('By altitude')
+    plt.colorbar()
+
+    plt.subplot(1, 2, 2)
+    plt.scatter(x, y, c=num_sats, linewidth=0)
+    plt.axis('equal')
+    plt.title('By number of satellites')
+    plt.colorbar()
+
+    plt.show()
+
+    return 0
+
+if __name__ == '__main__':
+    sys.exit(main(sys.argv))
diff --git a/src/dataset/ManageDataset/read_ground_truth.py b/src/dataset/ManageDataset/read_ground_truth.py
new file mode 100644
index 0000000..eaac65c
--- /dev/null
+++ b/src/dataset/ManageDataset/read_ground_truth.py
@@ -0,0 +1,61 @@
+#!/usr/bin/python
+#
+# Example code to read and plot the ground truth data.
+#
+# Note: The ground truth data is provided at a high rate of about 100 Hz. To
+# generate this high rate ground truth, a SLAM solution was used. Nodes in the
+# SLAM graph were not added at 100 Hz, but rather about every 8 meters. In
+# between the nodes in the SLAM graph, the odometry was used to interpolate and
+# provide a high rate ground truth. If precise pose is desired (e.g., for
+# accumulating point clouds), then we recommend using only the ground truth
+# poses that correspond to the nodes in the SLAM graph. These can be found by
+# inspecting the timestamps in the covariance file.
+#
+# To call:
+#
+#   python read_ground_truth.py groundtruth.csv covariance.csv
+#
+
+import sys
+import matplotlib.pyplot as plt
+import numpy as np
+import scipy.interpolate
+
+def main(args):
+
+    if len(sys.argv) < 3:
+        print('Please specify ground truth and covariance files')
+        return 1
+
+    gt = np.loadtxt(sys.argv[1], delimiter=",")
+    cov = np.loadtxt(sys.argv[2], delimiter=",")
+
+    t_cov = cov[:, 0]
+
+    # Note: Interpolation is not needed; this is done as a convenience
+    interp = scipy.interpolate.interp1d(gt[:, 0], gt[:, 1:], kind='nearest', axis=0, fill_value="extrapolate")
+    pose_gt = interp(t_cov)
+
+    # NED (North, East, Down)
+    x = pose_gt[:, 0]
+    y = pose_gt[:, 1]
+    z = pose_gt[:, 2]
+
+    r = pose_gt[:, 3]
+    p = pose_gt[:, 4]
+    h = pose_gt[:, 5]
+
+    plt.figure()
+    plt.scatter(y, x, 1, c=-z, linewidth=0) # Note: Z points down
+    plt.axis('equal')
+    plt.title('Ground Truth Position of Nodes in SLAM Graph')
+    plt.xlabel('East (m)')
+    plt.ylabel('North (m)')
+    plt.colorbar()
+
+    plt.show()
+
+    return 0
+
+if __name__ == '__main__':
+    sys.exit(main(sys.argv))
diff --git a/src/dataset/ManageDataset/read_hokuyo_30m.py b/src/dataset/ManageDataset/read_hokuyo_30m.py
new file mode 100644
index 0000000..04bf11b
--- /dev/null
+++ b/src/dataset/ManageDataset/read_hokuyo_30m.py
@@ -0,0 +1,72 @@
+#!/usr/bin/python
+#
+# Example code to go through the hokuyo_30m.bin file, read timestamps and the
+# hits in each packet, and plot them.
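+# Each packet holds a fixed 1081-hit scan spanning -135 to +135 degrees at
+# 0.25 degree resolution; raw range values are rescaled to meters by convert()
+# (5 mm steps with a -100 m offset).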
+#
+# To call:
+#
+#   python read_hokuyo_30m.py hokuyo_30m.bin
+#
+
+import sys
+import struct
+import numpy as np
+import matplotlib.pyplot as plt
+
+def convert(x_s):
+
+    scaling = 0.005 # 5 mm
+    offset = -100.0
+
+    x = x_s * scaling + offset
+
+    return x
+
+def main(args):
+
+    if len(sys.argv) < 2:
+        print("Please specify input bin file")
+        return 1
+
+    # hokuyo_30m always has 1081 hits
+    num_hits = 1081
+
+    # angles for each range observation
+    rad0 = -135 * (np.pi/180.0)
+    radstep = 0.25 * (np.pi/180.0)
+    angles = np.linspace(rad0, rad0 + (num_hits-1)*radstep, num_hits)
+
+    f_bin = open(sys.argv[1], "rb")
+
+    plt.ion()
+
+    while True:
+
+        # Read timestamp
+        utime = struct.unpack('<Q', f_bin.read(8))[0]
diff --git a/src/planning/Astar.py b/src/planning/Astar.py
new file mode 100644
--- /dev/null
+++ b/src/planning/Astar.py
+                if g_score[nb_idx] > temp_dist:
+                    g_score[nb_idx] = temp_dist
+                    parent_idx[nb_idx] = cur_node
+                    f_score = g_score[nb_idx] + np.linalg.norm(self.poses[nb_idx] - goal)
+
+                    # put into queue
+                    if queueed_flag[nb_idx] == 0:
+                        visit_queue.put(nb_idx, f_score)
+                        queueed_flag[nb_idx] = 1
+
+        return self.extract_path(cur_node, parent_idx, start_idx), optimal
diff --git a/src/planning/PriorityQueue.py b/src/planning/PriorityQueue.py
new file mode 100644
index 0000000..f492273
--- /dev/null
+++ b/src/planning/PriorityQueue.py
@@ -0,0 +1,15 @@
+import heapq  # https://docs.python.org/3/library/heapq.html
+
+class PriorityQueue:
+    def __init__(self):
+        self.elements = []
+
+    def empty(self):
+        return len(self.elements) == 0
+
+    def put(self, item, priority):
+        heapq.heappush(self.elements, (priority, item))
+
+    def get(self):
+        return heapq.heappop(self.elements)[1]
+
diff --git a/src/planning/astar_driver.py b/src/planning/astar_driver.py
new file mode 100644
index 0000000..c7f1553
--- /dev/null
+++ b/src/planning/astar_driver.py
@@ -0,0 +1,41 @@
+# -*- coding: utf-8 -*-
+"""kdtree_testing
+
+Automatically generated by Colaboratory.
+
+Original file is located at
+    https://colab.research.google.com/drive/15-biioPffqZoK2zW9Fvi9t53y0o14XbZ
+"""
+
+import sys
+import matplotlib.pyplot as plt
+import numpy as np
+import math
+import random
+import time
+import pdb
+
+import Astar
+
+def load_poses(pose_gt_file):
+    pose_gt = np.loadtxt(pose_gt_file, delimiter=",")
+    return pose_gt
+
+
+def main():
+    #poses = load_poses('../dataset/ground_truth/groundtruth_2012-01-08.csv')
+    poses = load_poses('../dataset/ground_truth/debug.csv')
+    sparseness = 10
+    k = 50
+
+    sparse_poses = poses[1::sparseness, 1:3]
+    astar = Astar.Astar(poses=sparse_poses, k=k)
+
+    start_idx = np.random.randint(sparse_poses.shape[0])
+    goal_idx = np.random.randint(sparse_poses.shape[0])
+    path, optimal = astar.find_path(start_idx, goal_idx)
+
+    np.save('optimal_path', path)
+
+if __name__ == '__main__':
+    main()
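+
+# A minimal follow-up sketch (untested; assumes find_path returns a sequence of
+# indices into sparse_poses) for reloading and visualizing the saved result:
+#
+#   path = np.load('optimal_path.npy')  # np.save appends the .npy extension
+#   plt.scatter(sparse_poses[:, 0], sparse_poses[:, 1], s=1)
+#   plt.plot(sparse_poses[path, 0], sparse_poses[path, 1], 'r-')
+#   plt.axis('equal')
+#   plt.show()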