This commit is contained in:
Yushu Zhang
2020-04-30 20:02:11 -04:00
40 changed files with 2181 additions and 3 deletions

View File

@@ -36,6 +36,14 @@
- [Scipy](https://www.scipy.org/)
- [Matplotlib](https://matplotlib.org/)
- [Natural Language Toolkit](https://www.nltk.org/)
- [CPython](https://pypi.org/project/cPython/)
- [NLTK](https://pypi.org/project/nltk/)
- [Setuptools](https://pypi.org/project/setuptools/)
- [Pylint](https://pypi.org/project/pylint/)
- [spaCy](https://pypi.org/project/spacy/)
- [Pickle](https://pypi.org/project/pickle-mixin/)
- [TensorFlow](https://pypi.org/project/tensorflow/)
- [Keras](https://pypi.org/project/Keras/)
## Docker
@@ -49,3 +57,21 @@ After cloning the repo, start your docker machine and following commands shown b
1. `cd /PATH/TO/UMICH_NCLT_SLAP/src`
2. `docker-compose run --rm python-dev`
### Semantic Language Parsing: Chatbot
For standalone testing of the chatbot, run the following commands:
1. `cd /PATH/TO/UMICH_NCLT_SLAP/semantic/src`
2. `docker-compose run --rm python-dev`
1. `cd app/semantic`
2. `python gui_chatbot.py`
You can update the models by changing the intents or pickle files. intents.json can be edited with a basic text editor, and the pickles can be read and modified using pickleManage.py.
1. `cd /PATH/TO/UMICH_NCLT_SLAP/src/dataset/dataManipulation/pickles`
2. `python`
3. `from pickleManage import *`
4. Use the desired functions. The functions are documented with examples in the pickleManage.py file; a short sketch follows below.
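For reference, a minimal sketch of such a session. It uses the helper functions from pickleManage.py added in this commit; the coordinate values are the same ones recorded in coordinates.txt and are only illustrative here:

```python
# Minimal sketch of inspecting and updating the building-coordinate pickle
# with the helpers from pickleManage.py (run from the pickles directory).
from pickleManage import createPickle, updatePickle, printPickle

# Create a new pickle mapping building name -> [[lat, lng, alt], ...] (two entrances each)
createPickle("BuildingMappings", {
    "FXB": [[42.29360387311647, -83.71222615242006, 272],
            [42.29359196883519, -83.71161460876466, 274]],
})

# Merge another building's entrances into the existing pickle
updatePickle("BuildingMappings", {
    "EECS": [[42.29259200117389, -83.71376574039459, 266],
             [42.29250866981882, -83.71487081050874, 264]],
})

# Write the current dictionary keys to BuildingMappings.txt for a quick check
printPickle("BuildingMappings")
```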
To update the models after making changes, run:
`python`

9
src/dataset/LICENSE.md Normal file
View File

@@ -0,0 +1,9 @@
The NCLT Dataset is made available under the Open Database License [available here](https://opendatacommons.org/licenses/odbl/1.0/). Any rights in individual contents of the database are licensed under the Database Contents License [available here](https://opendatacommons.org/licenses/dbcl/1.0/).
In short, this means that you are free to use this dataset, share it, create derivative works, or adapt it, as long as you credit our work, offer any publicly used adapted version of this dataset under the same license, and keep any redistribution of this dataset open.
# Citation
Nicholas Carlevaris-Bianco, Arash K. Ushani, and Ryan M. Eustice, University of Michigan North Campus Long-Term Vision and Lidar Dataset, International Journal of Robotics Research, 2016.
# Website
http://robots.engin.umich.edu/nclt/

View File

@@ -0,0 +1,166 @@
#!/usr/bin/python
#
# Demonstrates how to project velodyne points onto camera imagery. Requires a binary
# velodyne sync file and an undistorted image, and assumes that the calibration files
# are in the current directory.
#
# To use:
#
# python project_vel_to_cam.py vel img cam_num
#
# vel: The velodyne binary file (timestamp.bin)
# img: The undistorted image (timestamp.tiff)
# cam_num: The index (0 through 5) of the camera
#
import sys
import struct
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
#from undistort import *
def convert(x_s, y_s, z_s):
scaling = 0.005 # 5 mm
offset = -100.0
x = x_s * scaling + offset
y = y_s * scaling + offset
z = z_s * scaling + offset
return x, y, z
def load_vel_hits(filename):
f_bin = open(filename, "rb")
hits = []
while True:
x_str = f_bin.read(2)
if x_str == b'': # eof
break
x = struct.unpack('<H', x_str)[0]
y = struct.unpack('<H', f_bin.read(2))[0]
z = struct.unpack('<H', f_bin.read(2))[0]
i = struct.unpack('B', f_bin.read(1))[0]
l = struct.unpack('B', f_bin.read(1))[0]
x, y, z = convert(x, y, z)
# Load in homogeneous coordinates
hits += [[x, y, z, 1]]
f_bin.close()
hits = np.asarray(hits)
return hits.transpose()
def ssc_to_homo(ssc):
# Convert 6-DOF ssc coordinate transformation to 4x4 homogeneous matrix
# transformation
sr = np.sin(np.pi/180.0 * ssc[3])
cr = np.cos(np.pi/180.0 * ssc[3])
sp = np.sin(np.pi/180.0 * ssc[4])
cp = np.cos(np.pi/180.0 * ssc[4])
sh = np.sin(np.pi/180.0 * ssc[5])
ch = np.cos(np.pi/180.0 * ssc[5])
H = np.zeros((4, 4))
H[0, 0] = ch*cp
H[0, 1] = -sh*cr + ch*sp*sr
H[0, 2] = sh*sr + ch*sp*cr
H[1, 0] = sh*cp
H[1, 1] = ch*cr + sh*sp*sr
H[1, 2] = -ch*sr + sh*sp*cr
H[2, 0] = -sp
H[2, 1] = cp*sr
H[2, 2] = cp*cr
H[0, 3] = ssc[0]
H[1, 3] = ssc[1]
H[2, 3] = ssc[2]
H[3, 3] = 1
return H
def project_vel_to_cam(hits, cam_num):
# Load camera parameters
K = np.loadtxt('K_cam%d.csv' % (cam_num), delimiter=',')
x_lb3_c = np.loadtxt('x_lb3_c%d.csv' % (cam_num), delimiter=',')
# Other coordinate transforms we need
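# x_body_lb3 is the SSC pose [x, y, z (m), roll, pitch, heading (deg)] of the Ladybug3 camera head in the body frame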
x_body_lb3 = [0.035, 0.002, -1.23, -179.93, -0.23, 0.50]
# Now do the projection
T_lb3_c = ssc_to_homo(x_lb3_c)
T_body_lb3 = ssc_to_homo(x_body_lb3)
T_lb3_body = np.linalg.inv(T_body_lb3)
T_c_lb3 = np.linalg.inv(T_lb3_c)
T_c_body = np.matmul(T_c_lb3, T_lb3_body)
hits_c = np.matmul(T_c_body, hits)
hits_im = np.matmul(K, hits_c[0:3, :])
return hits_im
def main(args):
if len(args)<4:
print("""Incorrect usage.
To use:
python project_vel_to_cam.py vel img cam_num
vel: The velodyne binary file (timestamp.bin)
img: The undistorted image (timestamp.tiff)
cam_num: The index (0 through 5) of the camera
""")
return 1
# Load velodyne points
hits_body = load_vel_hits(args[1])
# Load image
image = mpimg.imread(args[2])
cam_num = int(args[3])
hits_image = project_vel_to_cam(hits_body, cam_num)
x_im = hits_image[0, :]/hits_image[2, :]
y_im = hits_image[1, :]/hits_image[2, :]
z_im = hits_image[2, :]
idx_infront = z_im>0
x_im = x_im[idx_infront]
y_im = y_im[idx_infront]
z_im = z_im[idx_infront]
plt.figure(1)
plt.imshow(image)
# plt.hold is no longer needed; recent matplotlib overlays subsequent plots by default
plt.scatter(x_im, y_im, c=z_im, s=5, linewidths=0)
plt.xlim(0, 1616)
plt.ylim(0, 1232)
plt.show()
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))

View File

@@ -0,0 +1,55 @@
#!/usr/bin/python
#
# Example code to read and plot the gps data.
#
# To call:
#
# python read_gps.py gps.csv
#
import sys
import matplotlib.pyplot as plt
import numpy as np
def main(args):
if len(sys.argv) < 2:
print('Please specify gps file')
return 1
gps = np.loadtxt(sys.argv[1], delimiter = ",")
num_sats = gps[:, 2]
lat = gps[:, 3]
lng = gps[:, 4]
alt = gps[:, 5]
lat0 = lat[0]
lng0 = lng[0]
dLat = lat - lat0
dLng = lng - lng0
r = 6400000 # approx. radius of earth (m)
x = r * np.cos(lat0) * np.sin(dLng)
y = r * np.sin(dLat)
plt.figure()
plt.subplot(1, 2, 1)
plt.scatter(x, y, 1, c=alt, linewidth=0)
plt.axis('equal')
plt.title('By altitude')
plt.colorbar()
plt.subplot(1, 2, 2)
plt.scatter(x, y, c=num_sats, linewidth=0)
plt.axis('equal')
plt.title('By number of satellites')
plt.colorbar()
plt.show()
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))

View File

@@ -0,0 +1,61 @@
#!/usr/bin/python
#
# Example code to read and plot the ground truth data.
#
# Note: The ground truth data is provided at a high rate of about 100 Hz. To
# generate this high rate ground truth, a SLAM solution was used. Nodes in the
# SLAM graph were not added at 100 Hz, but rather about every 8 meters. In
# between the nodes in the SLAM graph, the odometry was used to interpolate and
# provide a high rate ground truth. If precise pose is desired (e.g., for
# accumulating point clouds), then we recommend using only the ground truth
# poses that correspond to the nodes in the SLAM graph. This can be found by
# inspecting the timestamps in the covariance file.
#
# To call:
#
# python read_ground_truth.py groundtruth.csv covariance.csv
#
import sys
import matplotlib.pyplot as plt
import numpy as np
import scipy.interpolate
def main(args):
if len(sys.argv) < 3:
print('Please specify ground truth and covariance files')
return 1
gt = np.loadtxt(sys.argv[1], delimiter = ",")
cov = np.loadtxt(sys.argv[2], delimiter = ",")
t_cov = cov[:, 0]
# Note: Interpolation is not needed, this is done as a convenience
interp = scipy.interpolate.interp1d(gt[:, 0], gt[:, 1:], kind='nearest', axis=0, fill_value="extrapolate")
pose_gt = interp(t_cov)
# NED (North, East, Down)
x = pose_gt[:, 0]
y = pose_gt[:, 1]
z = pose_gt[:, 2]
r = pose_gt[:, 3]
p = pose_gt[:, 4]
h = pose_gt[:, 5]
plt.figure()
plt.scatter(y, x, 1, c=-z, linewidth=0) # Note Z points down
plt.axis('equal')
plt.title('Ground Truth Position of Nodes in SLAM Graph')
plt.xlabel('East (m)')
plt.ylabel('North (m)')
plt.colorbar()
plt.show()
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))

View File

@@ -0,0 +1,72 @@
#!/usr/bin/python
#
# Example code to go through the hokuyo_30m.bin file, read timestamps and the hits
# in each packet, and plot them.
#
# To call:
#
# python read_hokuyo_30m.py hokuyo_30m.bin
#
import sys
import struct
import numpy as np
import matplotlib.pyplot as plt
def convert(x_s):
scaling = 0.005 # 5 mm
offset = -100.0
x = x_s * scaling + offset
return x
def main(args):
if len(sys.argv) < 2:
print("Please specifiy input bin file")
return 1
# hokuyo_30m always has 1081 hits
num_hits = 1081
# angles for each range observation
rad0 = -135 * (np.pi/180.0)
radstep = 0.25 * (np.pi/180.0)
angles = np.linspace(rad0, rad0 + (num_hits-1)*radstep, num_hits)
f_bin = open(sys.argv[1], "rb")
plt.ion()
while True:
# Read timestamp; stop cleanly at end of file
data = f_bin.read(8)
if len(data) < 8: # eof
break
utime = struct.unpack('<Q', data)[0]
print('Timestamp', utime)
r = np.zeros(num_hits)
for i in range(num_hits):
s = struct.unpack('<H', f_bin.read(2))[0]
r[i] = convert(s)
#print s
x = r * np.cos(angles)
y = r * np.sin(angles)
plt.clf()
plt.plot(x, y, '.')
plt.title(utime)
plt.draw()
f_bin.close()
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))

View File

@@ -0,0 +1,72 @@
#!/usr/bin/python
#
# Example code to go through the hokuyo_4m.bin file, read timestamps and the hits
# in each packet, and plot them.
#
# To call:
#
# python read_hokuyo_4m.py hokuyo_4m.bin
#
import sys
import struct
import numpy as np
import matplotlib.pyplot as plt
def convert(x_s):
scaling = 0.005 # 5 mm
offset = -100.0
x = x_s * scaling + offset
return x
def main(args):
if len(sys.argv) < 2:
print("Please specifiy input bin file")
return 1
# hokuyo_4m always has 726 hits
num_hits = 726
# angles for each range observation
rad0 = -2.0862138
radstep = 0.0061359233
angles = np.linspace(rad0, rad0 + (num_hits-1)*radstep, num_hits)
f_bin = open(sys.argv[1], "rb")
plt.ion()
while True:
# Read timestamp; stop cleanly at end of file
data = f_bin.read(8)
if len(data) < 8: # eof
break
utime = struct.unpack('<Q', data)[0]
print('Timestamp', utime)
r = np.zeros(num_hits)
for i in range(num_hits):
s = struct.unpack('<H', f_bin.read(2))[0]
r[i] = convert(s)
#print s
x = r * np.cos(angles)
y = r * np.sin(angles)
plt.clf()
plt.plot(x, y, '.')
plt.title(utime)
plt.draw()
f_bin.close()
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))

View File

@@ -0,0 +1,71 @@
#!/usr/bin/python
#
# Example code to read and plot the microstrain data.
#
# To call:
#
# python read_ms25.py ms25.csv
#
import sys
import matplotlib.pyplot as plt
import numpy as np
def main(args):
if len(sys.argv) < 2:
print('Please specify microstrain file')
return 1
ms25 = np.loadtxt(sys.argv[1], delimiter = ",")
t = ms25[:, 0]
mag_x = ms25[:, 1]
mag_y = ms25[:, 2]
mag_z = ms25[:, 3]
accel_x = ms25[:, 4]
accel_y = ms25[:, 5]
accel_z = ms25[:, 6]
rot_r = ms25[:, 7]
rot_p = ms25[:, 8]
rot_h = ms25[:, 9]
plt.figure()
plt.subplot(1, 3, 1)
plt.plot(t, mag_x, 'r')
plt.plot(t, mag_y, 'g')
plt.plot(t, mag_z, 'b')
plt.legend(['X', 'Y', 'Z'])
plt.title('Magnetic Field')
plt.xlabel('utime (us)')
plt.ylabel('Gauss')
plt.subplot(1, 3, 2)
plt.plot(t, accel_x, 'r')
plt.plot(t, accel_y, 'g')
plt.plot(t, accel_z, 'b')
plt.legend(['X', 'Y', 'Z'])
plt.title('Acceleration')
plt.xlabel('utime (us)')
plt.ylabel('m/s^2')
plt.subplot(1, 3, 3)
plt.plot(t, rot_r, 'r')
plt.plot(t, rot_p, 'g')
plt.plot(t, rot_h, 'b')
plt.legend(['r', 'p', 'h'])
plt.title('Angular Rotation Rate')
plt.xlabel('utime (us)')
plt.ylabel('rad/s')
plt.show()
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))

View File

@@ -0,0 +1,43 @@
#!/usr/bin/python
#
# Example code to read and plot the microstrain euler angles data.
#
# To call:
#
# python read_ms25_euler.py ms25_euler.csv
#
import sys
import matplotlib.pyplot as plt
import numpy as np
def main(args):
if len(sys.argv) < 2:
print('Please specify microstrain file')
return 1
euler = np.loadtxt(sys.argv[1], delimiter = ",")
t = euler[:, 0]
r = euler[:, 1]
p = euler[:, 2]
h = euler[:, 3]
plt.figure()
plt.plot(t, r, 'r')
plt.plot(t, p, 'g')
plt.plot(t, h, 'b')
plt.legend(['r', 'p', 'h'])
plt.title('Euler Angles')
plt.xlabel('utime (us)')
plt.ylabel('rad')
plt.show()
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))

View File

@@ -0,0 +1,50 @@
#!/usr/bin/python
#
# Example code to read and plot the odometry data.
#
# To call:
#
# python read_odom.py odometry.csv
#
import sys
import matplotlib.pyplot as plt
import numpy as np
def main(args):
if len(sys.argv) < 2:
print('Please specify odometry file')
return 1
odom = np.loadtxt(sys.argv[1], delimiter = ",")
t = odom[:, 0]
x = odom[:, 1]
y = odom[:, 2]
z = odom[:, 3]
r = odom[:, 4]
p = odom[:, 5]
h = odom[:, 6]
plt.figure()
plt.subplot(1, 2, 1)
plt.scatter(x, y, 1, c=z, linewidth=0)
plt.axis('equal')
plt.title('Odometry position')
plt.colorbar()
plt.subplot(1, 2, 2)
plt.plot(t, r, 'r')
plt.plot(t, p, 'g')
plt.plot(t, h, 'b')
plt.legend(['Roll', 'Pitch', 'Heading'])
plt.title('Odometry rph')
plt.show()
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))

View File

@@ -0,0 +1,88 @@
"""
Demonstrates how to undistort images.
Reads in the given calibration file, parses it, and uses it to undistort the given
image. Then displays both the original and undistorted images.
To use:
python undistort.py image calibration_file
"""
import numpy as np
import cv2
import matplotlib.pyplot as plt
import argparse
import re
class Undistort(object):
def __init__(self, fin, scale=1.0, fmask=None):
self.fin = fin
# read in distort
with open(fin, 'r') as f:
#chunks = f.readline().rstrip().split(' ')
header = f.readline().rstrip()
chunks = re.sub(r'[^0-9,]', '', header).split(',')
self.mapu = np.zeros((int(chunks[1]),int(chunks[0])),
dtype=np.float32)
self.mapv = np.zeros((int(chunks[1]),int(chunks[0])),
dtype=np.float32)
for line in f.readlines():
chunks = line.rstrip().split(' ')
self.mapu[int(chunks[0]),int(chunks[1])] = float(chunks[3])
self.mapv[int(chunks[0]),int(chunks[1])] = float(chunks[2])
# generate a mask
self.mask = np.ones(self.mapu.shape, dtype=np.uint8)
self.mask = cv2.remap(self.mask, self.mapu, self.mapv, cv2.INTER_LINEAR)
kernel = np.ones((30,30),np.uint8)
self.mask = cv2.erode(self.mask, kernel, iterations=1)
"""
Optionally, define a mask
"""
def set_mask(fmask):
# add in the additional mask passed in as fmask
if fmask:
mask = cv2.cvtColor(cv2.imread(fmask), cv2.COLOR_BGR2GRAY)
self.mask = self.mask & mask
new_shape = (int(self.mask.shape[1]*scale), int(self.mask.shape[0]*scale))
self.mask = cv2.resize(self.mask, new_shape,
interpolation=cv2.INTER_CUBIC)
#plt.figure(1)
#plt.imshow(self.mask, cmap='gray')
#plt.show()
"""
Use OpenCV to undistort the given image
"""
def undistort(self, img):
return cv2.resize(cv2.remap(img, self.mapu, self.mapv, cv2.INTER_LINEAR),
(self.mask.shape[1], self.mask.shape[0]),
interpolation=cv2.INTER_CUBIC)
def main():
parser = argparse.ArgumentParser(description="Undistort images")
parser.add_argument('image', metavar='img', type=str, help='image to undistort')
parser.add_argument('map', metavar='map', type=str, help='undistortion map')
args = parser.parse_args()
undistort = Undistort(args.map)
print('Loaded camera calibration')
im = cv2.imread(args.image)
cv2.namedWindow('Image', cv2.WINDOW_NORMAL)
cv2.imshow('Image', im)
im_undistorted = undistort.undistort(im)
cv2.namedWindow('Undistorted Image', cv2.WINDOW_NORMAL)
cv2.imshow('Undistorted Image', im_undistorted)
cv2.waitKey(0)
cv2.destroyAllWindows()
if __name__ == "__main__":
main()

View File

@@ -0,0 +1,25 @@
import sys
import numpy as np
# Usage: changes lat, long, alt into xyz local frame
# This takes in lat, long, and alt and calculates xyz
# Example: print(gpstoLocalFrame(42.29360387311647,-83.71222615242006,272))
# Returns: array of XYZ coordinates in meters (local frame)
def gpstoLocalFrame(lat, lng, alt):
lat0 = 0.7381566413
lng0 = -1.4610097151
alt0 = 265.8
dLat = np.deg2rad(lat) - lat0
dLng = np.deg2rad(lng) - lng0
dAlt = alt - alt0
r = 6400000 # approx. radius of earth (m)
# WARNING: x and y may need to be flipped. Paper and example files from NCLT have contradictory usages
y = r * np.cos(lat0) * np.sin(dLng)
x = r * np.sin(dLat)
z = dAlt
return [x,y,z]

View File

@@ -0,0 +1,79 @@
import sys
import numpy as np
import pickle
import math
# Example usage of overall file to find xyz coordinates of two buildings given name as string
'''
GPScoords = findClosestEntrance("BBB", "EECS")
Building1 = gpstoLocalFrame(GPScoords[0][0], GPScoords[0][1], GPScoords[0][2])
Building2 = gpstoLocalFrame(GPScoords[1][0], GPScoords[1][1], GPScoords[1][2])
print(Building1)
print(Building2)
'''
# Usage: finds building coordinates in lat, long, and alt in degrees
# This takes in two building names as strings and returns
# closest two entrances
# Example: GPScoords = findClosestEntrance("BBB", "EECS")
# print(GPScoords[0][0]) #returns latitude of first building
# Returns: 2x2 array of two GPS coordinates in lat, long, and alt in degrees
def findClosestEntrance(building1, building2):
gps1 = buildingtoGPS(building1)
gps2 = buildingtoGPS(building2)
x = [0,0,0,0]
x[0] = calculateDistance(gps1[0][0],gps1[0][1],gps2[0][0],gps2[0][1])
x[1] = calculateDistance(gps1[0][0],gps1[0][1],gps2[1][0],gps2[1][1])
x[2] = calculateDistance(gps1[1][0],gps1[1][1],gps2[0][0],gps2[0][1])
x[3] = calculateDistance(gps1[1][0],gps1[1][1],gps2[1][0],gps2[1][1])
index = np.argmin(x)
if index == 0:
return [gps1[0],gps2[0]]
elif index == 1:
return [gps1[0],gps2[1]]
elif index == 2:
return [gps1[1],gps2[0]]
else:
return [gps1[1],gps2[1]]
# Usage: finds building coordinates in lat, long, and alt in degrees
# This takes in a building name and looks up coordinates in pickle file
# Example: buildingsGPScoords = buildingtoGPS(building1)
# print(buildingsGPScoords[0]) #returns latitude of the building
# Returns: array of GPS coordinates (lat, long, and alt) in degrees
def buildingtoGPS(building):
pickle_in = open('pickles/BuildingMappings.pkl',"rb")
currDict = pickle.load(pickle_in)
for place in currDict:
if place == building:
return currDict.get(building)
return 0
# Usage: changes lat, long, alt into xyz local frame
# This takes in lat, long, and alt and calculates xyz
# Example: print(gpstoLocalFrame(42.29360387311647,-83.71222615242006,272))
# Returns: array of XYZ coordinates in meters (local frame)
def gpstoLocalFrame(lat, lng, alt):
lat0 = 0.7381566413
lng0 = -1.4610097151
alt0 = 265.8
dLat = np.deg2rad(lat) - lat0
dLng = np.deg2rad(lng) - lng0
dAlt = alt - alt0
r = 6400000 # approx. radius of earth (m)
# WARNING: x and y may need to be flipped. Paper and example files from NCLT have contradictory usages
y = r * np.cos(lat0) * np.sin(dLng)
x = r * np.sin(dLat)
z = dAlt
return [x,y,z]
# Usage: Euclidean distance calculator - helper function
def calculateDistance(x1,y1,x2,y2):
dist = math.sqrt((x2 - x1)**2 + (y2 - y1)**2)
return dist

View File

@@ -0,0 +1,57 @@
(dp0
S'EECS'
p1
(lp2
(lp3
F42.29259200117389
aF-83.71376574039459
aI266
aa(lp4
F42.29250866981882
aF-83.71487081050874
aI264
aasS'Duderstadt'
p5
(lp6
(lp7
F42.29067138383511
aF-83.7162870168686
aI261
aa(lp8
F42.29158011297468
aF-83.71514439582826
aI263
aasS'FXB'
p9
(lp10
(lp11
F42.29360387311647
aF-83.71222615242006
aI272
aa(lp12
F42.29359196883519
aF-83.71161460876466
aI274
aasS'Pierpont'
p13
(lp14
(lp15
F42.291536462529756
aF-83.71705412864686
aI261
aa(lp16
F42.29065947899958
aF-83.7178158760071
aI258
aasS'BBB'
p17
(lp18
(lp19
F42.292667396114446
aF-83.71626019477846
aI264
aa(lp20
F42.2933737232794
aF-83.71622264385225
aI271
aas.

View File

@@ -0,0 +1,5 @@
EECS
Duderstadt
Pierpont
BBB
FXB

View File

@@ -0,0 +1,32 @@
import pickle
# Usage: file to print current pickle files to text file
# this allows us to monitor current dictionaries
# Example: printPickle("BuildingMappings")
# Output: Text file of dictionary keys
def printPickle(filename):
pickle_in = open(filename + '.pkl',"rb")
currDict = pickle.load(pickle_in)
f = open(filename + '.txt',"w")
for x in currDict:
f.write('%s\n' % x )
f.close()
# Usage: creates pickle files from given dictionaries
# Example: createPickle('test', {'Bart', 'Lisa', 'Milhouse', 'Nelson'})
# Output: new Pickle file
def createPickle(filename, pklList):
f = open(filename + '.pkl', 'wb') # Pickle file is created in the current working directory
pickle.dump(pklList, f) # dump data to f
f.close()
# Usage: updates pickle files from given dictionaries
# Example: updatePickle('test', {'Bart', 'Lisa', 'Milhouse', 'Nelson'})
# Output: Pickle file
def updatePickle(filename, pklList):
pickle_in = open(filename + '.pkl',"rb")
currDict = pickle.load(pickle_in)
f = open(filename + '.pkl', 'wb') # overwrite the pickle file in the current working directory
currDict.update(pklList) # merge the new entries into the existing dictionary
pickle.dump(currDict, f) # dump merged data to f
f.close()

View File

@@ -0,0 +1,29 @@
Reference file for coordinates found from Google Maps
(latitude, longitude, altitude)
FXB
Beal Ave - 42.29360387311647 -83.71222615242006 272
Hayward - 42.29359196883519 -83.71161460876466 274
EECS
Beal Ave - 42.29259200117389 -83.71376574039459 266
Grove - 42.29250866981882 -83.71487081050874 264
BBB
Grove - 42.292667396114446 -83.71626019477846 264
Hayward - 42.2933737232794 -83.71622264385225 271
Pierpont
Grove - 42.291536462529756 -83.71705412864686 261
Bonisteel - 42.29065947899958 -83.7178158760071 258
Duderstadt
Bonisteel - 42.29067138383511 -83.7162870168686 261
Grove - 42.29158011297468 -83.71514439582826 263
Example of creating and updating the buildings pickle file:
from pickleManage import *
createPickle("BuildingMappings", {'Duderstadt':[[42.29067138383511, -83.7162870168686, 261],[42.29158011297468, -83.71514439582826, 263]], 'Pierpont':[[42.291536462529756, -83.71705412864686, 261],[42.29065947899958, -83.7178158760071, 258]], 'BBB':[[42.292667396114446, -83.71626019477846, 264],[42.2933737232794, -83.71622264385225, 271]], 'EECS':[[42.29259200117389, -83.71376574039459, 266],[42.29250866981882, -83.71487081050874, 264]],'FXB':[[42.29360387311647, -83.71222615242006, 272],[42.29359196883519, -83.71161460876466, 274]]})
updatePickle("BuildingMappings", {'Duderstadt':[[42.29067138383511, -83.7162870168686, 261],[42.29158011297468, -83.71514439582826, 263]]})
printPickle("BuildingMappings")

View File

@@ -9,7 +9,7 @@ import argparse
base_dir = 'http://robots.engin.umich.edu/nclt'
dates = [];
dates = []
dates.append('2012-01-08')
dates.append('2012-01-15')
dates.append('2012-01-22')

File diff suppressed because one or more lines are too long

View File

@@ -4,7 +4,8 @@ RUN apt-get update && \
apt-get install -y \
build-essential \
python-opencv \
libpcl-dev
libpcl-dev \
x11-apps
RUN pip install -U pip && \
pip install -U \
@@ -14,6 +15,14 @@ RUN pip install -U pip && \
matplotlib \
nltk \
setuptools \
pylint
pylint \
pickle-mixin \
spacy \
--upgrade setuptools \
--no-cache-dir tensorflow \
keras
RUN python -m spacy download en_core_web_sm
CMD ["/bin/bash"]

Binary file not shown.

Binary file not shown.

View File

@@ -0,0 +1,27 @@
# Docker Compose
# docker-compose.yml format version
version: '3'
# Define services
services:
# Python Development Container
python-dev:
# Use Dockerfile in current folder
build: .
# Mount project source folders on the host into the app folder in the container
volumes:
- ./control:/app/control
- ./dataset:/app/dataset
- ./localization:/app/localization
- ./planning:/app/planning
- ./semantic:/app/semantic
- ./visualization:/app/visualization
- /tmp/.X11-unix/:/tmp/.X11-unix
# Set DISPLAY variable and network mode for GUIs
environment:
- DISPLAY=$DISPLAY
#- DISPLAY=${IP_ADDRESS}:0.0
network_mode: "host"
# Set working directory in container to app folder
working_dir: /app

201
src/semantic/gui_chatbot.py Normal file
View File

@@ -0,0 +1,201 @@
# Based on the Python chatbot example and tutorial here: https://data-flair.training/blogs/python-chatbot-project/
# also uses examples from the spaCy website
import nltk
from nltk.stem import WordNetLemmatizer
lemmatizer = WordNetLemmatizer()
import pickle
import numpy as np
import spacy
import tkinter
from tkinter import *
from keras.models import load_model
model = load_model('chatbot_model.h5')
modelBuilding = load_model('buildings_model.h5')
import json
import random
intents = json.loads(open('intents/intents.json').read())
words = pickle.load(open('pickles/words.pkl','rb'))
classes = pickle.load(open('pickles/classes.pkl','rb'))
buildingsIntents = json.loads(open('intents/buildingIntents.json').read())
building_words = pickle.load(open('pickles/building_words.pkl','rb'))
buildings = pickle.load(open('pickles/buildings.pkl','rb'))
confirmation = 0
startNav = 0 #TODO: START CONVERSION TO GPS COORDINATES
completedNav = 0 #TODO: Add response once complete
emergencyExit = 0 #TODO: OPTIONAL STOP EVERYTHING
def clean_up_sentence(sentence):
# tokenize the pattern - splitting words into array
sentence_words = nltk.word_tokenize(sentence)
# lemmatizing every word - reducing to base form
sentence_words = [lemmatizer.lemmatize(word.lower()) for word in sentence_words]
return sentence_words
# return bag of words array: 0 or 1 for words that exist in sentence
def bag_of_words(sentence, wording, show_details=True):
# tokenizing patterns
sentence_words = clean_up_sentence(sentence)
# bag of words - vocabulary matrix
bag = [0]*len(wording)
for s in sentence_words:
for i,word in enumerate(wording):
if word == s:
# assign 1 if current word is in the vocabulary position
bag[i] = 1
if show_details:
print ("found in bag: %s" % word)
return(np.array(bag))
def predict_class(sentence):
# filter below threshold predictions
p = bag_of_words(sentence, words,show_details=False)
res = model.predict(np.array([p]))[0]
ERROR_THRESHOLD = 0.25
results = [[i,r] for i,r in enumerate(res) if r>ERROR_THRESHOLD]
# sorting strength probability
results.sort(key=lambda x: x[1], reverse=True)
return_list = []
for r in results:
return_list.append({"intent": classes[r[0]], "probability": str(r[1])})
return return_list
def predict_building(currbuilding):
# filter below threshold predictions
p = bag_of_words(currbuilding, building_words,show_details=False)
res = modelBuilding.predict(np.array([p]))[0]
ERROR_THRESHOLD = 0.5
results = [[i,r] for i,r in enumerate(res) if r>ERROR_THRESHOLD]
# sorting strength probability
results.sort(key=lambda x: x[1], reverse=True)
return_list = []
for r in results:
return_list.append({"buildingIntents": buildings[r[0]], "probability": str(r[1])})
return return_list
def getResponse(ints, intents_json):
tag = ints[0]['intent']
list_of_intents = intents_json['intents']
for i in list_of_intents:
if(i['tag']== tag):
result = random.choice(i['responses'])
break
return result
def getBuildingInfo(sentence):
doc = nlp(sentence)
start = 0
end = 0
startBuilding = "random location"
stopBuilding = "random location"
for token in doc:
if token.pos_ == "PROPN" and start == 1:
startBuilding = token.text
elif token.pos_ == "PROPN" and end == 1:
stopBuilding = token.text
elif token.text == "to":
start = 0
end = 1
elif token.text == "from":
start = 1
end = 0
else:
pass
# print(token.text)
return [startBuilding, stopBuilding]
#Creating tkinter GUI
def send():
msgClean = EntryBox.get("1.0",'end-1c')
msg = msgClean.strip()
EntryBox.delete("0.0",END)
if msg != '':
ChatBox.config(state=NORMAL)
ChatBox.insert(END, "You: " + msg + '\n\n')
ChatBox.config(foreground="#446665", font=("Verdana", 12 ))
ints = predict_class(msg)
global confirmation
global startNav
global emergencyExit
# adds rule based chatbot to confirm navigation
if (ints[0]['intent'] == "yes" or ints[0]['intent'] == "no") and confirmation == 1 and startNav == 0:
emergencyExit = 0
if ints[0]['intent'] == "yes":
res = "Starting navigation. Please wait for process to complete. This may take a couple minutes."
startNav = 1
elif ints[0]['intent'] == "no":
res = "Cancelled operation"
confirmation = 0
elif ints[0]['intent'] == "navigation" and startNav == 0:
emergencyExit = 0
currbuilding = getBuildingInfo(msgClean)
if currbuilding[0] == 'random location':
currbuilding[0] = buildings[random.randint(0, len(buildings)-1)]
while currbuilding[0] == currbuilding[1]:
currbuilding[1] = buildings[random.randint(0, len(buildings)-1)]
if currbuilding[1] == 'random location':
currbuilding[1] = buildings[random.randint(0, len(buildings)-1)]
while currbuilding[0] == currbuilding[1]:
currbuilding[1] = buildings[random.randint(0, len(buildings)-1)]
fromBuild = predict_building(currbuilding[0])
toBuild = predict_building(currbuilding[1])
res = "You chose navigating to " + toBuild[0]['buildingIntents'] + " building from " + fromBuild[0]['buildingIntents'] + " building. Is this correct?"
confirmation = 1
elif ints[0]['intent'] == "exit":
res = getResponse(ints, intents)
startNav = 0
emergencyExit = 1
elif startNav == 1:
emergencyExit = 0
res = "Please wait while the navigation is processing"
else:
emergencyExit = 0
res = getResponse(ints, intents)
ChatBox.insert(END, "Belatrix: " + res + '\n\n')
ChatBox.config(state=DISABLED)
ChatBox.yview(END)
root = Tk()
root.title("Chatbot")
root.geometry("400x500")
root.resizable(width=FALSE, height=FALSE)
#import nlp dictionary
nlp = spacy.load("en_core_web_sm")
nltk.download('punkt')
nltk.download('wordnet')
#Create Chat window
ChatBox = Text(root, bd=0, bg="white", height="8", width="50", font="Arial",)
ChatBox.config(state=DISABLED)
#Bind scrollbar to Chat window
scrollbar = Scrollbar(root, command=ChatBox.yview, cursor="heart")
ChatBox['yscrollcommand'] = scrollbar.set
#Create Button to send message
SendButton = Button(root, font=("Verdana",12,'bold'), text="Send", width="12", height=5,
bd=0, bg="#f9a602", activebackground="#3c9d9b",fg='#000000',
command= send )
#Create the box to enter message
EntryBox = Text(root, bd=0, bg="white",width="29", height="5", font="Arial")
#EntryBox.bind("<Return>", send)
#Place all components on the screen
scrollbar.place(x=376,y=6, height=386)
ChatBox.place(x=6,y=6, height=386, width=370)
EntryBox.place(x=128, y=401, height=90, width=265)
SendButton.place(x=6, y=401, height=90)
root.mainloop()

View File

@@ -0,0 +1,28 @@
{"intents": [
{"tag": "Bob and Betty Beyster",
"patterns": ["BBB", "CSE", "CS","Computer Science", "Computer", "Bob", "Bob and Betty Beyster", "Betty", "Computer Science Department", "CS Department"],
"responses": ["Bob and Betty Beyster"],
"context": [""]
},
{"tag": "Duderstadt",
"patterns": ["Dude", "the Dude", "Duderstadt", "Mujos", "Library", "North Campus Library"],
"responses": ["Duderstadt"],
"context": [""]
},
{"tag": "FXB",
"patterns": ["FXB", "Francois-Xavier Bagnoud", "Aerospace", "aerospace", "Aerospace Engineering", "planes", "Aerospace Department", "Aerospace Engineering Department"],
"responses": ["FXB"],
"context": [""]
},
{"tag": "Electrical and Computer Engineering",
"patterns": ["Electrical and Computer Engineering","Electrical", "Electrical Engineering", "Computer Engineering", "EECS", "ECE", "Electrical Engineering Department", "EECS Department", "ECE Department"],
"responses": ["Electrical and Computer Engineering"],
"context": [""]
},
{"tag": "Pierpont Commons",
"patterns": ["Pierpont", "Pierpont Commons", "Commons", "Panda Express"],
"responses": ["Pierpont Commons"],
"context": [""]
}
]
}

View File

@@ -0,0 +1,48 @@
{"intents": [
{"tag": "greeting",
"patterns": ["Hi there", "How are you", "Is anyone there?","Hey","Hola", "Hello", "Good day"],
"responses": ["Hello, thanks for asking", "Good to see you again", "Hi there, how can I help?"],
"context": [""]
},
{"tag": "goodbye",
"patterns": ["Bye", "See you later", "Goodbye", "Nice chatting to you, bye", "Till next time"],
"responses": ["See you!", "Have a nice day", "Bye! Come back again soon."],
"context": [""]
},
{"tag": "thanks",
"patterns": ["Thanks", "Thank you", "That's helpful", "Awesome, thanks", "Thanks for helping me"],
"responses": ["Happy to help!", "Any time!", "My pleasure"],
"context": [""]
},
{"tag": "noanswer",
"patterns": [],
"responses": ["Sorry, can't understand you", "Please give me more info", "Not sure I understand"],
"context": [""]
},
{"tag": "options",
"patterns": ["How you could help me?", "What you can do?", "What help you provide?", "How you can be helpful?", "What support is offered"],
"responses": ["I can take you to multiple buildings including BBB, EECS, and more on north campus."],
"context": [""]
},
{"tag": "navigation",
"patterns": ["Can you take me to the ", "Take me to the building", "Map me to the location", "Navigate me to the building from the building"],
"responses": ["Starting Navigation"],
"context": ["navigation_to_building"]
},
{"tag": "exit",
"patterns": ["stop", "quit", "end", "I want to stop navigation"],
"responses": ["Ending current navigation"],
"context": ["navigation_to_building"]
},
{"tag": "yes",
"patterns": ["yes", "y", "sure", "right", "correct"],
"responses": ["I am sorry. I don't understand"],
"context": ["navigation_to_building"]
},
{"tag": "no",
"patterns": ["no", "nope", "n", "wrong", "incorrect"],
"responses": ["I am sorry. I don't understand"],
"context": ["navigation_to_building"]
}
]
}

Binary file not shown.

View File

@@ -0,0 +1,14 @@
Dude
Computer Science
CSE
FXB
BBB
Aerospace Engineering
Electrical Engineering
EECS
ECE
Pierpont
Duderstadt
Francois-Xavier Bagnoud
Bob and Betty Beyster
Pierpont Commons

Binary file not shown.

View File

@@ -0,0 +1,5 @@
Duderstadt
Electrical and Computer Engineering
FXB
Pierpont Commons
Bob and Betty Beyster

Binary file not shown.

View File

@@ -0,0 +1,11 @@
blood_pressure_search
exit
goodbye
greeting
hospital_search
navigation
options
pharmacy_search
thanks
navigation
exit

Binary file not shown.

Binary file not shown.

View File

@@ -0,0 +1,92 @@
's
a
adverse
all
anyone
are
awesome
be
behavior
blood
by
bye
can
causing
chatting
check
could
data
day
detail
do
dont
drug
entry
find
for
give
good
goodbye
have
hello
help
helpful
helping
hey
hi
history
hola
hospital
how
i
id
is
later
list
load
locate
log
looking
lookup
management
me
module
nearby
next
nice
of
offered
open
patient
pharmacy
pressure
provide
reaction
related
result
search
searching
see
show
suitable
support
task
thank
thanks
that
there
till
time
to
transfer
up
want
what
which
with
you
navigation
map
locate
navigate
building

View File

@@ -0,0 +1,96 @@
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout
from keras.optimizers import SGD
import random
import nltk
from nltk.stem import WordNetLemmatizer
lemmatizer = WordNetLemmatizer()
import json
import pickle
building_words=[]
buildings = []
documents = []
ignore_letters = ['!', '?', ',', '.']
buildingIntents_file = open('intents/buildingIntents.json').read()
buildingIntents = json.loads(buildingIntents_file)
# download nltk resources
nltk.download('punkt')
nltk.download('wordnet')
for intent in buildingIntents['intents']:
for pattern in intent['patterns']:
#tokenize each word
word = nltk.word_tokenize(pattern)
building_words.extend(word)
#add documents in the corpus
documents.append((word, intent['tag']))
# add to our buildings list
if intent['tag'] not in buildings:
buildings.append(intent['tag'])
print(documents)
# lemmatize and lower each word and remove duplicates
building_words = [lemmatizer.lemmatize(w.lower()) for w in building_words if w not in ignore_letters]
building_words = sorted(list(set(building_words)))
# sort buildings
buildings = sorted(list(set(buildings)))
# documents = combination between patterns and buildingIntents
print (len(documents), "documents")
# buildings = buildingIntents
print (len(buildings), "buildings", buildings)
# building_words = all building_words, vocabulary
print (len(building_words), "unique lemmatized building_words", building_words)
pickle.dump(building_words,open('pickles/building_words.pkl','wb'))
pickle.dump(buildings,open('pickles/buildings.pkl','wb'))
# create our training data
training = []
# create an empty array for our output
output_empty = [0] * len(buildings)
# training set, bag of building_words for each sentence
for doc in documents:
# initialize our bag of building_words
bag = []
# list of tokenized building_words for the pattern
pattern_building_words = doc[0]
# lemmatize each word - create base word, in attempt to represent related building_words
pattern_building_words = [lemmatizer.lemmatize(word.lower()) for word in pattern_building_words]
# create our bag of building_words array with 1, if word match found in current pattern
for word in building_words:
bag.append(1) if word in pattern_building_words else bag.append(0)
# output is a '0' for each tag and '1' for current tag (for each pattern)
output_row = list(output_empty)
output_row[buildings.index(doc[1])] = 1
training.append([bag, output_row])
# shuffle our features and turn into np.array
random.shuffle(training)
training = np.array(training, dtype=object) # object dtype: bag and output_row have different lengths
# create train and test lists. X - patterns, Y - buildingIntents
train_x = list(training[:,0])
train_y = list(training[:,1])
print("Buildings Training data created")
# Create model - 3 layers. First layer 128 neurons, second layer 64 neurons and 3rd output layer contains number of neurons
# equal to number of buildingIntents to predict output intent with softmax
model = Sequential()
model.add(Dense(128, input_shape=(len(train_x[0]),), activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(len(train_y[0]), activation='softmax'))
# Compile model. Stochastic gradient descent with Nesterov accelerated gradient gives good results for this model
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
#fitting and saving the model
hist = model.fit(np.array(train_x), np.array(train_y), epochs=200, batch_size=5, verbose=1)
model.save('buildings_model.h5', hist)
print("building model created")

View File

@@ -0,0 +1,96 @@
import numpy as np
from keras.models import Sequential
from keras.layers import Dense, Activation, Dropout
from keras.optimizers import SGD
import random
import nltk
from nltk.stem import WordNetLemmatizer
lemmatizer = WordNetLemmatizer()
import json
import pickle
words=[]
classes = []
documents = []
ignore_letters = ['!', '?', ',', '.']
intents_file = open('intents/intents.json').read()
intents = json.loads(intents_file)
# download nltk resources
nltk.download('punkt')
nltk.download('wordnet')
for intent in intents['intents']:
for pattern in intent['patterns']:
#tokenize each word
word = nltk.word_tokenize(pattern)
words.extend(word)
#add documents in the corpus
documents.append((word, intent['tag']))
# add to our classes list
if intent['tag'] not in classes:
classes.append(intent['tag'])
print(documents)
# lemmatize and lower each word and remove duplicates
words = [lemmatizer.lemmatize(w.lower()) for w in words if w not in ignore_letters]
words = sorted(list(set(words)))
# sort classes
classes = sorted(list(set(classes)))
# documents = combination between patterns and intents
print (len(documents), "documents")
# classes = intents
print (len(classes), "classes", classes)
# words = all words, vocabulary
print (len(words), "unique lemmatized words", words)
pickle.dump(words,open('pickles/words.pkl','wb'))
pickle.dump(classes,open('pickles/classes.pkl','wb'))
# create our training data
training = []
# create an empty array for our output
output_empty = [0] * len(classes)
# training set, bag of words for each sentence
for doc in documents:
# initialize our bag of words
bag = []
# list of tokenized words for the pattern
pattern_words = doc[0]
# lemmatize each word - create base word, in attempt to represent related words
pattern_words = [lemmatizer.lemmatize(word.lower()) for word in pattern_words]
# create our bag of words array with 1, if word match found in current pattern
for word in words:
bag.append(1) if word in pattern_words else bag.append(0)
# output is a '0' for each tag and '1' for current tag (for each pattern)
output_row = list(output_empty)
output_row[classes.index(doc[1])] = 1
training.append([bag, output_row])
# shuffle our features and turn into np.array
random.shuffle(training)
training = np.array(training, dtype=object) # object dtype: bag and output_row have different lengths
# create train and test lists. X - patterns, Y - intents
train_x = list(training[:,0])
train_y = list(training[:,1])
print("Training data created")
# Create model - 3 layers. First layer 128 neurons, second layer 64 neurons and 3rd output layer contains number of neurons
# equal to number of intents to predict output intent with softmax
model = Sequential()
model.add(Dense(128, input_shape=(len(train_x[0]),), activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(len(train_y[0]), activation='softmax'))
# Compile model. Stochastic gradient descent with Nesterov accelerated gradient gives good results for this model
sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
#fitting and saving the model
hist = model.fit(np.array(train_x), np.array(train_y), epochs=200, batch_size=5, verbose=1)
model.save('chatbot_model.h5', hist)
print("model created")