Modified the basic chatbot to extract information from entities

This commit is contained in:
snbenge
2020-04-19 19:19:41 -04:00
parent f10eb69a75
commit a030c250c9
9 changed files with 70 additions and 57 deletions

Binary file not shown.

Binary file not shown.

View File

@@ -1,9 +1,11 @@
-adverse_drug
-blood_pressure
 blood_pressure_search
+exit
 goodbye
 greeting
 hospital_search
+navigation
 options
 pharmacy_search
 thanks
+navigation
+exit

View File

@@ -1,8 +1,11 @@
+# from python example and tutorial here: https://data-flair.training/blogs/python-chatbot-project/
 import nltk
 from nltk.stem import WordNetLemmatizer
 lemmatizer = WordNetLemmatizer()
 import pickle
 import numpy as np
+import spacy
 from keras.models import load_model
 model = load_model('chatbot_model.h5')
@@ -12,7 +15,6 @@ intents = json.loads(open('intents.json').read())
 words = pickle.load(open('words.pkl','rb'))
 classes = pickle.load(open('classes.pkl','rb'))
 def clean_up_sentence(sentence):
     # tokenize the pattern - splitting words into array
     sentence_words = nltk.word_tokenize(sentence)
@@ -52,19 +54,44 @@ def predict_class(sentence):
 def getResponse(ints, intents_json):
     tag = ints[0]['intent']
     list_of_intents = intents_json['intents']
+    print("ints")
+    print(ints)
     for i in list_of_intents:
         if(i['tag']== tag):
             result = random.choice(i['responses'])
             break
     return result
+def getInfo(sentence):
+    doc = nlp(sentence)
+    start = 0
+    end = 0
+    startBuilding = "random location"
+    stopBuilding = "random location"
+    for token in doc:
+        if token.pos_ == "PROPN" and start == 1:
+            startBuilding = token.text
+        elif token.pos_ == "PROPN" and end == 1:
+            stopBuilding = token.text
+        elif token.text == "to":
+            start = 0
+            end = 1
+        elif token.text == "from":
+            start = 1
+            end = 0
+        else:
+            pass
+        # print(token.text)
+    return [startBuilding, stopBuilding]
 #Creating tkinter GUI
 import tkinter
 from tkinter import *
 def send():
-    msg = EntryBox.get("1.0",'end-1c').strip()
+    msgClean = EntryBox.get("1.0",'end-1c')
+    msg = msgClean.strip()
     EntryBox.delete("0.0",END)
     if msg != '':
@@ -73,9 +100,17 @@ def send():
         ChatBox.config(foreground="#446665", font=("Verdana", 12 ))
         ints = predict_class(msg)
-        res = getResponse(ints, intents)
-        ChatBox.insert(END, "Bot: " + res + '\n\n')
+        if ints[0]['intent'] == "navigation":
+            building = getInfo(msgClean)
+            #TODO: Check if buildings are available
+            res = "Now navigating to " + building[1] + " from " + building[0]
+            #TODO: START CONVERSION TO GPS COORDINATES
+        elif ints[0]['intent'] == "exit":
+            res = getResponse(ints, intents)
+            #TODO: STOP EVERYTHING
+        else:
+            res = getResponse(ints, intents)
+        ChatBox.insert(END, "Belatrix: " + res + '\n\n')
         ChatBox.config(state=DISABLED)
         ChatBox.yview(END)
@@ -86,6 +121,11 @@ root.title("Chatbot")
 root.geometry("400x500")
 root.resizable(width=FALSE, height=FALSE)
+#import nlp dictionary
+nlp = spacy.load("en_core_web_sm")
+nltk.download('punkt')
+nltk.download('wordnet')
 #Create Chat window
 ChatBox = Text(root, bd=0, bg="white", height="8", width="50", font="Arial",)
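Note: a minimal standalone sketch of the entity-grabbing logic added above, for reference only. It assumes the en_core_web_sm model is installed (python -m spacy download en_core_web_sm) and that building names such as "BBB" and "EECS" are tagged as proper nouns (PROPN), which the small model does not guarantee; extract_buildings is an illustrative rename, not a function in this commit.

import spacy

nlp = spacy.load("en_core_web_sm")

def extract_buildings(sentence):
    """Return [start_building, stop_building] parsed from a navigation request."""
    doc = nlp(sentence)
    start, end = 0, 0
    start_building, stop_building = "random location", "random location"
    for token in doc:
        if token.pos_ == "PROPN" and start == 1:
            start_building = token.text   # proper noun seen after "from"
        elif token.pos_ == "PROPN" and end == 1:
            stop_building = token.text    # proper noun seen after "to"
        elif token.text == "to":
            start, end = 0, 1
        elif token.text == "from":
            start, end = 1, 0
    return [start_building, stop_building]

# Hypothetical usage with example building names:
print(extract_buildings("Can you take me from BBB to EECS?"))
# Expected (if both names are tagged PROPN): ['BBB', 'EECS']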

View File

@@ -21,53 +21,18 @@
 },
 {"tag": "options",
  "patterns": ["How you could help me?", "What you can do?", "What help you provide?", "How you can be helpful?", "What support is offered"],
- "responses": ["I can guide you through Adverse drug reaction list, Blood pressure tracking, Hospitals and Pharmacies", "Offering support for Adverse drug reaction, Blood pressure, Hospitals and Pharmacies"],
+ "responses": ["I can take you to multiple buildings including BBB, EECS, and more on north campus."],
  "context": [""]
 },
 {"tag": "navigation",
- "patterns": ["How to check Adverse drug reaction?", "Open adverse drugs module", "Give me a list of drugs causing adverse behavior", "List all drugs suitable for patient with adverse reaction", "Which drugs dont have adverse reaction?" ],
+ "patterns": ["Can you take me to the ", "Open adverse drugs module", "Give me a list of drugs causing adverse behavior", "List all drugs suitable for patient with adverse reaction", "Which drugs dont have adverse reaction?" ],
  "responses": ["Navigating to Adverse drug reaction module"],
- "context": [""]
+ "context": ["navigation_to_building"]
 },
 {"tag": "exit",
- "patterns": ["Open blood pressure module", "Task related to blood pressure", "Blood pressure data entry", "I want to log blood pressure results", "Blood pressure data management" ],
+ "patterns": ["stop", "quit", "end", "I want to stop navigation"],
- "responses": ["Navigating to Blood Pressure module"],
+ "responses": ["Ending current navigation"],
- "context": [""]
+ "context": ["navigation_to_building"]
-},
-{"tag": "blood_pressure_search",
- "patterns": ["I want to search for blood pressure result history", "Blood pressure for patient", "Load patient blood pressure result", "Show blood pressure results for patient", "Find blood pressure results by ID" ],
- "responses": ["Please provide Patient ID", "Patient ID?"],
- "context": ["search_blood_pressure_by_patient_id"]
-},
-{"tag": "search_blood_pressure_by_patient_id",
- "patterns": [],
- "responses": ["Loading Blood pressure result for Patient"],
- "context": [""]
-},
-{"tag": "pharmacy_search",
- "patterns": ["Find me a pharmacy", "Find pharmacy", "List of pharmacies nearby", "Locate pharmacy", "Search pharmacy" ],
- "responses": ["Please provide pharmacy name"],
- "context": ["search_pharmacy_by_name"]
-},
-{"tag": "search_pharmacy_by_name",
- "patterns": [],
- "responses": ["Loading pharmacy details"],
- "context": [""]
-},
-{"tag": "hospital_search",
- "patterns": ["Lookup for hospital", "Searching for hospital to transfer patient", "I want to search hospital data", "Hospital lookup for patient", "Looking up hospital details" ],
- "responses": ["Please provide hospital name or location"],
- "context": ["search_hospital_by_params"]
-},
-{"tag": "search_hospital_by_params",
- "patterns": [],
- "responses": ["Please provide hospital type"],
- "context": ["search_hospital_by_type"]
-},
-{"tag": "search_hospital_by_type",
- "patterns": [],
- "responses": ["Loading hospital details"],
- "context": [""]
 }
 ]
}
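Note: a small sketch of how the trimmed intents file is consumed by the tag lookup, using only the new navigation and exit entries above; the inline JSON and get_response name are illustrative, not part of the commit.

import json, random

intents = json.loads("""{
  "intents": [
    {"tag": "navigation",
     "patterns": ["Can you take me to the "],
     "responses": ["Navigating to Adverse drug reaction module"],
     "context": ["navigation_to_building"]},
    {"tag": "exit",
     "patterns": ["stop", "quit", "end", "I want to stop navigation"],
     "responses": ["Ending current navigation"],
     "context": ["navigation_to_building"]}
  ]
}""")

def get_response(tag):
    # mirror getResponse(): pick a canned response for the predicted tag
    for intent in intents["intents"]:
        if intent["tag"] == tag:
            return random.choice(intent["responses"])

print(get_response("exit"))  # -> Ending current navigation
(The navigation tag keeps its placeholder response in the file; the GUI builds the "Now navigating to ..." string itself from getInfo.)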

View File

@@ -21,12 +21,9 @@ def updatePickle(filename, pklList):
     pickle_in = open(filename + '.pkl',"rb")
     currDict = pickle.load(pickle_in)
     f = open(filename + '.pkl', 'wb') # Pickle file is newly created where foo1.py is
-    pickle.dump(currDict|pklList, f) # dump data to f
+    pickle.dump(currDict + pklList, f) # dump data to f
     f.close()
-printPickle("classes")
-printPickle("words")
 # Example usage
 # createPickle('test', {'Bart', 'Lisa', 'Milhouse', 'Nelson'})
 # updatePickle('test', {'Theo'})
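Note on the change above: "|" is set union while "+" is list concatenation, so the pickled words/classes are presumably lists now; a minimal sketch of the same update, with the function renamed for illustration (duplicates are kept when appending):

import pickle

def update_pickle(filename, new_items):
    with open(filename + '.pkl', 'rb') as f:
        current = pickle.load(f)             # expected to be a list
    with open(filename + '.pkl', 'wb') as f:
        pickle.dump(current + new_items, f)  # append; no de-duplication

# Hypothetical usage, mirroring the new words added in this commit:
# update_pickle('words', ['navigation', 'map', 'locate', 'navigate', 'building'])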

View File

@@ -17,6 +17,10 @@ ignore_letters = ['!', '?', ',', '.']
 intents_file = open('intents.json').read()
 intents = json.loads(intents_file)
+# download nltk resources
+nltk.download('punkt')
+nltk.download('wordnet')
 for intent in intents['intents']:
     for pattern in intent['patterns']:
         #tokenize each word
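Note: the two nltk.download calls added above run on every start; a common variation (an assumption, not part of this commit) is to download only when the resources are missing:

import nltk

for resource, path in [('punkt', 'tokenizers/punkt'), ('wordnet', 'corpora/wordnet')]:
    try:
        nltk.data.find(path)      # raises LookupError if not installed
    except LookupError:
        nltk.download(resource)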

Binary file not shown.

View File

@@ -85,3 +85,8 @@ what
 which
 with
 you
+navigation
+map
+locate
+navigate
+building