diff --git a/src/semantic/chatbot_model.h5 b/src/semantic/chatbot_model.h5
index 2449402..035945f 100644
Binary files a/src/semantic/chatbot_model.h5 and b/src/semantic/chatbot_model.h5 differ
diff --git a/src/semantic/classes.pkl b/src/semantic/classes.pkl
index ccf71de..e670d60 100644
Binary files a/src/semantic/classes.pkl and b/src/semantic/classes.pkl differ
diff --git a/src/semantic/classes.txt b/src/semantic/classes.txt
index 52d56a9..1b20465 100644
--- a/src/semantic/classes.txt
+++ b/src/semantic/classes.txt
@@ -1,9 +1,11 @@
-adverse_drug
-blood_pressure
 blood_pressure_search
+exit
 goodbye
 greeting
 hospital_search
+navigation
 options
 pharmacy_search
 thanks
+navigation
+exit
diff --git a/src/semantic/gui_chatbot.py b/src/semantic/gui_chatbot.py
index d205db3..17def45 100644
--- a/src/semantic/gui_chatbot.py
+++ b/src/semantic/gui_chatbot.py
@@ -1,8 +1,11 @@
+# from python example and tutorial here: https://data-flair.training/blogs/python-chatbot-project/
+
 import nltk
 from nltk.stem import WordNetLemmatizer
 lemmatizer = WordNetLemmatizer()
 import pickle
 import numpy as np
+import spacy
 
 from keras.models import load_model
 model = load_model('chatbot_model.h5')
@@ -12,7 +15,6 @@ intents = json.loads(open('intents.json').read())
 words = pickle.load(open('words.pkl','rb'))
 classes = pickle.load(open('classes.pkl','rb'))
-
 def clean_up_sentence(sentence):
     # tokenize the pattern - splitting words into array
     sentence_words = nltk.word_tokenize(sentence)
@@ -26,10 +28,10 @@ def bag_of_words(sentence, words, show_details=True):
     # tokenizing patterns
     sentence_words = clean_up_sentence(sentence)
     # bag of words - vocabulary matrix
-    bag = [0]*len(words)
+    bag = [0]*len(words) 
    for s in sentence_words:
        for i,word in enumerate(words):
-            if word == s:
+            if word == s: # assign 1 if current word is in the vocabulary position
                bag[i] = 1
                if show_details:
@@ -52,40 +54,78 @@ def predict_class(sentence):
 def getResponse(ints, intents_json):
     tag = ints[0]['intent']
     list_of_intents = intents_json['intents']
+    print("ints")
+    print(ints)
     for i in list_of_intents:
         if(i['tag']== tag):
             result = random.choice(i['responses'])
             break
     return result
 
+def getInfo(sentence):
+    doc = nlp(sentence)
+    start = 0
+    end = 0
+    startBuilding = "random location"
+    stopBuilding = "random location"
+    for token in doc:
+        if token.pos_ == "PROPN" and start == 1:
+            startBuilding = token.text
+        elif token.pos_ == "PROPN" and end == 1:
+            stopBuilding = token.text
+        elif token.text == "to":
+            start = 0
+            end = 1
+        elif token.text == "from":
+            start = 1
+            end = 0
+        else:
+            pass
+        # print(token.text)
+    return [startBuilding, stopBuilding]
+
 #Creating tkinter GUI
 import tkinter
 from tkinter import *
 
 def send():
-    msg = EntryBox.get("1.0",'end-1c').strip()
+    msgClean = EntryBox.get("1.0",'end-1c')
+    msg = msgClean.strip()
     EntryBox.delete("0.0",END)
 
     if msg != '':
         ChatBox.config(state=NORMAL)
         ChatBox.insert(END, "You: " + msg + '\n\n')
         ChatBox.config(foreground="#446665", font=("Verdana", 12 ))
-
+
         ints = predict_class(msg)
-        res = getResponse(ints, intents)
-
-        ChatBox.insert(END, "Bot: " + res + '\n\n')
-
+        if ints[0]['intent'] == "navigation":
+            building = getInfo(msgClean)
+            #TODO: Check if buildings are available
+            res = "Now navigating to " + building[1] + " from " + building[0]
+            #TODO: START CONVERSION TO GPS COORDINATES
+        elif ints[0]['intent'] == "exit":
+            res = getResponse(ints, intents)
+            #TODO: STOP EVERYTHING
+        else:
+            res = getResponse(ints, intents)
+        ChatBox.insert(END, "Belatrix: " + res + '\n\n')
+
         ChatBox.config(state=DISABLED)
         ChatBox.yview(END)
-
+
 root = Tk()
 root.title("Chatbot")
 root.geometry("400x500")
 root.resizable(width=FALSE, height=FALSE)
 
+#import nlp dictionary
+nlp = spacy.load("en_core_web_sm")
+nltk.download('punkt')
+nltk.download('wordnet')
+
 #Create Chat window
 ChatBox = Text(root, bd=0, bg="white", height="8", width="50", font="Arial",)
diff --git a/src/semantic/intents.json b/src/semantic/intents.json
index 3f97da9..f8de93e 100644
--- a/src/semantic/intents.json
+++ b/src/semantic/intents.json
@@ -21,53 +21,18 @@
         },
         {"tag": "options",
          "patterns": ["How you could help me?", "What you can do?", "What help you provide?", "How you can be helpful?", "What support is offered"],
-         "responses": ["I can guide you through Adverse drug reaction list, Blood pressure tracking, Hospitals and Pharmacies", "Offering support for Adverse drug reaction, Blood pressure, Hospitals and Pharmacies"],
+         "responses": ["I can take you to multiple buildings including BBB, EECS, and more on north campus."],
          "context": [""]
         },
        {"tag": "navigation",
-        "patterns": ["How to check Adverse drug reaction?", "Open adverse drugs module", "Give me a list of drugs causing adverse behavior", "List all drugs suitable for patient with adverse reaction", "Which drugs dont have adverse reaction?" ],
+        "patterns": ["Can you take me to the ", "Open adverse drugs module", "Give me a list of drugs causing adverse behavior", "List all drugs suitable for patient with adverse reaction", "Which drugs dont have adverse reaction?" ],
         "responses": ["Navigating to Adverse drug reaction module"],
-        "context": [""]
+        "context": ["navigation_to_building"]
        },
        {"tag": "exit",
-        "patterns": ["Open blood pressure module", "Task related to blood pressure", "Blood pressure data entry", "I want to log blood pressure results", "Blood pressure data management" ],
-        "responses": ["Navigating to Blood Pressure module"],
-        "context": [""]
-       },
-       {"tag": "blood_pressure_search",
-        "patterns": ["I want to search for blood pressure result history", "Blood pressure for patient", "Load patient blood pressure result", "Show blood pressure results for patient", "Find blood pressure results by ID" ],
-        "responses": ["Please provide Patient ID", "Patient ID?"],
-        "context": ["search_blood_pressure_by_patient_id"]
-       },
-       {"tag": "search_blood_pressure_by_patient_id",
-        "patterns": [],
-        "responses": ["Loading Blood pressure result for Patient"],
-        "context": [""]
-       },
-       {"tag": "pharmacy_search",
-        "patterns": ["Find me a pharmacy", "Find pharmacy", "List of pharmacies nearby", "Locate pharmacy", "Search pharmacy" ],
-        "responses": ["Please provide pharmacy name"],
-        "context": ["search_pharmacy_by_name"]
-       },
-       {"tag": "search_pharmacy_by_name",
-        "patterns": [],
-        "responses": ["Loading pharmacy details"],
-        "context": [""]
-       },
-       {"tag": "hospital_search",
-        "patterns": ["Lookup for hospital", "Searching for hospital to transfer patient", "I want to search hospital data", "Hospital lookup for patient", "Looking up hospital details" ],
-        "responses": ["Please provide hospital name or location"],
-        "context": ["search_hospital_by_params"]
-       },
-       {"tag": "search_hospital_by_params",
-        "patterns": [],
-        "responses": ["Please provide hospital type"],
-        "context": ["search_hospital_by_type"]
-       },
-       {"tag": "search_hospital_by_type",
-        "patterns": [],
-        "responses": ["Loading hospital details"],
-        "context": [""]
+        "patterns": ["stop", "quit", "end", "I want to stop navigation"],
+        "responses": ["Ending current navigation"],
+        "context": ["navigation_to_building"]
        }
    ]
 }
diff --git a/src/semantic/pickleManage.py b/src/semantic/pickleManage.py
index daa8b3f..17f0a6f 100644
--- a/src/semantic/pickleManage.py
+++ b/src/semantic/pickleManage.py
@@ -21,12 +21,9 @@ def updatePickle(filename, pklList):
     pickle_in = open(filename + '.pkl',"rb")
     currDict = pickle.load(pickle_in)
     f = open(filename + '.pkl', 'wb') # Pickle file is newly created where foo1.py is
-    pickle.dump(currDict|pklList, f) # dump data to f
+    pickle.dump(currDict + pklList, f) # dump data to f
     f.close()
 
-printPickle("classes")
-printPickle("words")
-
 # Example usage
 # createPickle('test', {'Bart', 'Lisa', 'Milhouse', 'Nelson'})
 # updatePickle('test', {'Theo'})
diff --git a/src/semantic/train_chatbot.py b/src/semantic/train_chatbot.py
index 2c211ae..22e2bdd 100644
--- a/src/semantic/train_chatbot.py
+++ b/src/semantic/train_chatbot.py
@@ -17,6 +17,10 @@ ignore_letters = ['!', '?', ',', '.']
 intents_file = open('intents.json').read()
 intents = json.loads(intents_file)
 
+# download nltk resources
+nltk.download('punkt')
+nltk.download('wordnet')
+
 for intent in intents['intents']:
     for pattern in intent['patterns']:
         #tokenize each word
diff --git a/src/semantic/words.pkl b/src/semantic/words.pkl
index 71d696a..0397e80 100644
Binary files a/src/semantic/words.pkl and b/src/semantic/words.pkl differ
diff --git a/src/semantic/words.txt b/src/semantic/words.txt
index 724b6dd..40d4d55 100644
--- a/src/semantic/words.txt
+++ b/src/semantic/words.txt
@@ -85,3 +85,8 @@ what
 which
 with
 you
+navigation
+map
+locate
+navigate
+building