diff --git a/src/semantic/buildings_model.h5 b/src/semantic/buildings_model.h5
index 16fd97e..fd9565d 100644
Binary files a/src/semantic/buildings_model.h5 and b/src/semantic/buildings_model.h5 differ
diff --git a/src/semantic/chatbot_model.h5 b/src/semantic/chatbot_model.h5
index 6c0ae6f..1d996b9 100644
Binary files a/src/semantic/chatbot_model.h5 and b/src/semantic/chatbot_model.h5 differ
diff --git a/src/semantic/gui_chatbot.py b/src/semantic/gui_chatbot.py
index 00eab88..cd18fca 100644
--- a/src/semantic/gui_chatbot.py
+++ b/src/semantic/gui_chatbot.py
@@ -12,12 +12,12 @@ model = load_model('chatbot_model.h5')
 modelBuilding = load_model('buildings_model.h5')
 import json
 import random
-intents = json.loads(open('intents.json').read())
-words = pickle.load(open('words.pkl','rb'))
-classes = pickle.load(open('classes.pkl','rb'))
-buildingsIntents = json.loads(open('buildingIntents.json').read())
-building_words = pickle.load(open('building_words.pkl','rb'))
-buildings = pickle.load(open('buildings.pkl','rb'))
+intents = json.loads(open('intents/intents.json').read())
+words = pickle.load(open('pickles/words.pkl','rb'))
+classes = pickle.load(open('pickles/classes.pkl','rb'))
+buildingsIntents = json.loads(open('intents/buildingIntents.json').read())
+building_words = pickle.load(open('pickles/building_words.pkl','rb'))
+buildings = pickle.load(open('pickles/buildings.pkl','rb'))
 confirmation = 0

 def clean_up_sentence(sentence):
diff --git a/src/semantic/buildingIntents.json b/src/semantic/intents/buildingIntents.json
similarity index 100%
rename from src/semantic/buildingIntents.json
rename to src/semantic/intents/buildingIntents.json
diff --git a/src/semantic/intents.json b/src/semantic/intents/intents.json
similarity index 100%
rename from src/semantic/intents.json
rename to src/semantic/intents/intents.json
diff --git a/src/semantic/building_words.pkl b/src/semantic/pickles/building_words.pkl
similarity index 100%
rename from src/semantic/building_words.pkl
rename to src/semantic/pickles/building_words.pkl
diff --git a/src/semantic/building_words.txt b/src/semantic/pickles/building_words.txt
similarity index 100%
rename from src/semantic/building_words.txt
rename to src/semantic/pickles/building_words.txt
diff --git a/src/semantic/buildings.pkl b/src/semantic/pickles/buildings.pkl
similarity index 100%
rename from src/semantic/buildings.pkl
rename to src/semantic/pickles/buildings.pkl
diff --git a/src/semantic/buildings.txt b/src/semantic/pickles/buildings.txt
similarity index 100%
rename from src/semantic/buildings.txt
rename to src/semantic/pickles/buildings.txt
diff --git a/src/semantic/classes.pkl b/src/semantic/pickles/classes.pkl
similarity index 100%
rename from src/semantic/classes.pkl
rename to src/semantic/pickles/classes.pkl
diff --git a/src/semantic/classes.txt b/src/semantic/pickles/classes.txt
similarity index 100%
rename from src/semantic/classes.txt
rename to src/semantic/pickles/classes.txt
diff --git a/src/semantic/pickleManage.py b/src/semantic/pickles/pickleManage.py
similarity index 100%
rename from src/semantic/pickleManage.py
rename to src/semantic/pickles/pickleManage.py
diff --git a/src/semantic/words.pkl b/src/semantic/pickles/words.pkl
similarity index 100%
rename from src/semantic/words.pkl
rename to src/semantic/pickles/words.pkl
diff --git a/src/semantic/words.txt b/src/semantic/pickles/words.txt
similarity index 100%
rename from src/semantic/words.txt
rename to src/semantic/pickles/words.txt
diff --git a/src/semantic/train_buildings.py b/src/semantic/train_buildings.py
index edcc2fc..2e19ac5 100644
--- a/src/semantic/train_buildings.py
+++ b/src/semantic/train_buildings.py
@@ -14,7 +14,7 @@ building_words=[]
 buildings = []
 documents = []
 ignore_letters = ['!', '?', ',', '.']
-buildingIntents_file = open('buildingIntents.json').read()
+buildingIntents_file = open('intents/buildingIntents.json').read()
 buildingIntents = json.loads(buildingIntents_file)

 # download nltk resources
@@ -44,8 +44,8 @@ print (len(buildings), "buildings", buildings)

 # building_words = all building_words, vocabulary
 print (len(building_words), "unique lemmatized building_words", building_words)
-pickle.dump(building_words,open('building_words.pkl','wb'))
-pickle.dump(buildings,open('buildings.pkl','wb'))
+pickle.dump(building_words,open('pickles/building_words.pkl','wb'))
+pickle.dump(buildings,open('pickles/buildings.pkl','wb'))

 # create our training data
 training = []
diff --git a/src/semantic/train_chatbot.py b/src/semantic/train_chatbot.py
index 22e2bdd..39a4b21 100644
--- a/src/semantic/train_chatbot.py
+++ b/src/semantic/train_chatbot.py
@@ -14,7 +14,7 @@ words=[]
 classes = []
 documents = []
 ignore_letters = ['!', '?', ',', '.']
-intents_file = open('intents.json').read()
+intents_file = open('intents/intents.json').read()
 intents = json.loads(intents_file)

 # download nltk resources
@@ -44,8 +44,8 @@ print (len(classes), "classes", classes)

 # words = all words, vocabulary
 print (len(words), "unique lemmatized words", words)
-pickle.dump(words,open('words.pkl','wb'))
-pickle.dump(classes,open('classes.pkl','wb'))
+pickle.dump(words,open('pickles/words.pkl','wb'))
+pickle.dump(classes,open('pickles/classes.pkl','wb'))

 # create our training data
 training = []
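
Note: every updated open() call above still uses a path relative to the process's working directory ('intents/...', 'pickles/...'), so gui_chatbot.py, train_buildings.py, and train_chatbot.py must continue to be launched from inside src/semantic/. A minimal sketch of a working-directory-independent variant, assuming the intents/ and pickles/ directories sit next to the script; BASE_DIR is an illustrative name and not part of this patch:

    # Sketch only: resolve data files relative to this script's own location,
    # so the chatbot can be started from any working directory.
    import json
    import pickle
    from pathlib import Path

    BASE_DIR = Path(__file__).resolve().parent  # .../src/semantic

    with open(BASE_DIR / 'intents' / 'intents.json') as f:
        intents = json.load(f)
    with open(BASE_DIR / 'pickles' / 'words.pkl', 'rb') as f:
        words = pickle.load(f)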