So I accidentally installed a package without checking its size, and it crashed my repl

You’d probably be able to figure it out eventually for someone, right?


Well, it depends on whether or not crosis will even be able to read the files, but hopefully yes.


I managed to install TensorFlow; now I just need an example program.
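
If you want a quick sanity check that the install actually works before wiring up a full example, something like this should print the version and run a tiny op (just a sketch, nothing project-specific):

import tensorflow as tf

print(tf.__version__)                  # confirms the package imports
print(tf.reduce_sum(tf.ones((2, 2))))  # prints a tf.Tensor of 4.0 if ops run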


Here's the code and stuff that I originally had. It takes a little time to set up; I don't really know any quick examples other than the few guides I've been reading.
This goes in a file named intense.json:

"intents": [
{"tag": "greetings",
"patterns": ["hello", "hi"],
"responses": ["Hello!", "Hey!"]
}

This goes into a file called training.py:

import random
import json
import pickle
import numpy as np
import nltk
  
from keras.models import Sequential
from nltk.stem import WordNetLemmatizer
from keras.layers import Dense, Activation, Dropout
from keras.optimizers import SGD
  
  
lemmatizer = WordNetLemmatizer()
  
intents = json.loads(open("intense.json").read())
  
words = []
classes = []
documents = []
ignore_letters = ["?", "!", ".", ","]
for intent in intents['intents']:
    for pattern in intent['patterns']:
        word_list = nltk.word_tokenize(pattern)
        words.extend(word_list)  # and adding them to words list
          
        documents.append((word_list, intent['tag']))
  
        if intent['tag'] not in classes:
            classes.append(intent['tag'])
  
words = [lemmatizer.lemmatize(word)
         for word in words if word not in ignore_letters]
words = sorted(set(words))
  
pickle.dump(words, open('words.pkl', 'wb'))
pickle.dump(classes, open('classes.pkl', 'wb'))

training = []
output_empty = [0] * len(classes)
for document in documents:
    # build a bag-of-words vector marking which known words appear in this pattern
    bag = []
    word_patterns = document[0]
    word_patterns = [lemmatizer.lemmatize(word.lower())
                     for word in word_patterns]
    for word in words:
        bag.append(1 if word in word_patterns else 0)

    # one-hot encode the tag this pattern belongs to
    output_row = list(output_empty)
    output_row[classes.index(document[1])] = 1
    training.append([bag, output_row])
random.shuffle(training)
# bag and output_row have different lengths, so this is a ragged array;
# dtype=object keeps newer NumPy versions from raising an error here
training = np.array(training, dtype=object)
  
train_x = list(training[:, 0])
train_y = list(training[:, 1])

model = Sequential()
model.add(Dense(128, input_shape=(len(train_x[0]), ),
                activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(len(train_y[0]), 
                activation='softmax'))
  
# newer Keras versions take learning_rate= here; the old lr= and decay= arguments were removed
sgd = SGD(learning_rate=0.01, momentum=0.9, nesterov=True)
model.compile(loss='categorical_crossentropy',
              optimizer=sgd, metrics=['accuracy'])
hist = model.fit(np.array(train_x), np.array(train_y),
                 epochs=200, batch_size=5, verbose=1)
  
model.save("chatbotmodel.h5")  # save() only takes the path; hist isn't a save argument
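
One note before running training.py: nltk needs its tokenizer and lemmatizer data downloaded once, or word_tokenize will throw a LookupError. Something like this in the console should do it (assuming a standard nltk install; very new nltk versions may ask for "punkt_tab" instead):

import nltk
nltk.download('punkt')    # data used by nltk.word_tokenize
nltk.download('wordnet')  # data used by WordNetLemmatizer

After that, run training.py once; it writes words.pkl, classes.pkl, and chatbotmodel.h5, which main.py loads.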

This code will go directly in main.py:

import random
import json
import pickle
import numpy as np
import nltk
from keras.models import load_model
from nltk.stem import WordNetLemmatizer
lemmatizer = WordNetLemmatizer()
intents = json.loads(open("intense.json").read())
words = pickle.load(open('words.pkl', 'rb'))
classes = pickle.load(open('classes.pkl', 'rb'))
model = load_model('chatbotmodel.h5')
  
def clean_up_sentences(sentence):
    sentence_words = nltk.word_tokenize(sentence)
    sentence_words = [lemmatizer.lemmatize(word)
                      for word in sentence_words]
    return sentence_words
  
def bagw(sentence):
    sentence_words = clean_up_sentences(sentence)
    bag = [0]*len(words)
    for w in sentence_words:
        for i, word in enumerate(words):
            if word == w:
                bag[i] = 1
    return np.array(bag)
  
def predict_class(sentence):
    bow = bagw(sentence)
    res = model.predict(np.array([bow]))[0]
    ERROR_THRESHOLD = 0.25
    # keep only predictions above the threshold, sorted most likely first
    results = [[i, r] for i, r in enumerate(res)
               if r > ERROR_THRESHOLD]
    results.sort(key=lambda x: x[1], reverse=True)
    return_list = []
    for r in results:
        return_list.append({'intent': classes[r[0]],
                            'probability': str(r[1])})
    # return outside the loop, otherwise only the first result comes back
    return return_list
  
def get_response(intents_list, intents_json):
    # if nothing scored above the threshold, fall back to a default reply
    if not intents_list:
        return "Sorry, I don't understand that."
    tag = intents_list[0]['intent']
    list_of_intents = intents_json['intents']
    result = ""
    for i in list_of_intents:
        if i['tag'] == tag:
            result = random.choice(i['responses'])
            break
    return result
  
print("Chatbot is up!")
  
while True:
    message = input("")
    ints = predict_class(message)
    res = get_response(ints, intents)
    print(res)

What this should do is make a mini chatbot that responds to "hello" and "hi": it tokenizes and lemmatizes whatever you type, matches the words against the trained intents, and picks one of that tag's responses. The more patterns and intents you give it, the more it can respond to.
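
If you want it to handle more than greetings, add more intent objects to intense.json and run training.py again. For example (the goodbye intent here is just an illustration, not part of my original file):

{
  "intents": [
    {"tag": "greetings",
     "patterns": ["hello", "hi"],
     "responses": ["Hello!", "Hey!"]
    },
    {"tag": "goodbye",
     "patterns": ["bye", "see you later"],
     "responses": ["Goodbye!", "See you!"]
    }
  ]
}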
