|
| 1 | +import json |
| 2 | +import torch |
| 3 | +import nltk |
| 4 | +import pickle |
| 5 | +import random |
| 6 | +from datetime import datetime |
| 7 | +import numpy as np |
| 8 | +import pandas as pd |
| 9 | + |
| 10 | +from nnet import NeuralNet |
| 11 | +from nltk_utils import bag_of_words |
| 12 | +from flask import Flask, render_template, url_for, request, jsonify |
| 13 | + |
# Seed the PRNG so canned replies vary between runs. Calling seed() with no
# argument uses OS entropy / current time; the original passed a datetime
# object, which has been deprecated as a seed type since Python 3.9.
random.seed()

# Inference runs on CPU. map_location ensures the checkpoint loads even if it
# was saved from a GPU process (the original torch.load would fail there).
device = torch.device('cpu')
FILE = "models/data.pth"
model_data = torch.load(FILE, map_location=device)

# Hyperparameters and vocabulary saved alongside the trained weights.
input_size = model_data['input_size']
hidden_size = model_data['hidden_size']
output_size = model_data['output_size']
all_words = model_data['all_words']
tags = model_data['tags']
model_state = model_data['model_state']

# Rebuild the symptom-classifier network and put it in eval mode
# (disables dropout/batch-norm training behavior).
nlp_model = NeuralNet(input_size, hidden_size, output_size).to(device)
nlp_model.load_state_dict(model_state)
nlp_model.eval()

# Disease reference tables; 'Disease' keys are normalized to lowercase,
# stripped strings so lookups in the /symptom route match reliably.
diseases_description = pd.read_csv("data/symptom_Description.csv")
diseases_description['Disease'] = diseases_description['Disease'].apply(lambda x: x.lower().strip(" "))

disease_precaution = pd.read_csv("data/symptom_precaution.csv")
disease_precaution['Disease'] = disease_precaution['Disease'].apply(lambda x: x.lower().strip(" "))

# Severity weights: normalize every string cell (lowercase, no spaces) so
# symptom names match the tags produced by the NLP model.
symptom_severity = pd.read_csv("data/Symptom-severity.csv")
symptom_severity = symptom_severity.applymap(lambda s: s.lower().strip(" ").replace(" ", "") if type(s) == str else s)


# Ordered list of symptom features expected by the fitted disease model.
with open('data/list_of_symptoms.pickle', 'rb') as data_file:
    symptoms_list = pickle.load(data_file)

# Pre-fitted scikit-learn-style classifier mapping symptom vectors to a disease.
with open('models/fitted_model.pickle', 'rb') as modelFile:
    prediction_model = pickle.load(modelFile)

# NOTE(review): module-level mutable set shared across all requests — not
# safe for concurrent users; kept as-is since routes depend on it.
user_symptoms = set()

app = Flask(__name__)
| 50 | + |
def get_symptom(sentence):
    """Classify a free-text sentence as one symptom tag.

    Args:
        sentence: raw user input string.

    Returns:
        (tag, prob): the predicted symptom tag from ``tags`` and the model's
        softmax confidence for it as a float in [0, 1].
    """
    tokens = nltk.word_tokenize(sentence)
    X = bag_of_words(tokens, all_words)
    # Model expects a batch dimension: (1, vocab_size).
    X = torch.from_numpy(X.reshape(1, X.shape[0]))

    # Pure inference — skip autograd bookkeeping (the original built an
    # unused computation graph on every request).
    with torch.no_grad():
        output = nlp_model(X)

    _, predicted = torch.max(output, dim=1)
    tag = tags[predicted.item()]

    # Softmax over the single-row output gives a per-tag probability.
    probs = torch.softmax(output, dim=1)
    prob = probs[0][predicted.item()].item()

    return tag, prob
| 66 | + |
@app.route('/')
def index():
    """Render the landing page, passing the known symptom names as JSON.

    Also resets the per-server symptom set so a page reload starts a
    fresh conversation.
    """
    user_symptoms.clear()
    # 'with' guarantees the handle is closed (the original leaked it).
    with open("static/assets/files/ds_symptoms.txt", "r") as symptoms_file:
        all_symptoms = symptoms_file.readlines()
    # Clean each line for display: drop quotes, underscores -> spaces,
    # strip the trailing ",\n" left by the file format.
    data = [s.replace("'", "").replace("_", " ").replace(",\n", "") for s in all_symptoms]
    return render_template('index.html', data=json.dumps(data))
| 78 | + |
| 79 | + |
@app.route('/symptom', methods=['GET', 'POST'])
def predict_symptom():
    """Chat endpoint: collect symptoms turn by turn; on "done", predict a disease.

    Expects JSON {"sentence": <user text>}. Returns a JSON-encoded reply
    string (may contain HTML markup rendered by the chat UI).
    """
    print("Request json:", request.json)
    sentence = request.json['sentence']
    # "done" (allowing "." / "!" and any casing) ends symptom entry and
    # triggers the disease prediction; anything else is treated as a symptom.
    if sentence.replace(".", "").replace("!","").lower().strip() == "done":

        if not user_symptoms:
            # No symptoms gathered yet — nudge the user instead of predicting.
            response_sentence = random.choice(
                ["I can't know what disease you may have if you don't enter any symptoms :)",
                "Meddy can't know the disease if there are no symptoms...",
                "You first have to enter some symptoms!"])
        else:
            # Build the binary feature vector in the exact order of
            # symptoms_list, as expected by the fitted classifier.
            x_test = []

            for each in symptoms_list:
                if each in user_symptoms:
                    x_test.append(1)
                else:
                    x_test.append(0)

            x_test = np.asarray(x_test)
            # reshape(1, -1): single sample for the sklearn-style predict API.
            disease = prediction_model.predict(x_test.reshape(1,-1))[0]
            print(disease)

            # Look up description and precautions; 'Disease' columns were
            # lowercased/stripped at load time, so normalize the key the same way.
            description = diseases_description.loc[diseases_description['Disease'] == disease.strip(" ").lower(), 'Description'].iloc[0]
            precaution = disease_precaution[disease_precaution['Disease'] == disease.strip(" ").lower()]
            # NOTE(review): assumes all four Precaution_* cells are non-null
            # strings for every disease — a NaN here would raise on concat; verify data.
            precautions = 'Precautions: ' + precaution.Precaution_1.iloc[0] + ", " + precaution.Precaution_2.iloc[0] + ", " + precaution.Precaution_3.iloc[0] + ", " + precaution.Precaution_4.iloc[0]
            response_sentence = "It looks to me like you have " + disease + ". <br><br> <i>Description: " + description + "</i>" + "<br><br><b>"+ precautions + "</b>"

            # Gather per-symptom severity weights to decide whether to
            # recommend seeing a real doctor.
            severity = []

            for each in user_symptoms:
                # Normalize the symptom name to match the normalized
                # 'Symptom' column of the severity table.
                severity.append(symptom_severity.loc[symptom_severity['Symptom'] == each.lower().strip(" ").replace(" ", ""), 'weight'].iloc[0])

            # Heuristic thresholds: high average or any single very severe symptom.
            if np.mean(severity) > 4 or np.max(severity) > 5:
                response_sentence = response_sentence + "<br><br>Considering your symptoms are severe, and Meddy isn't a real doctor, you should consider talking to one. :)"

            # Reset conversation state for the next session.
            user_symptoms.clear()
            severity.clear()

    else:
        # Not "done": classify this sentence as a symptom via the NLP model.
        symptom, prob = get_symptom(sentence)
        print("Symptom:", symptom, ", prob:", prob)
        # Only accept the classification when the model is >50% confident.
        if prob > .5:
            response_sentence = f"Hmm, I'm {(prob * 100):.2f}% sure this is " + symptom + "."
            user_symptoms.add(symptom)
        else:
            response_sentence = "I'm sorry, but I don't understand you."

    print("User symptoms:", user_symptoms)

    # Underscores in tags/disease names become spaces for display.
    return jsonify(response_sentence.replace("_", " "))
0 commit comments