End to End Processing

This walkthrough assumes you are working in a Jupyter notebook.

1. Access the database and start a session:

import os
import sys
from sqlalchemy import create_engine
from sqlalchemy import exc
from sqlalchemy import func
from sqlalchemy import Table, text
module_path = os.path.abspath(os.path.join('..'))
if module_path not in sys.path:
    sys.path.append(module_path)
from internal_displacement.model.model import Status, Session, Category, Article, Content, Country, CountryTerm, \
    Location, Report, ReportDateSpan, ArticleCategory, Base

db_host = os.environ.get('DB_HOST', 'localhost')  # adjust to match your database host
db_url = 'postgresql://{user}:{password}@{db_host}/{db}'.format(
    user='jupyter', password='jupyter', db_host=db_host, db='id')

engine = create_engine(db_url)
Session.configure(bind=engine)
session = Session()
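
As a quick optional sanity check, the text helper imported above can be used to run a trivial query and confirm the connection works:

session.execute(text('SELECT 1'))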

2. Import the relevant modules:

import spacy
from internal_displacement.scraper import Scraper
from internal_displacement.interpreter import Interpreter
from internal_displacement.pipeline import Pipeline
from internal_displacement.add_countries import load_countries, delete_countries
import pandas as pd

3. If necessary, pre-load countries into the database:

load_countries(session)
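
To undo this step later, delete_countries (imported in step 2) can be used; a sketch, assuming it takes the session the same way as load_countries:

delete_countries(session)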

4. Set up the Scraper and Interpreter:

scraper = Scraper()
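# note: spacy.load('en') requires the English model to be installed first,
# e.g. by running: python -m spacy download en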
nlp = spacy.load('en')
person_reporting_terms = [
    'displaced', 'evacuated', 'forced', 'flee', 'homeless', 'relief camp',
    'sheltered', 'relocated', 'stranded', 'stuck', 'killed', 'dead', 'died', 'drown'
]

structure_reporting_terms = [
    'destroyed', 'damaged', 'swept', 'collapsed',
    'flooded', 'washed', 'inundated', 'evacuate'
]

person_reporting_units = ["families", "person", "people", "individuals", "locals", "villagers", "residents",
                          "occupants", "citizens", "households", "life"]

structure_reporting_units = ["home", "house", "hut", "dwelling", "building", "shop", "business", "apartment",
                             "flat", "residence"]

relevant_article_terms = ['rainstorm', 'hurricane',
                          'tornado', 'rain', 'storm', 'earthquake']
relevant_article_lemmas = [t.lemma_ for t in nlp(
    " ".join(relevant_article_terms))]

data_path = '../data'
interpreter = Interpreter(nlp, person_reporting_terms, structure_reporting_terms, person_reporting_units,
                          structure_reporting_units, relevant_article_lemmas, data_path,
                          model_path='../internal_displacement/classifiers/default_model.pkl',
                          encoder_path='../internal_displacement/classifiers/default_encoder.pkl')

5. Initialize the Pipeline:

pipeline = Pipeline(session, scraper, interpreter)

6. Process a list of URLs:

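The loop below assumes url_list is an iterable of article URLs. As a sketch, it could be built from a CSV using the pandas import from step 2 (the file name and column name here are hypothetical):

# Hypothetical input file with one article URL per row in a 'URL' column
urls_df = pd.read_csv('../data/input_urls.csv')
url_list = urls_df['URL'].tolist()
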
for url in url_list:
    try:
        pipeline.process_url(url)
    except exc.IntegrityError:
        session.rollback()
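
The IntegrityError handler catches inserts that violate a database constraint, most commonly a URL that is already in the database, and rolls the session back so the loop can continue with the remaining URLs.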

7. Check the number of articles in the database:

print("{} articles in database".format(session.query(Article.id).count()))

8. Check the status of the articles in the database:

article_stats = session.query(Article.status, func.count(Article.status)).group_by(Article.status).all()
print("Article statuses:")
for status, ct in article_stats:
    print("{}: {}".format(status, ct))
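
Extracted reports can be counted in the same way; a sketch, assuming the Report model has an integer id column like Article:

print("{} reports in database".format(session.query(Report.id).count()))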