Sentiment Analysis Notes 1.0 — NLTK Basics

NLTK

Practical examples of natural language processing (NLP) like speech recognition, speech translation, understanding complete sentences, understanding synonyms of matching words, and writing complete grammatically correct sentences and paragraphs.

Benefits

  1. Search engines, e.g. Google
  2. Social media feeds, e.g. Facebook
  3. Speech engines, e.g. Siri
    open source Natural Language Processing (NLP) libraries
  • Natural language toolkit (NLTK)
  • Apache OpenNLP
  • Stanford NLP suite
  • Gate NLP library
    pip3 install nltk
1
2
3
4
# Works across Python 3 versions; nltk itself is installed with: pip3 install nltk
import nltk
# Opens the NLTK downloader (GUI or console) to fetch corpora/models interactively.
nltk.download()
# install all packages

Tokenize Text Using Pure Python

Crawl a webpage

1
2
3
4
5
6
7
8
9
10
11
12
13
# Fetch a page and print the raw HTML (full of markup tags).
import urllib.request

# A context manager closes the HTTP connection even if read() fails
# (the original never closed the response).
with urllib.request.urlopen('http://php.net/') as response:
    html = response.read()
print(html)

# Clean the data: strip the HTML tags and keep only the visible text.
from bs4 import BeautifulSoup
import urllib.request

with urllib.request.urlopen('http://php.net/') as response:
    html = response.read()
soup = BeautifulSoup(html, "html5lib")
# strip=True trims leading/trailing whitespace from each text fragment.
text = soup.get_text(strip=True)
print(text)

HTML

1
2
3
4
5
6
7
8
9
10
# Convert the visible text of a crawled page into whitespace-separated tokens.
from bs4 import BeautifulSoup
import urllib.request

# Close the connection deterministically with a context manager
# (the original leaked the response object).
with urllib.request.urlopen('http://php.net/') as response:
    html = response.read()
soup = BeautifulSoup(html, "html5lib")
text = soup.get_text(strip=True)
# str.split() already returns a list; the original wrapped it in a
# do-nothing comprehension.
tokens = text.split()
print(tokens)

BS4

Count Word Frequency

calculate frequency using FreqDist()

1
2
3
4
5
6
7
8
9
10
11
12
13
# Count word frequencies on a crawled page with nltk.FreqDist.
from bs4 import BeautifulSoup
import urllib.request
import nltk

with urllib.request.urlopen('http://php.net/') as response:  # close socket when done
    html = response.read()
soup = BeautifulSoup(html, "html5lib")
text = soup.get_text(strip=True)
tokens = text.split()
freq = nltk.FreqDist(tokens)
for key, val in freq.items():
    # the print line lost its indentation in the original paste (IndentationError)
    print(str(key) + ':' + str(val))
# plot a graph of the 20 most frequent tokens
freq.plot(20, cumulative=False)

freq

Screen Shot 2017-11-14 at 11.02.25 AM

Remove Stop Words Using NLTK

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
# Remove English stop words before counting word frequencies.
from bs4 import BeautifulSoup
import urllib.request
import nltk
from nltk.corpus import stopwords

with urllib.request.urlopen('http://php.net/') as response:
    html = response.read()
soup = BeautifulSoup(html, "html5lib")
text = soup.get_text(strip=True)
tokens = text.split()
# Build the stop-word set ONCE: the original computed `sr` and then ignored it,
# re-loading the corpus on every loop iteration; a set also makes each
# membership test O(1).
stop_words = set(stopwords.words('english'))
# A comprehension replaces the copy-then-remove loop, which was O(n^2)
# (list.remove scans from the front each time) and had lost its indentation.
clean_tokens = [token for token in tokens if token not in stop_words]
freq = nltk.FreqDist(clean_tokens)
for key, val in freq.items():
    print(str(key) + ':' + str(val))
# plot the 20 most common non-stop-word tokens
freq.plot(20, cumulative=False)

Screen Shot 2017-11-14 at 11.02.35 AM

Tokenize the text we got

use word_tokenize / sent_tokenize (sentence splitting is backed by PunktSentenceTokenizer)

1
2
3
# Split an English sentence into word tokens with NLTK's default tokenizer.
from nltk.tokenize import word_tokenize

mytext = "Hello Mr. Adam, how are you? I hope everything is going well. Today is a good day, see you dude."
word_list = word_tokenize(mytext)
print(word_list)

stop

Tokenize text in non-English languages

1
2
3
# Split French text into sentences by naming the language explicitly.
from nltk.tokenize import sent_tokenize

mytext = "Bonjour M. Adam, comment allez-vous? J'espère que tout va bien. Aujourd'hui est un bon jour."
sentence_list = sent_tokenize(mytext, "french")
print(sentence_list)

Finding synonyms with WordNet (similar meaning)

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
# Look up WordNet synsets: definitions, usage examples, and synonym lemmas.
from nltk.corpus import wordnet

syn = wordnet.synsets("pain")
print(syn[0].definition())
print(syn[0].examples())

# wordnet.synsets() returns an EMPTY list for out-of-vocabulary words
# (e.g. "NLP"), so indexing [0] unconditionally raised IndexError; guard first.
syna = wordnet.synsets("NLP")
if syna:
    print(syna[0].definition())
syna = wordnet.synsets("Python")
if syna:
    print(syna[0].definition())

# Collect every synonym lemma across all synsets of "Computer".
synonyms = []
for syn in wordnet.synsets('Computer'):
    for lemma in syn.lemmas():  # inner loop lost its indentation in the paste
        synonyms.append(lemma.name())
print(synonyms)

Screen Shot 2017-11-14 at 10.26.22 AM

Finding antonyms with WordNet (opposite meaning)

1
2
3
4
5
6
7
# Collect antonyms of "new": for each lemma of each synset, take the first
# antonym lemma where one exists.
from nltk.corpus import wordnet

antonyms = []
# The loop bodies lost their indentation in the original paste (IndentationError).
for syn in wordnet.synsets("new"):
    for l in syn.lemmas():
        if l.antonyms():
            antonyms.append(l.antonyms()[0].name())
print(antonyms)

Screen Shot 2017-11-14 at 10.27.22 AM

Stemming words

1
2
3
4
5
6
# Reduce a word to its stem with the Porter algorithm.
from nltk.stem import PorterStemmer

stemmer = PorterStemmer()
# The original closed the string with a curly quote ('working’) — a SyntaxError.
print(stemmer.stem('working'))
# work

stem non-english words

1
2
3
4
5
6
# Snowball supports stemming in many languages; list them, then stem French.
from nltk.stem import SnowballStemmer

# The original print was missing its closing parenthesis (SyntaxError).
print(SnowballStemmer.languages)

french_stemmer = SnowballStemmer('french')
print(french_stemmer.stem("French word"))

Lemmatize words using WordNet (more accurate than stemming)

1
2
3
4
5
6
7
# Lemmatize "playing" under each part-of-speech tag — verb, noun, adjective,
# adverb — since the pos argument changes which lemma WordNet returns.
from nltk.stem import WordNetLemmatizer

lemmatizer = WordNetLemmatizer()
for pos_tag in ("v", "n", "a", "r"):
    print(lemmatizer.lemmatize('playing', pos=pos_tag))