Classification
from nltk.corpus import names
l = ([(name, 'male') for name in names.words('male.txt')] +
[(name, 'female') for name in names.words('female.txt')])
print("\nNumber of male names:")
print(len(names.words('male.txt')))
print("\nNumber of female names:")
print(len(names.words('female.txt')))
male_names = names.words('male.txt')
female_names = names.words('female.txt')
print("\nFirst 10 male names:")
print(male_names[0:15])
print("\nFirst 10 female names:")
print(female_names[0:15])
import random
random.shuffle(l)   # shuffle the labelled names list defined above
def gender_features(word):
    return {'last_letter': word[-1]}
feature_sets = [(gender_features(n), gender) for (n, gender) in l]
train_set, test_set = feature_sets[1000:], feature_sets[:1000]
from nltk import NaiveBayesClassifier
model = NaiveBayesClassifier.train(train_set)
model.classify(gender_features('Neo'))   # example name; classify whatever name is asked
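The test_set above is never evaluated; a minimal sketch, using the model and test_set already defined, to check accuracy and inspect the most informative last letters:
from nltk.classify import accuracy
print(accuracy(model, test_set))            # fraction of the 1000 held-out names classified correctly
model.show_most_informative_features(5)     # last letters that most strongly signal 'male' vs 'female'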
Clustering
Hierarchical
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.cluster import KMeans
documents = ['Mr. and Mrs. Dursley, of number four, Privet Drive, were proud to say that they were perfectly normal, thank you very much.',
'They were the last people you’d expect to be involved in anything strange or mysterious, because they just didn’t hold with such nonsense.',
'Mr. Dursley was the director of a firm called Grunnings, which made drills.',
'He was a big, beefy man with hardly any neck, although he did have a very large mustache.',
'Mrs. Dursley was thin and blonde and had nearly twice the usual amount of neck, which came in very useful as she spent so much of her time craning over garden fences, spying on the neighbors.',
'The Dursleys had a small son called Dudley and in their opinion there was no finer boy anywhere.']
documents
vectorizer = TfidfVectorizer(stop_words = 'english')
X = vectorizer.fit_transform(documents)
terms = vectorizer.get_feature_names_out()   # get_feature_names() in older scikit-learn versions
from sklearn.metrics.pairwise import cosine_similarity
dist = 1- cosine_similarity(X)
dist
import matplotlib.pyplot as plt
from scipy.cluster.hierarchy import ward, dendrogram
linkage_matrix = ward(dist)
fig, ax = plt.subplots(figsize = (8,8)) #set size
dendrogram(linkage_matrix, orientation='right', labels=documents)
plt.tick_params(
    axis='x',
    which='both',
    bottom=False,
    top=False,
    labelbottom=False)
plt.tight_layout()
plt.show()
K Means
model = KMeans(n_clusters = 2, init = 'k-means++', max_iter = 100, n_init = 1)
model.fit(X)
# top ten terms/words per cluster
order_centroids = model.cluster_centers_.argsort()[:, ::-1]
terms = vectorizer.get_feature_names_out()   # get_feature_names() in older scikit-learn versions
for i in range(2):
    print("Cluster Number:", i)
    for c in order_centroids[i, :10]:
        print('%s' % terms[c])
Y = vectorizer.transform(["Harry Potter is not Harry Styles"])
model.predict(Y)
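To see which of the two clusters each of the six sentences above was assigned to, the fitted model's labels_ attribute can be printed (a small sketch using model and documents from above):
for label, doc in zip(model.labels_, documents):
    print(label, doc[:60])   # cluster id followed by the first 60 characters of the sentence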
Preprocessing
External Data Preprocessing (importing the dataset, defining the functions)
import re
import nltk
import inflect
from nltk import word_tokenize, sent_tokenize
from nltk.corpus import stopwords
from nltk.stem import LancasterStemmer, WordNetLemmatizer
file = open("dataset path.txt", encoding = 'utf-8').read()
words = word_tokenize(file)
def to_lowercase(words):
    '''Convert all the characters into lowercase from the list of tokenized words'''
    new_words = []
    for word in words:
        new_word = word.lower()
        new_words.append(new_word)
    return new_words
words = to_lowercase(words)
#print(words)
def remove_punctuation(words):
    '''Remove all the punctuation marks from the list of tokenized words'''
    new_words = []
    for word in words:
        new_word = re.sub(r'[^\w\s]', '', word)
        if new_word != '':
            new_words.append(new_word)
    return new_words
words = remove_punctuation(words)
#print(words)
def replace_numbers(words):
    '''Replace all integer occurrences in the list of tokenized words'''
    p = inflect.engine()
    new_words = []
    for word in words:
        if word.isdigit():
            new_word = p.number_to_words(word)
            new_words.append(new_word)
        else:
            new_words.append(word)
    return new_words
words = replace_numbers(words)
#print(words)
def remove_stopwords(words):
    '''Remove stop words from the list of tokenized words'''
    new_words = []
    for word in words:
        if word not in stopwords.words('english'):
            new_words.append(word)
    return new_words
words = remove_stopwords(words)
#print(words)
def stem_words(words):
    '''Finding stem words in the list of tokenized words'''
    stemmer = LancasterStemmer()
    stems = []
    for word in words:
        stem = stemmer.stem(word)
        stems.append(stem)
    return stems
words = stem_words(words)
#print(words)
def lemmatize_words(words):
    '''Lemmatize verbs in the list of tokenized words'''
    lemmatizer = WordNetLemmatizer()
    lemmas = []
    for word in words:
        lemma = lemmatizer.lemmatize(word, pos='v')
        lemmas.append(lemma)
    return lemmas
words = lemmatize_words(words)
#print(words)
print(words)
Text Preprocessing (without user-defined functions)
import nltk
import re
import string
import inflect
from nltk.corpus import stopwords
from nltk import word_tokenize
series = open("dataset path.txt", encoding='utf-8').read()
series
series_lower = series.lower()
# Removal of numbers
result1 = re.sub(r'\d+', '', series_lower)
#result1
# Removal of punctuations
result2 = result1.translate(str.maketrans('','',string.punctuation))
#result2
# Removing white spaces
result3 = result2.strip()
#result3
# Removal of stopwords
# Tokenize the text
result3_tokens = word_tokenize(result3)
#result3_tokens
# Removing stopwords
sw = set(stopwords.words('english'))
result4 = []
for w in result3_tokens:
    if w not in sw:
        result4.append(w)
#result4
text_tokenize = result4
#text_tokenize
output = nltk.pos_tag(text_tokenize)
#output
Sentiment Analysis
import pandas as pd
import re
import string
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from nltk.stem import WordNetLemmatizer
import nltk
from wordcloud import WordCloud
import matplotlib.pyplot as plt
file = open("dataset path.txt", encoding='utf-8').read()
# The following cleaning steps (cleandata1, cleandata2 and cleandata3) are not required; do them only if asked.
cleandata1 = file.lower()
#cleandata1
cleandata2 = re.sub(r'[^\w\s]','', cleandata1)
#cleandata2
cleandata3 = re.sub(r'\d+', ' ', cleandata2)
#cleandata3
stop_words = set(stopwords.words('english'))
#stop_words
# let us remove the stop words from the tokenized text
tokens = word_tokenize(cleandata3)
cleandata4 = [i for i in tokens if not i in stop_words]
cleandata4
cleandata4 = " ".join(str(x) for x in cleandata4)
#cleandata4
cleandata5 = ' '.join(i for i in cleandata4.split() if not (i.isalpha() and len(i)==1))
#cleandata5
cleandata6 = cleandata5.strip()
#cleandata6
## Frequency of words
words_dict = {}
for word in cleandata6.split():
    words_dict[word] = words_dict.get(word, 0) + 1
for key in sorted(words_dict):
    print("{}:{}".format(key, words_dict[key]))
wordcloud = WordCloud(width=480, height=480, margin=0).generate(cleandata6)
# Display the generated image:
plt.imshow(wordcloud, interpolation='bilinear')
plt.axis("off")
plt.margins(x=0, y=0)
plt.show()
#with max words
wordcloud = WordCloud(width=480, height=480, max_words=5).generate(cleandata6)
plt.figure()
plt.imshow(wordcloud, interpolation="bilinear")
plt.axis("off")
plt.margins(x=0, y=0)
plt.show()
from textblob import TextBlob
from textblob.sentiments import NaiveBayesAnalyzer
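The two imports above are never used; a minimal sketch, assuming the NLTK corpora required by NaiveBayesAnalyzer are downloaded, of how sentiment scores could be obtained for the cleaned text:
blob = TextBlob(cleandata6)                                    # default PatternAnalyzer
print(blob.sentiment)                                          # Sentiment(polarity=..., subjectivity=...)
blob_nb = TextBlob(cleandata6, analyzer=NaiveBayesAnalyzer())
print(blob_nb.sentiment)                                       # Sentiment(classification=..., p_pos=..., p_neg=...)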
Bag of Words
from sklearn.feature_extraction.text import CountVectorizer
sentences = ["Hello how are you",
"Hi students are you all good",
"Okay lets study bag of words"]
sentences
cv = CountVectorizer()
bow = cv.fit_transform(sentences).toarray()
cv.vocabulary_
cv.get_feature_names_out()   # get_feature_names() in older scikit-learn versions
bow
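The columns of bow follow the vectorizer's vocabulary order; a small sketch that lays the matrix out as a table (pandas is used here purely for display and is an assumption, not part of the original notes):
import pandas as pd
df = pd.DataFrame(bow, columns=cv.get_feature_names_out(), index=sentences)
print(df)   # one row per sentence, one column per vocabulary word, cells are counts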
NLTK Basics
import nltk
from nltk.book import *
#similar
text6.similar('King')
text6.concordance('King')
sents()
len(text1)
#lines tells how many concordance lines you want. You can also run the code without the lines argument
text3.concordance('lived', lines = 38)
text3.common_contexts(['earth', 'heaven'])
text1.common_contexts(['captain', 'whale'])
#text3.collocations()
text3.collocation_list()
#Pass a number inside the brackets to get only that many collocations
text6.collocation_list(5)
text6.generate(5)
len(text3)
from nltk import lm
help(lm)
text = "Hello students, we are studying Parts of Speech Tagging. Lets understand the process of\
shallow parsing or Chunking. Here were are drawing the tree corresponding to the words \
and the POS tags based on a set grammer regex patter."
words = nltk.word_tokenize(text)
#words
tags = nltk.pos_tag(words)
#tags
# a chunk grammar: an NP (noun phrase) chunk is a determiner followed by an adjective and a noun
grammar = (''' NP: {<DT><JJ><NN>} ''')
grammar
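The grammar above is defined but never applied; a short sketch (using the tags computed earlier) of how the chunk tree described in the text could be built with nltk.RegexpParser:
chunk_parser = nltk.RegexpParser(grammar)   # build a chunker from the NP grammar
tree = chunk_parser.parse(tags)             # chunk the POS-tagged words into a shallow parse tree
print(tree)                                 # tree.draw() opens the tree in a separate window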
freq = FreqDist(text3)
freq
freq.most_common(50)
freq['father']
freq.plot(20, cumulative = True)
freq.plot(20)
freq.tabulate()
freq.max()
[i for i in sent3 if len(i) > 8]
[i for i in sent3 if len(i) != 3]
[i for i in sent3 if len(i) <= 3]
l = []
for i in sent3:
    if len(i) <= 3:
        l.append(i)
print(l)
# print(len(l))
Simple Regex
Regex on Strings
import re
egstring = '''
Jessica is 15 years old, and Daniel is 27 years old.
Edward is 97 years old, and his grandfather, Oscar, is 108 years old
'''
ages = re.findall(r'\d{1,3}', egstring)
names = re.findall(r'[A-Z][a-z]*', egstring)
print(ages)
print(names)
result = re.split(r'\d{1,3}', egstring)
print(result)
string = "Python is fun"
match = re.search(r'\APython', string)
if match:
    print("pattern found inside the string")
else:
    print("pattern not found")
Email
# example
#pattern = r'\w{4}_\d{2}\w{5}.\w{4}@\w{5}.\w{3}'
#pattern1 = r'[a-z]+_[0-9a-z]+.[a-z]+@[a-z.]+'
#email_string = "[email protected]"
generic_pattern = r'[a-zA-Z0-9._]+@[a-z]+\.[a-z]+'
email_string1 = "[email protected]"
if re.match(generic_pattern, email_string1) is not None:
    print(True)
else:
    print(False)
# Entering an Email
email = input("Enter an email: ")
email_list = ["[email protected]", "[email protected]", " [email protected]",
"[email protected]", "[email protected]", "[email protected]"]
email_list.append(email)
print(email_list)
#Function definition
def email_match(email_ls):
    count = len(email_ls)
    gmail_pattern = r'[a-zA-Z0-9._]+@gmail\.[a-z]+'
    hotmail_pattern = r'[a-zA-Z0-9._]+@hotmail\.[a-z]+'
    yahoo_pattern = r'[a-zA-Z0-9._]+@yahoo\.[a-z]+'
    print("---")
    print("GMAIL MAILS")
    for i in range(0, count):
        if re.match(gmail_pattern, email_ls[i]) is not None:
            print(email_ls[i])
    print("---")
    print("HOTMAIL MAILS")
    for i in range(0, count):
        if re.match(hotmail_pattern, email_ls[i]) is not None:
            print(email_ls[i])
    print("---")
    print("YAHOO MAILS")
    for i in range(0, count):
        if re.match(yahoo_pattern, email_ls[i]) is not None:
            print(email_ls[i])
#Calling the function
email_match(email_list)
POS
import nltk
from nltk import pos_tag
from nltk import word_tokenize
sample_text = word_tokenize("The classes are reopening on 15th March in St. Joseph's College of Commerce")
sample_text
pos_tag(sample_text)
nltk.help.upenn_tagset("DT")
nltk.help.upenn_tagset("VBP")
# run this for whichever tags are asked, as many as are asked
#nltk.help.upenn_tagset("NNS")
text = nltk.Text(word.lower() for word in nltk.corpus.brown.words())
text
text.similar("boy")
text.similar("test")
var1 = nltk.tag.str2tuple("SJCC/NNP")
var1
var1[1]
sentence = '''
The/DT classes/NNS are/VBP reopening/VBG from/IN 15th/CD March'2021/NNP in/IN St./NNP Joseph/NNP 's/POS College/NNP
'''
sentence
abc = [nltk.tag.str2tuple(i) for i in sentence.split()]
abc
nltk.corpus.brown.tagged_words()
nltk.help.brown_tagset('AT')
nltk.help.brown_tagset('NP-TL')
nltk.corpus.indian.tagged_words()
nltk.help.indian_tagset('SYM')