In this project, we use the data from the Kaggle competition Toxic Comment Classification Challenge by Jigsaw, and we use only its training data. We split this raw training data into train and test sets and evaluate model performance on the held-out test set.
The dataset consists of Wikipedia talk page comments, and each comment is annotated with the following labels: toxic, severe_toxic, obscene, threat, insult, and identity_hate.
This is a multi-label (not multi-class) classification problem: each comment carries six binary labels, and any number of them (zero, one, or several) can be 1 at the same time.
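To make the distinction concrete, here is a minimal sketch with two hypothetical comments (not rows from the actual dataset): the first fires three labels at once, the second fires none.
import pandas as pd
toy = pd.DataFrame({
    'comment_text':  ['you are a worthless idiot', 'thanks for the helpful edit'],
    'toxic':         [1, 0],
    'severe_toxic':  [0, 0],
    'obscene':       [1, 0],
    'threat':        [0, 0],
    'insult':        [1, 0],
    'identity_hate': [0, 0],
})
toy  # one row can have several 1s (multi-label), unlike multi-class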
References:
- Toxic Comment Classification Challenge (Jigsaw, Kaggle): https://www.kaggle.com/c/jigsaw-toxic-comment-classification-challenge
import os
import sys
import time
time_start_notebook = time.time()
%%capture
import os
import sys
ENV_COLAB = 'google.colab' in sys.modules
if ENV_COLAB:
    ## install modules
    !pip install watermark
    !pip install scikit-plot
    !pip install sentencepiece # xlnet needs this
    !pip install transformers
# data science
import numpy as np
import pandas as pd
from tqdm import tqdm
# visualization
import seaborn as sns
sns.set(color_codes=True)
import matplotlib
import matplotlib.pyplot as plt
%matplotlib inline
# mixed
import os
import time
from pprint import pprint
import joblib
import pickle
from tqdm import tqdm, trange
from ast import literal_eval
# random state
SEED=100
np.random.seed(SEED)
# machine learning
import sklearn
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MultiLabelBinarizer
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix, multilabel_confusion_matrix
from sklearn.metrics import f1_score, accuracy_score
# deep learning
import tensorflow as tf
from keras.preprocessing.sequence import pad_sequences
import torch
from torch.nn import BCEWithLogitsLoss, BCELoss
from torch.utils.data import TensorDataset, DataLoader
from torch.utils.data import RandomSampler, SequentialSampler
import sentencepiece # xlnet needs this
import transformers
# model eval
import scikitplot as skplt
# versions
import watermark
%load_ext watermark
%watermark -a "Bhishan Poudel" -d -v -m
print()
%watermark -iv
Bhishan Poudel 2020-12-03
CPython 3.6.9
IPython 5.5.0

compiler   : GCC 8.4.0
system     : Linux
release    : 4.19.112+
machine    : x86_64
processor  : x86_64
CPU cores  : 2
interpreter: 64bit

tensorflow   2.3.0
seaborn      0.11.0
torch        1.7.0+cu101
matplotlib   3.2.2
pandas       1.1.4
sklearn      0.22.2.post1
joblib       0.17.0
numpy        1.18.5
scikitplot   0.3.7
transformers 4.0.0
watermark    2.0.2
def show_methods(obj, ncols=4, contains=None):
    """List the public attributes of `obj` in a dataframe with `ncols` columns,
    optionally keeping only names that contain the substring `contains`."""
    lst = [i for i in dir(obj) if i[0] != '_']
    if contains is not None:
        lst = [i for i in lst if contains in i]
    df = pd.DataFrame(np.array_split(lst, ncols)).T.fillna('')
    return df
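For example, the helper can be applied to any module or object (a quick sketch):
# Example usage: list pandas functions whose names contain 'read'
show_methods(pd, ncols=4, contains='read')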
device_name = tf.test.gpu_device_name()
if device_name != '/device:GPU:0':
    raise SystemError('GPU device not found')
print('Found GPU at: {}'.format(device_name))
Found GPU at: /device:GPU:0
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
n_gpu = torch.cuda.device_count()
torch.cuda.get_device_name(0)
'Tesla K80'
%%capture
!wget https://github.com/bhishanpdl/Datasets/blob/master/Projects/Jigsaw_Toxic_Comment_Classification/train.csv.zip?raw=true
!unzip train.csv.zip?raw=true
!wget https://github.com/bhishanpdl/Datasets/blob/master/Projects/Jigsaw_Toxic_Comment_Classification/test.csv.zip?raw=true
!unzip test.csv.zip?raw=true
# if ENV_COLAB:
# ## mount google drive
# from google.colab import drive
# drive.mount('/content/drive')
# ## load the data dir
# dat_dir = 'drive/MyDrive/Colab Notebooks/data/Toxic_Comments/raw'
# sys.path.append(dat_dir)
!ls
sample_data 'test.csv.zip?raw=true' 'train.csv.zip?raw=true' test.csv train.csv
df_train = pd.read_csv('train.csv')
df_train.head()
  | id | comment_text | toxic | severe_toxic | obscene | threat | insult | identity_hate
---|---|---|---|---|---|---|---|---|
0 | 8d603d50affa1126 | "\nYes, aside, thank you for trying to answer ... | 0 | 0 | 0 | 0 | 0 | 0 |
1 | 8fb3576937b9e0d0 | March 2010 (UTC)\n\nThanks! and understood abo... | 0 | 0 | 0 | 0 | 0 | 0 |
2 | 379440e04fb68e27 | "\n\n The Outfield \n\nHahaha - compassion is ... | 0 | 0 | 0 | 0 | 0 | 0 |
3 | 6be4446aac8ae028 | Opposition is a source of strength. I believe ... | 0 | 0 | 0 | 0 | 0 | 0 |
4 | 1a2ff7ed958506a3 | Please discontinue making those unsupported ch... | 0 | 0 | 0 | 0 | 0 | 0 |
col_text = 'comment_text'
# unique text
df_train[col_text].nunique(), df_train.shape[0]
(127656, 127656)
# null values
df_train.isnull().sum()
id               0
comment_text     0
toxic            0
severe_toxic     0
obscene          0
threat           0
insult           0
identity_hate    0
dtype: int64
cols_label = ['toxic', 'severe_toxic', 'obscene',
'threat', 'insult', 'identity_hate']
print('Count of 1 per label: \n', df_train[cols_label].sum(), '\n')
print('Count of 0 per label: \n', df_train[cols_label].eq(0).sum())
Count of 1 per label:
toxic             12202
severe_toxic       1282
obscene            6782
threat              379
insult             6292
identity_hate      1136
dtype: int64

Count of 0 per label:
toxic            115454
severe_toxic     126374
obscene          120874
threat           127277
insult           121364
identity_hate    126520
dtype: int64
df_train[cols_label].sum().plot.bar(title='Count of 1s',color='tomato');
df_train[cols_label].eq(0).sum().plot.bar(title='Count of 0s');
# shuffle data
df_train = df_train.sample(frac=1,random_state=SEED).reset_index(drop=True)
col_ohe = 'one_hot_labels'
df_train[col_ohe] = df_train[cols_label].to_numpy().tolist()
df_train.head(2)
  | id | comment_text | toxic | severe_toxic | obscene | threat | insult | identity_hate | one_hot_labels
---|---|---|---|---|---|---|---|---|---|
0 | 815dac68f62b1e6a | "\n\n Defenestration \n\nIt was previously rep... | 0 | 0 | 0 | 0 | 0 | 0 | [0, 0, 0, 0, 0, 0] |
1 | 570a66d523877761 | I am easily able to trace my lineage back to C... | 0 | 0 | 0 | 0 | 0 | 0 | [0, 0, 0, 0, 0, 0] |
labels = list(df_train[col_ohe].values)
list_text = list(df_train[col_text].values)
# list_text[0]
Transformers pretrained tokenizers
BERT:
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', do_lower_case=True)
XLNet:
tokenizer = XLNetTokenizer.from_pretrained('xlnet-base-cased', do_lower_case=False)
RoBERTa:
tokenizer = RobertaTokenizer.from_pretrained('roberta-base', do_lower_case=False)
# show_methods(transformers,3,contains='Tokenizer')
tokenizer = transformers.XLNetTokenizer.from_pretrained(
'xlnet-base-cased', do_lower_case=False)
%%time
max_length = 100 # choose about 100 for colab
encodings = tokenizer.batch_encode_plus(list_text,
max_length=max_length,
pad_to_max_length=True)
print('tokenizer outputs: ', encodings.keys())
# dict_keys(['input_ids', 'token_type_ids', 'attention_mask'])
# Wall time: 3min 28s
Truncation was not explicitly activated but `max_length` is provided a specific value, please use `truncation=True` to explicitly truncate examples to max length. Defaulting to 'longest_first' truncation strategy. If you encode pairs of sequences (GLUE-style) with the tokenizer you can select this strategy more precisely by providing a specific strategy to `truncation`. /usr/local/lib/python3.6/dist-packages/transformers/tokenization_utils_base.py:2142: FutureWarning: The `pad_to_max_length` argument is deprecated and will be removed in a future version, use `padding=True` or `padding='longest'` to pad to the longest sequence in the batch, or use `padding='max_length'` to pad to a max length. In this case, you can give a specific length with `max_length` (e.g. `max_length=45`) or leave max_length to None to pad to the maximal input size of the model (e.g. 512 for Bert). FutureWarning,
tokenizer outputs: dict_keys(['input_ids', 'token_type_ids', 'attention_mask']) CPU times: user 1min, sys: 474 ms, total: 1min 1s Wall time: 1min 1s
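The deprecation warning above points at the newer argument names; an equivalent call with explicit truncation and padding (a sketch, same max_length) would be:
# Equivalent, non-deprecated encoding call (per the warning above)
encodings = tokenizer.batch_encode_plus(list_text,
                                        max_length=max_length,
                                        padding='max_length',
                                        truncation=True)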
input_ids = encodings['input_ids']
token_type_ids = encodings['token_type_ids']
attention_masks = encodings['attention_mask']
# Identifying indices of 'one_hot_labels' entries that only occur once
# This will allow us to stratify split our training data later
label_counts = df_train[col_ohe].astype(str).value_counts()
one_freq = label_counts[label_counts==1].keys()
cond = df_train[col_ohe].astype(str).isin(one_freq)
one_freq_idxs = df_train[cond].index
one_freq_idxs = sorted(list(one_freq_idxs), reverse=True)
print('df_train label indices with only one instance: ', one_freq_idxs)
df_train label indices with only one instance: [113097, 57059, 7039]
# Gathering single instance inputs to force
# into the training set after stratified split
one_freq_input_ids = [input_ids.pop(i) for i in one_freq_idxs]
one_freq_token_types = [token_type_ids.pop(i) for i in one_freq_idxs]
one_freq_attention_masks = [attention_masks.pop(i) for i in one_freq_idxs]
one_freq_labels = [labels.pop(i) for i in one_freq_idxs]
%%time
# train valid split using stratify
train_inputs, valid_inputs,\
train_labels, valid_labels,\
train_token_types, valid_token_types,\
train_masks, valid_masks\
= train_test_split(input_ids, labels, token_type_ids,attention_masks,
random_state=SEED,
test_size=0.10,
stratify = labels)
# Add one frequency data to train data
train_inputs.extend(one_freq_input_ids)
train_labels.extend(one_freq_labels)
train_masks.extend(one_freq_attention_masks)
train_token_types.extend(one_freq_token_types)
# Convert all of our data into torch tensors
train_inputs = torch.tensor(train_inputs)
train_labels = torch.tensor(train_labels)
train_masks = torch.tensor(train_masks)
train_token_types = torch.tensor(train_token_types)
valid_inputs = torch.tensor(valid_inputs)
valid_labels = torch.tensor(valid_labels)
valid_masks = torch.tensor(valid_masks)
valid_token_types = torch.tensor(valid_token_types)
CPU times: user 3.68 s, sys: 127 ms, total: 3.81 s Wall time: 3.82 s
from torch.utils.data import (TensorDataset, DataLoader,
RandomSampler, SequentialSampler)
# Select a batch size for training.
# For fine-tuning with XLNet, the authors recommend
# a batch size of 32, 48, or 128.
# We will use 32 here to avoid memory issues.
batch_size = 32
# Create an iterator of our data with torch DataLoader.
# This helps save on memory during training because, unlike a for loop,
# with an iterator the entire dataset does not need to be loaded into memory
train_data = TensorDataset(train_inputs, train_masks,
train_labels, train_token_types)
train_sampler = RandomSampler(train_data)
train_dataloader = DataLoader(train_data, sampler=train_sampler,
batch_size=batch_size)
valid_data = TensorDataset(valid_inputs, valid_masks,
valid_labels, valid_token_types)
valid_sampler = SequentialSampler(valid_data)
valid_dataloader = DataLoader(valid_data, sampler=valid_sampler,
batch_size=batch_size)
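As an optional sanity check (sketch), one batch from the train dataloader should have these shapes:
# Peek at one batch: ids/masks/token_types are (batch_size, max_length),
# labels are (batch_size, 6) one-hot vectors.
b_ids, b_masks, b_lbls, b_tok_types = next(iter(train_dataloader))
print(b_ids.shape, b_masks.shape, b_lbls.shape, b_tok_types.shape)
# expected: torch.Size([32, 100]) three times and torch.Size([32, 6]) for labels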
# # save data loaders
# torch.save(train_dataloader,'train_data_loader')
# torch.save(valid_dataloader,'valid_data_loader')
# !ls
# show_methods(transformers,contains='SequenceClassification',ncols=2)
%%time
# Load classification model
num_labels = len(cols_label)
model = transformers.XLNetForSequenceClassification.from_pretrained(
'xlnet-base-cased', num_labels=num_labels)
model.cuda()
Some weights of the model checkpoint at xlnet-base-cased were not used when initializing XLNetForSequenceClassification: ['lm_loss.weight', 'lm_loss.bias'] - This IS expected if you are initializing XLNetForSequenceClassification from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model). - This IS NOT expected if you are initializing XLNetForSequenceClassification from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model). Some weights of XLNetForSequenceClassification were not initialized from the model checkpoint at xlnet-base-cased and are newly initialized: ['sequence_summary.summary.weight', 'sequence_summary.summary.bias', 'logits_proj.weight', 'logits_proj.bias'] You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.
CPU times: user 14.2 s, sys: 3.04 s, total: 17.2 s Wall time: 25.3 s
Setting custom optimization parameters for the AdamW optimizer https://huggingface.co/transformers/main_classes/optimizer_schedules.html
show_methods(transformers,3,contains='Adam')
  | 0 | 1 | 2
---|---|---|---|
0 | AdamW | AdamWeightDecay |
# setting custom optimization parameters.
params_opt = list(model.named_parameters())
no_decay = ['bias', 'gamma', 'beta']
params_opt_group = [
    # parameters that get weight decay
    {'params': [p for n, p in params_opt
                if not any(nd in n for nd in no_decay)],
     'weight_decay_rate': 0.01},
    # parameters exempt from weight decay (bias, gamma, beta)
    {'params': [p for n, p in params_opt
                if any(nd in n for nd in no_decay)],
     'weight_decay_rate': 0.0}
]
optimizer = transformers.AdamW(params_opt_group,lr=2e-5,correct_bias=True)
# optimizer = AdamW(model.parameters(),lr=2e-5) # Default optimization
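The training loop below keeps `scheduler.step()` commented out; if a learning-rate schedule were wanted, a linear warmup/decay schedule from transformers could be wired in like this (a sketch, not used in the recorded run; the warmup setting is an assumption, not tuned here):
# Optional: linear warmup + decay schedule (uncomment scheduler.step() in the loop if used)
num_train_epochs = 3                                   # assumption: matches the run below
total_steps = len(train_dataloader) * num_train_epochs
scheduler = transformers.get_linear_schedule_with_warmup(
    optimizer, num_warmup_steps=0, num_training_steps=total_steps)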
%%time
from tqdm import trange
# Store our loss and accuracy for plotting
train_loss_set = []
# Number of training epochs (authors recommend between 2 and 4);
# the run recorded below trained for 3 epochs
epochs = 3
# trange is a tqdm wrapper around the normal python range
for _ in trange(epochs, desc="Epoch"):

    # Training
    model.train()

    # Tracking variables
    tr_loss = 0
    nb_tr_examples, nb_tr_steps = 0, 0

    # Train the data for one epoch
    for step, batch in enumerate(train_dataloader):
        # Add batch to GPU
        batch = tuple(b.to(device) for b in batch)
        # Unpack the inputs from our dataloader
        b_input_ids, b_input_mask, b_labels, b_token_types = batch
        # Clear out the gradients (by default they accumulate)
        optimizer.zero_grad()

        # # Forward pass for multiclass classification
        # outputs = model(b_input_ids, token_type_ids=None,
        #                 attention_mask=b_input_mask, labels=b_labels)
        # loss = outputs[0]
        # logits = outputs[1]

        # Forward pass for multilabel classification
        outputs = model(b_input_ids, token_type_ids=None,
                        attention_mask=b_input_mask)
        logits = outputs[0]
        loss_func = BCEWithLogitsLoss()
        loss = loss_func(logits.view(-1, num_labels),
                         b_labels.type_as(logits).view(-1, num_labels))
        # loss_func = BCELoss()
        # loss = loss_func(torch.sigmoid(logits.view(-1, num_labels)),
        #                  b_labels.type_as(logits).view(-1, num_labels))
        train_loss_set.append(loss.item())

        # Backward pass
        loss.backward()
        # Update parameters and take a step using the computed gradient
        optimizer.step()
        # scheduler.step()

        # Update tracking variables
        tr_loss += loss.item()
        nb_tr_examples += b_input_ids.size(0)
        nb_tr_steps += 1

    print("Train loss: {}".format(tr_loss / nb_tr_steps))

    ###########################################################################
    # Validation
    # Put model in evaluation mode to evaluate loss on the validation set
    model.eval()

    # Variables to gather full output
    logit_preds, true_labels, pred_labels, tokenized_texts = [], [], [], []

    # Predict
    for i, batch in enumerate(valid_dataloader):
        batch = tuple(b.to(device) for b in batch)
        # Unpack the inputs from our dataloader
        b_input_ids, b_input_mask, b_labels, b_token_types = batch
        with torch.no_grad():
            # Forward pass
            outs = model(b_input_ids, token_type_ids=None,
                         attention_mask=b_input_mask)
            b_logit_pred = outs[0]
            pred_label = torch.sigmoid(b_logit_pred)

            b_logit_pred = b_logit_pred.detach().cpu().numpy()
            pred_label = pred_label.to('cpu').numpy()
            b_labels = b_labels.to('cpu').numpy()

        tokenized_texts.append(b_input_ids)
        logit_preds.append(b_logit_pred)
        true_labels.append(b_labels)
        pred_labels.append(pred_label)

    # Flatten outputs
    pred_labels = [item for sublist in pred_labels for item in sublist]
    true_labels = [item for sublist in true_labels for item in sublist]

    # Calculate accuracy with a 0.50 decision threshold
    threshold = 0.50
    pred_bools = [pl > threshold for pl in pred_labels]
    true_bools = [tl == 1 for tl in true_labels]
    val_f1_accuracy = f1_score(true_bools, pred_bools, average='micro') * 100
    val_flat_accuracy = accuracy_score(true_bools, pred_bools) * 100

    print('F1 Validation Accuracy: ', val_f1_accuracy)
    print('Flat Validation Accuracy: ', val_flat_accuracy)
Epoch 1/3: Train loss: 0.05394 | F1 Validation Accuracy: 77.59 | Flat Validation Accuracy: 92.60
Epoch 2/3: Train loss: 0.03925 | F1 Validation Accuracy: 77.93 | Flat Validation Accuracy: 92.48
Epoch 3/3: Train loss: 0.03297 | F1 Validation Accuracy: 77.49 | Flat Validation Accuracy: 92.07
(about 78 min per epoch)
CPU times: user 2h 25min 25s, sys: 1h 29min 2s, total: 3h 54min 28s Wall time: 3h 54min 46s
# torch.save(model.state_dict(), 'bert_model_toxic')
cols_label
['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']
df_test = pd.read_csv('test.csv')
df_test.head(2)
  | id | comment_text | toxic | severe_toxic | obscene | threat | insult | identity_hate
---|---|---|---|---|---|---|---|---|
0 | 70bbc3e96dd459b1 | Hammed it is, cheers! | 0 | 0 | 0 | 0 | 0 | 0 |
1 | 0b2e86f819b4b9a4 | Not a problem, sorry for the inconvenience and... | 0 | 0 | 0 | 0 | 0 | 0 |
print('Count of 1 per label: \n', df_test[cols_label].sum(), '\n')
print('Count of 0 per label: \n', df_test[cols_label].eq(0).sum())
Count of 1 per label:
toxic            3092
severe_toxic      313
obscene          1667
threat             99
insult           1585
identity_hate     269
dtype: int64

Count of 0 per label:
toxic            28823
severe_toxic     31602
obscene          30248
threat           31816
insult           30330
identity_hate    31646
dtype: int64
# df_test_labels = pd.read_csv('test_labels.csv')
# df_test = df_test.merge(df_test_labels, on='id', how='left')
df_test.isna().sum().sum()
0
df_test[col_ohe] = df_test[cols_label].to_numpy().tolist()
df_test.head(2)
  | id | comment_text | toxic | severe_toxic | obscene | threat | insult | identity_hate | one_hot_labels
---|---|---|---|---|---|---|---|---|---|
0 | 70bbc3e96dd459b1 | Hammed it is, cheers! | 0 | 0 | 0 | 0 | 0 | 0 | [0, 0, 0, 0, 0, 0] |
1 | 0b2e86f819b4b9a4 | Not a problem, sorry for the inconvenience and... | 0 | 0 | 0 | 0 | 0 | 0 | [0, 0, 0, 0, 0, 0] |
# Gathering input data
test_labels = list(df_test[cols_label].values)
test_comments = list(df_test[col_text].values)
%%time
# Encoding input data
test_encodings = tokenizer.batch_encode_plus(test_comments,
max_length=max_length,
pad_to_max_length=True)
test_input_ids = test_encodings['input_ids']
test_token_type_ids = test_encodings['token_type_ids']
test_attention_masks = test_encodings['attention_mask']
/usr/local/lib/python3.6/dist-packages/transformers/tokenization_utils_base.py:2142: FutureWarning: The `pad_to_max_length` argument is deprecated and will be removed in a future version, use `padding=True` or `padding='longest'` to pad to the longest sequence in the batch, or use `padding='max_length'` to pad to a max length. In this case, you can give a specific length with `max_length` (e.g. `max_length=45`) or leave max_length to None to pad to the maximal input size of the model (e.g. 512 for Bert). FutureWarning,
CPU times: user 14.7 s, sys: 15.3 ms, total: 14.7 s Wall time: 14.7 s
# Make tensors out of data
test_inputs = torch.tensor(test_input_ids)
test_labels = torch.tensor(test_labels)
test_masks = torch.tensor(test_attention_masks)
test_token_types = torch.tensor(test_token_type_ids)
# Create test dataloader
test_data = TensorDataset(test_inputs, test_masks, test_labels,
test_token_types)
test_sampler = SequentialSampler(test_data)
test_dataloader = DataLoader(test_data, sampler=test_sampler,
batch_size=batch_size)
# Save test dataloader
torch.save(test_dataloader,'test_data_loader')
%%time
# Test
# Put model in evaluation mode to evaluate loss on the validation set
model.eval()
#track variables
logit_preds,true_labels,pred_labels,tokenized_texts = [],[],[],[]
# Predict
for i, batch in enumerate(test_dataloader):
    batch = tuple(b.to(device) for b in batch)
    # Unpack the inputs from our dataloader
    b_input_ids, b_input_mask, b_labels, b_token_types = batch
    with torch.no_grad():
        # Forward pass
        outs = model(b_input_ids, token_type_ids=None,
                     attention_mask=b_input_mask)
        b_logit_pred = outs[0]
        pred_label = torch.sigmoid(b_logit_pred)

        b_logit_pred = b_logit_pred.detach().cpu().numpy()
        pred_label = pred_label.to('cpu').numpy()
        b_labels = b_labels.to('cpu').numpy()

    tokenized_texts.append(b_input_ids)
    logit_preds.append(b_logit_pred)
    true_labels.append(b_labels)
    pred_labels.append(pred_label)

# Flatten outputs
tokenized_texts = [item for sublist in tokenized_texts for item in sublist]
pred_labels = [item for sublist in pred_labels for item in sublist]
true_labels = [item for sublist in true_labels for item in sublist]
true_bools = [tl == 1 for tl in true_labels]
CPU times: user 4min 45s, sys: 3min 2s, total: 7min 47s Wall time: 7min 47s
len(true_bools), df_test.shape
(31915, (31915, 9))
pred_bools = [pl>0.50 for pl in pred_labels]
f1= f1_score(true_bools, pred_bools,average='micro')
acc = accuracy_score(true_bools, pred_bools)
print(f'F1-score (micro) : {f1:.4f}')
print(f'Accuracy (overall): {acc:.4f}')
F1-score (micro) : 0.7664 Accuracy (overall): 0.9179
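Since the Kaggle competition is scored by mean column-wise ROC AUC, and roc_auc_score is already imported above, per-label AUCs can also be computed from the sigmoid probabilities (a sketch using the arrays built above):
# Per-label and mean ROC AUC from the predicted probabilities (pred_labels)
y_true = np.array(true_bools, dtype=int)
y_prob = np.array(pred_labels)
aucs = roc_auc_score(y_true, y_prob, average=None)
for label, auc in zip(cols_label, aucs):
    print(f'{label:15s} ROC AUC: {auc:.4f}')
print(f'{"mean":15s} ROC AUC: {aucs.mean():.4f}')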
from sklearn.metrics import multilabel_confusion_matrix
mcm = multilabel_confusion_matrix(true_bools, pred_bools)
mcm
array([[[28092,   731],
        [  500,  2592]],

       [[31580,    22],
        [  284,    29]],

       [[29813,   435],
        [  223,  1444]],

       [[31794,    22],
        [   60,    39]],

       [[29946,   384],
        [  421,  1164]],

       [[31563,    83],
        [  131,   138]]])
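Each 2x2 block above corresponds to one label, in the order of cols_label, using sklearn's layout [[TN, FP], [FN, TP]]. A small sketch to print the blocks with their label names:
# Print each label's confusion matrix entries (sklearn layout: [[TN, FP], [FN, TP]])
for label, cm in zip(cols_label, mcm):
    (tn, fp), (fn, tp) = cm
    print(f'{label:15s} TN={tn:6d} FP={fp:5d} FN={fn:5d} TP={tp:5d}')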
clf_report = classification_report(true_bools,pred_bools,
target_names=cols_label)
print(clf_report)
               precision    recall  f1-score   support

        toxic       0.78      0.84      0.81      3092
 severe_toxic       0.57      0.09      0.16       313
      obscene       0.77      0.87      0.81      1667
       threat       0.64      0.39      0.49        99
       insult       0.75      0.73      0.74      1585
identity_hate       0.62      0.51      0.56       269

    micro avg       0.76      0.77      0.77      7025
    macro avg       0.69      0.57      0.60      7025
 weighted avg       0.75      0.77      0.75      7025
  samples avg       0.07      0.07      0.07      7025
/usr/local/lib/python3.6/dist-packages/sklearn/metrics/_classification.py:1272: UndefinedMetricWarning: Precision and F-score are ill-defined and being set to 0.0 in samples with no predicted labels. Use `zero_division` parameter to control this behavior. _warn_prf(average, modifier, msg_start, len(result)) /usr/local/lib/python3.6/dist-packages/sklearn/metrics/_classification.py:1272: UndefinedMetricWarning: Recall and F-score are ill-defined and being set to 0.0 in samples with no true labels. Use `zero_division` parameter to control this behavior. _warn_prf(average, modifier, msg_start, len(result))
# Label co-occurrence counts: coo[i, j] = number of test comments whose
# true label i is 1 and whose predicted label j is 1
coo = np.array(true_bools, dtype=np.int16).T.dot(np.array(pred_bools, dtype=np.int16))
df_coo = pd.DataFrame(coo, columns=cols_label, index=cols_label)
df_coo.style.background_gradient()
  | toxic | severe_toxic | obscene | threat | insult | identity_hate
---|---|---|---|---|---|---|
toxic | 2592 | 51 | 1723 | 61 | 1503 | 212 |
severe_toxic | 310 | 29 | 304 | 19 | 298 | 46 |
obscene | 1594 | 51 | 1444 | 39 | 1215 | 151 |
threat | 94 | 6 | 62 | 39 | 59 | 9 |
insult | 1494 | 47 | 1179 | 38 | 1164 | 155 |
identity_hate | 245 | 25 | 194 | 9 | 195 | 138 |
df_coo2 = df_coo.copy()
df_coo2['Total'] = df_coo2.sum(axis=1)
df_coo2.loc[len(df_coo2),:] = df_coo2.sum(axis=0)
df_coo2.index = df_coo.index.tolist() + ['Total']
df_coo2 = df_coo2.astype(int)
# rows are true labels, columns are predicted labels
df_coo2
  | toxic | severe_toxic | obscene | threat | insult | identity_hate | Total
---|---|---|---|---|---|---|---|
toxic | 2592 | 51 | 1723 | 61 | 1503 | 212 | 6142 |
severe_toxic | 310 | 29 | 304 | 19 | 298 | 46 | 1006 |
obscene | 1594 | 51 | 1444 | 39 | 1215 | 151 | 4494 |
threat | 94 | 6 | 62 | 39 | 59 | 9 | 269 |
insult | 1494 | 47 | 1179 | 38 | 1164 | 155 | 4077 |
identity_hate | 245 | 25 | 194 | 9 | 195 | 138 | 806 |
Total | 6329 | 209 | 4906 | 205 | 4434 | 711 | 16794 |
# Maximize the F1 score by tuning the decision threshold.
# First a coarse search over 'macro_thresholds' in steps of 0.1,
# then a fine search over 'micro_thresholds' in steps of 0.01.
macro_thresholds = np.array(range(1,10))/10
f1_results, flat_acc_results = [], []
for th in macro_thresholds:
    pred_bools = [pl > th for pl in pred_labels]
    test_f1_accuracy = f1_score(true_bools, pred_bools, average='micro')
    test_flat_accuracy = accuracy_score(true_bools, pred_bools)
    f1_results.append(test_f1_accuracy)
    flat_acc_results.append(test_flat_accuracy)
best_f1_idx = np.argmax(f1_results)
best_macro_th = macro_thresholds[np.argmax(f1_results)]
best_pred_bools = [pl>macro_thresholds[best_f1_idx] for pl in pred_labels]
r = classification_report(true_bools,best_pred_bools,
target_names=cols_label)
print(f'Best Threshold : {macro_thresholds[best_f1_idx]:}')
print(f'Test F1 Accuracy : {f1_results[best_f1_idx]:.4f}')
print(f'Test Flat Accuracy : {flat_acc_results[best_f1_idx]:.4f}')
print(r)
Best Threshold     : 0.5
Test F1 Accuracy   : 0.7664
Test Flat Accuracy : 0.9179

               precision    recall  f1-score   support

        toxic       0.78      0.84      0.81      3092
 severe_toxic       0.57      0.09      0.16       313
      obscene       0.77      0.87      0.81      1667
       threat       0.64      0.39      0.49        99
       insult       0.75      0.73      0.74      1585
identity_hate       0.62      0.51      0.56       269

    micro avg       0.76      0.77      0.77      7025
    macro avg       0.69      0.57      0.60      7025
 weighted avg       0.75      0.77      0.75      7025
  samples avg       0.07      0.07      0.07      7025
/usr/local/lib/python3.6/dist-packages/sklearn/metrics/_classification.py:1272: UndefinedMetricWarning: Precision and F-score are ill-defined and being set to 0.0 in samples with no predicted labels. Use `zero_division` parameter to control this behavior. _warn_prf(average, modifier, msg_start, len(result)) /usr/local/lib/python3.6/dist-packages/sklearn/metrics/_classification.py:1272: UndefinedMetricWarning: Recall and F-score are ill-defined and being set to 0.0 in samples with no true labels. Use `zero_division` parameter to control this behavior. _warn_prf(average, modifier, msg_start, len(result))
# F1 micro thresholds
micro_thresholds = (np.array(range(10))/100)+best_macro_th
f1_results, flat_acc_results = [], []
for th in micro_thresholds:
    pred_bools = [pl > th for pl in pred_labels]
    test_f1_accuracy = f1_score(true_bools, pred_bools, average='micro')
    test_flat_accuracy = accuracy_score(true_bools, pred_bools)
    f1_results.append(test_f1_accuracy)
    flat_acc_results.append(test_flat_accuracy)
best_f1_idx = np.argmax(f1_results)
best_pred_bools = [pl>micro_thresholds[best_f1_idx] for pl in pred_labels]
r = classification_report(true_bools,best_pred_bools,
target_names=cols_label)
print(f'Best Threshold : {micro_thresholds[best_f1_idx]:}')
print(f'Test F1 Accuracy : {f1_results[best_f1_idx]:.4f}')
print(f'Test Flat Accuracy : {flat_acc_results[best_f1_idx]:.4f}')
print(r)
Best Threshold     : 0.51
Test F1 Accuracy   : 0.7670
Test Flat Accuracy : 0.9185

               precision    recall  f1-score   support

        toxic       0.78      0.84      0.81      3092
 severe_toxic       0.61      0.08      0.14       313
      obscene       0.77      0.86      0.82      1667
       threat       0.64      0.39      0.49        99
       insult       0.76      0.73      0.74      1585
identity_hate       0.64      0.51      0.56       269

    micro avg       0.77      0.77      0.77      7025
    macro avg       0.70      0.57      0.59      7025
 weighted avg       0.76      0.77      0.75      7025
  samples avg       0.07      0.07      0.07      7025
/usr/local/lib/python3.6/dist-packages/sklearn/metrics/_classification.py:1272: UndefinedMetricWarning: Precision and F-score are ill-defined and being set to 0.0 in samples with no predicted labels. Use `zero_division` parameter to control this behavior. _warn_prf(average, modifier, msg_start, len(result)) /usr/local/lib/python3.6/dist-packages/sklearn/metrics/_classification.py:1272: UndefinedMetricWarning: Recall and F-score are ill-defined and being set to 0.0 in samples with no true labels. Use `zero_division` parameter to control this behavior. _warn_prf(average, modifier, msg_start, len(result))
time_taken = time.time() - time_start_notebook
h,m = divmod(time_taken,60*60)
print('Time taken to run whole notebook: {:.0f} hr '\
'{:.0f} min {:.0f} secs'.format(h, *divmod(m,60)))
# Time taken to run whole notebook: 4 hr 5 min 6 secs
Time taken to run whole notebook: 4 hr 5 min 6 secs