Near's Notes

News Text Classification with ML

1. Count Vectors + RidgeClassifier

1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
# Count Vectors + RidgeClassifier
# Baseline: bag-of-words counts fed to a ridge-regularized linear classifier.
import pandas as pd  # fix: pd was used below but never imported
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import RidgeClassifier
from sklearn.metrics import f1_score

train_df = pd.read_csv('/kaggle/input/newsclassestestdata/train_set.csv/train_set.csv', sep='\t', nrows=15000)

# Bag-of-words representation restricted to the 3,000 most frequent tokens.
vectorizer = CountVectorizer(max_features=3000)
train_test = vectorizer.fit_transform(train_df['text'])

clf = RidgeClassifier()
# First 10,000 rows train the model; the remaining 5,000 serve as validation.
clf.fit(train_test[:10000], train_df['label'].values[:10000])

val_pred = clf.predict(train_test[10000:])
# Macro-F1 weights every class equally — appropriate for imbalanced news categories.
print(f1_score(train_df['label'].values[10000:], val_pred, average='macro'))

output: 0.7410794074418383

2. TF-IDF + RidgeClassifier

1
2
3
4
5
6
7
8
9
10
# TF-IDF + RidgeClassifier: same split and classifier as above, but the
# features are TF-IDF weighted uni/bi/tri-grams instead of raw counts.
from sklearn.feature_extraction.text import TfidfVectorizer

train_df = pd.read_csv('/kaggle/input/newsclassestestdata/train_set.csv/train_set.csv', sep='\t', nrows=15000)

tfidf = TfidfVectorizer(ngram_range=(1, 3), max_features=3000)
features = tfidf.fit_transform(train_df['text'])
labels = train_df['label'].values

model = RidgeClassifier()
model.fit(features[:10000], labels[:10000])

predictions = model.predict(features[10000:])
print(f1_score(labels[10000:], predictions, average='macro'))

output: 0.8721598830546126

Try a bigger max_features:

1
2
3
4
5
6
# Same TF-IDF + Ridge pipeline, but with a larger vocabulary of 5,000 features.
wide_tfidf = TfidfVectorizer(ngram_range=(1, 3), max_features=5000)
wide_features = wide_tfidf.fit_transform(train_df['text'])

wide_clf = RidgeClassifier()
wide_clf.fit(wide_features[:10000], train_df['label'].values[:10000])

print(f1_score(train_df['label'].values[10000:],
               wide_clf.predict(wide_features[10000:]),
               average='macro'))

output: 0.8850817067811825

3. LogisticRegression

1
2
3
4
5
6
7
8
9
10
# Swap the classifier for L2-regularized logistic regression on TF-IDF features.
from sklearn import linear_model

tfidf = TfidfVectorizer(ngram_range=(1, 3), max_features=5000)
doc_term = tfidf.fit_transform(train_df['text'])
y = train_df['label'].values

# liblinear works directly on sparse input and is a solid default at this scale.
logreg = linear_model.LogisticRegression(penalty='l2', C=1.0, solver='liblinear')
logreg.fit(doc_term[:10000], y[:10000])

print(f1_score(y[10000:], logreg.predict(doc_term[10000:]), average='macro'))

output: 0.8464704900433653

4. SGDClassifier

1
2
3
4
5
6
7
8
# SGDClassifier with logistic loss on the same TF-IDF features.
tfidf = TfidfVectorizer(ngram_range=(1,3), max_features=5000)
train_test = tfidf.fit_transform(train_df['text'])

# fix: loss='log' was renamed to 'log_loss' in scikit-learn 1.1 and removed
# in 1.3, so the old spelling now raises ValueError.
# NOTE(review): l1_ratio is only used when penalty='elasticnet', so it is a
# no-op with penalty='l2' — kept here to preserve the original call shape.
reg = linear_model.SGDClassifier(loss="log_loss", penalty='l2', alpha=0.0001, l1_ratio=0.15)
reg.fit(train_test[:10000], train_df['label'].values[:10000])

val_pred = reg.predict(train_test[10000:])
print(f1_score(train_df['label'].values[10000:], val_pred, average='macro'))

output: 0.8461511856339045

5. SVM

1
2
3
4
5
6
7
8
9
# Linear-kernel SVM on TF-IDF features.
from sklearn import svm
tfidf = TfidfVectorizer(ngram_range=(1,3), max_features=5000)
train_test = tfidf.fit_transform(train_df['text'])

# fix: degree (poly-only) and gamma (rbf/poly/sigmoid-only) are ignored by the
# linear kernel, so the misleading no-op arguments are removed — behavior is
# identical. decision_function_shape='ovr' (the default) is kept explicit.
reg = svm.SVC(C=1.0, kernel='linear', decision_function_shape='ovr')
reg.fit(train_test[:10000], train_df['label'].values[:10000])

val_pred = reg.predict(train_test[10000:])
print(f1_score(train_df['label'].values[10000:], val_pred, average='macro'))

output: 0.883129115819089

6. Summary

method f1_score
Count Vectors + RidgeClassifier 0.7410794074418383
TF-IDF + RidgeClassifier (max_features=5000) 0.8850817067811825
TF-IDF + LogisticRegression 0.8464704900433653
TF-IDF + SGDClassifier 0.8461511856339045
TF-IDF + SVM 0.883129115819089
🐶 您的支持将鼓励我继续创作 🐶