22mc3014 - EK - Lab10.ipynb - Colab
import numpy as np
import matplotlib.pyplot as plt
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from sklearn.neighbors import KNeighborsClassifier
from sklearn.naive_bayes import GaussianNB
from sklearn.metrics import accuracy_score
# Training data for three fruit classes: each sample is [R, G, B, Size].
# Five hand-crafted samples per class, clustered around the class prototype.

# Red apples: dominant R channel, size near 10 cm.
red_apple = [
    [255, 0, 0, 10.0],
    [250, 10, 5, 10.5],
    [245, 20, 15, 9.8],
    [248, 15, 8, 10.2],
    [252, 5, 3, 10.1],
]

# Green grapes: dominant G channel, size near 2 cm.
green_grapes = [
    [0, 255, 0, 2.0],
    [5, 250, 10, 2.2],
    [10, 245, 15, 2.1],
    [8, 252, 12, 2.3],
    [3, 248, 7, 2.4],
]

# Blueberries: dominant B channel, size near 1.5 cm.
blueberry = [
    [0, 0, 255, 1.5],
    [10, 5, 250, 1.4],
    [20, 15, 245, 1.6],
    [5, 10, 248, 1.55],
    [3, 8, 253, 1.7],
]
# 1. LDA Classifier
# Fit an LDA model on the training split and report test accuracy.
# NOTE(review): X_train, y_train, X_test, y_test are defined in an earlier
# notebook cell not visible in this excerpt (presumably built from the
# red_apple/green_grapes/blueberry samples above) — confirm before running
# this cell in isolation.
lda = LDA()
lda.fit(X_train, y_train)
y_pred_lda = lda.predict(X_test)
accuracy_lda = accuracy_score(y_test, y_pred_lda)
print(f'LDA Classification Accuracy: {accuracy_lda * 100:.2f}%')
# 2. KNN Classifier
def knn_classification(k):
    """Train a k-NN classifier and report its test-set accuracy.

    Fits a KNeighborsClassifier with ``k`` neighbors on the module-level
    training split, predicts the test split, prints the accuracy, and
    returns it.

    NOTE(review): relies on module-level X_train, y_train, X_test, y_test,
    which are defined in a notebook cell not shown in this excerpt.

    Parameters
    ----------
    k : int
        Number of neighbors (``n_neighbors``) for the classifier.

    Returns
    -------
    float
        Test-set accuracy in [0, 1].
    """
    knn = KNeighborsClassifier(n_neighbors=k)
    knn.fit(X_train, y_train)
    y_pred_knn = knn.predict(X_test)
    accuracy_knn = accuracy_score(y_test, y_pred_knn)
    print(f'KNN (k={k}) Classification Accuracy: {accuracy_knn * 100:.2f}%')
    # Fix: the original computed the accuracy but returned None, so callers
    # could not use the result programmatically.
    return accuracy_knn
import numpy as np
from sklearn.naive_bayes import MultinomialNB
from sklearn.feature_extraction.text import CountVectorizer
https://colab.research.google.com/drive/1wQfmFOC39IjAthkk_YiLztxkeNM8P221#scrollTo=QqG03L1awxkN&printMode=true 2/3
10/23/24, 3:41 PM 22mc3014_EK_Lab10.ipynb - Colab
"Urgent Winner Free Money Money", # Should be classified as Spam
"Lunch Tomorrow with Friend", # Should be classified as Normal
"Claim Free Offer Money Now" # Should be classified as Spam
]
# Convert the test messages to token counts using the same vectorizer
# NOTE(review): `vectorizer` (a fitted CountVectorizer) and `test_messages`
# are defined in earlier cells cut off by the PDF page break — using
# transform (not fit_transform) here correctly reuses the training-time
# vocabulary. Confirm the earlier cells when reassembling the notebook.
X_test = vectorizer.transform(test_messages)
https://colab.research.google.com/drive/1wQfmFOC39IjAthkk_YiLztxkeNM8P221#scrollTo=QqG03L1awxkN&printMode=true 3/3