# Lab 2 / lab_2_full.py
import numpy as np
import math
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans
from sklearn import metrics
from sklearn.decomposition import PCA
from seaborn import scatterplot as scatter
from sklearn.manifold import TSNE

# Check whether a value is NaN (non-numeric values are never NaN)
def isNa(value):
    return isinstance(value, float) and math.isnan(value)
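# e.g. isNa(float('nan')) -> True, isNa(0) -> False, isNa('text') -> False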

# Drop every column whose share of NaN values exceeds maxNaPercents percent
def dropNaRowByPercent(df, maxNaPercents):
    for y, colObject in df.items():
        countOfNa = 0
        countOfRows = len(colObject)
        for value in colObject:
            if isNa(value):
                countOfNa += 1
        percentNaInCol = (countOfNa / countOfRows) * 100
        if percentNaInCol > maxNaPercents:
            #print('deleting', colObject.name)
            df = df.drop(colObject.name, axis=1)
    return df
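# e.g. dropNaRowByPercent(df, 4) drops every column in which more than 4% of the cells are NaN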

# Impute a NaN cell by an inverse-distance weighted average over the complete rows
# (uses the module-level df_dropNa and df_res built below)
def generateValue(naPosX, naPosY, rowCount):
    distances = []
    for x, naFreeRow in df_dropNa.iterrows():
        res = 0.000000000001  # small epsilon to avoid division by zero
        for y, value in df_res.iloc[naPosX].items():
            if not isNa(value) and isinstance(value, (int, float)):
                res += abs(naFreeRow[y] - value)
        distances.append(res / rowCount)
    #print(distances)
    inverseDistancesSum = 0
    for distance in distances:
        inverseDistancesSum += 1 / distance
    affiliationLevels = []
    for distance in distances:
        affiliationLevels.append((1 / distance) / inverseDistancesSum)

    naValue = 0
    iterator = 0
    for x, value in df_dropNa[naPosY].items():
        naValue += value * affiliationLevels[iterator]
        iterator += 1
    return naValue
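# The imputed value is an inverse-distance weighted average over the complete rows:
#   w_j = (1 / d_j) / sum_k (1 / d_k),   naValue = sum_j w_j * x_j[naPosY]
# where d_j is the mean absolute distance between complete row j and the row with the gap.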
                    
#%%
file = pd.ExcelFile('Dataset.xlsx')
df = pd.read_excel(file, sheet_name='Лист1', header=1)
df_res = dropNaRowByPercent(df, 4)   # drop columns that are more than 4% NaN
df_dropNa = df_res.dropna()          # complete rows, used as the reference set for imputation
resultData = df_res.copy()

for x, row in df_res.iterrows():
    for y, value in row.items():
        if isNa(value):
            #print('X = ', x, 'row= ', row)
            resultData.loc[x, y] = generateValue(x, y, len(df_dropNa.index) + 1)

# lab_2 
# KMeans
#%%        
mainData = resultData.drop(columns=['№иб', 'Діагноз']).values
# encode diagnoses: ХОЗЛ -> 0, Пневмонія -> 1, БА -> 2
diagnoses = resultData.loc[:, ['Діагноз']].replace('ХОЗЛ', 0).replace('БА', 2).replace('Пневмонія', 1).values
x_stan = StandardScaler().fit_transform(mainData)
kmeans = KMeans(n_clusters=3, init='k-means++', random_state=42)
y_kmeans = kmeans.fit_predict(x_stan)

print(y_kmeans)
expected = diagnoses
predicted = y_kmeans
print(metrics.classification_report(expected.ravel(), predicted))
print(metrics.confusion_matrix(expected.ravel(), predicted))
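
# Note: k-means cluster indices are arbitrary, so the report above is only meaningful
# when the clusters happen to line up with the diagnosis codes. An optional sketch of
# re-labelling via the Hungarian algorithm (assumes scipy is available):
from scipy.optimize import linear_sum_assignment
cm = metrics.confusion_matrix(expected.ravel(), predicted)
rows, cols = linear_sum_assignment(-cm)  # maximize the matched counts
mapping = {cluster: diagnosis for diagnosis, cluster in zip(rows, cols)}
remapped = np.array([mapping[label] for label in predicted])
print(metrics.classification_report(expected.ravel(), remapped))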

#PCA diagnoses + kmeans
#%%
pca = PCA(n_components=2)
pca_diagnosis = pca.fit_transform(x_stan)
principalDf = pd.DataFrame(data=pca_diagnosis, columns=['principal component 1', 'principal component 2'])

pcaDF = pd.concat([principalDf, pd.DataFrame(data = diagnoses)], axis=1)                    
centers = pca.transform(kmeans.cluster_centers_)  # project the centers into the PCA plane
f, axes = plt.subplots(1, 2, figsize=(15, 7))
scatter(x=pcaDF.loc[:, 'principal component 1'], y=pcaDF.loc[:, 'principal component 2'], ax=axes[0], hue=diagnoses[:, 0])
scatter(x=pcaDF.loc[:, 'principal component 1'], y=pcaDF.loc[:, 'principal component 2'], ax=axes[1], hue=y_kmeans)
scatter(x=centers[:, 0], y=centers[:, 1], ax=axes[1], marker='s', s=200)
plt.show()
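
# Optional check: how much of the variance the two principal components keep
print(pca.explained_variance_ratio_)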

#TSNE diagnoses + kmeans
#%%
tsne = TSNE(n_components=2, perplexity=18, n_iter=1000, random_state=43, learning_rate=100)
x_2D = tsne.fit_transform(x_stan)
tsneDf = pd.DataFrame(data = x_2D, columns = ['dim 1', 'dim 2'])
final_tsneDf = pd.concat([tsneDf,  pd.DataFrame(data = diagnoses)], axis = 1)
f, axes = plt.subplots(1, 2, figsize=(15, 7))
scatter(x=final_tsneDf.loc[:, 'dim 1'], y=final_tsneDf.loc[:, 'dim 2'], ax=axes[0], hue=diagnoses[:, 0])
scatter(x=final_tsneDf.loc[:, 'dim 1'], y=final_tsneDf.loc[:, 'dim 2'], ax=axes[1], hue=y_kmeans)
# (t-SNE cannot embed new points such as the k-means centers, so they are not overlaid here)
plt.show()

#PCA diagnoses + DBSCAN
#%%
from sklearn.cluster import DBSCAN
dbscan = DBSCAN(eps=9, min_samples=3)
dbscan.fit(x_stan)

f, axes = plt.subplots(1, 2, figsize=(15, 7))
scatter(x=pcaDF.loc[:, 'principal component 1'], y=pcaDF.loc[:, 'principal component 2'], ax=axes[0], hue=diagnoses[:, 0])
scatter(x=pcaDF.loc[:, 'principal component 1'], y=pcaDF.loc[:, 'principal component 2'], ax=axes[1], hue=dbscan.labels_)

plt.title("PCA DBSCAN")
plt.show()
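
# DBSCAN marks noise points with the label -1; a quick look at the cluster sizes:
print(np.unique(dbscan.labels_, return_counts=True))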

#TSNE diagnoses + DBSCAN
#%%
# (recomputes the same t-SNE embedding as in the k-means section)
tsne = TSNE(n_components=2, perplexity=18, n_iter=1000, random_state=43, learning_rate=100)
x_2D = tsne.fit_transform(x_stan)
tsneDf = pd.DataFrame(data = x_2D, columns = ['dim 1', 'dim 2'])
final_tsneDf = pd.concat([tsneDf,  pd.DataFrame(data = diagnoses)], axis = 1)
f, axes = plt.subplots(1, 2, figsize=(15, 7))
scatter(x=final_tsneDf.loc[:, 'dim 1'], y=final_tsneDf.loc[:, 'dim 2'], ax=axes[0], hue=diagnoses[:, 0])
scatter(x=final_tsneDf.loc[:, 'dim 1'], y=final_tsneDf.loc[:, 'dim 2'], ax=axes[1], hue=dbscan.labels_)
plt.show()

#Compare clustering methods
#%%
predicted = dbscan.labels_
print(metrics.adjusted_rand_score(expected[:,0], predicted))
print(metrics.adjusted_mutual_info_score(expected[:,0], predicted))
print(metrics.homogeneity_score(expected[:,0], predicted))
print(metrics.completeness_score(expected[:,0], predicted))
print(metrics.v_measure_score(expected[:,0], predicted))
print(metrics.silhouette_score(x_stan, expected[:, 0]))
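# Note: the silhouette above scores the true diagnoses; the silhouette of the DBSCAN
# partition itself would be the following (valid only if it found at least 2 clusters):
# print(metrics.silhouette_score(x_stan, predicted))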

#Lab 3
#%%
mainData_1 = resultData.drop(columns=['№иб', 'Діагноз']).values
diagnoses_1 = resultData.loc[:, ['Діагноз']].replace('ХОЗЛ', 0).replace('БА', 2).replace('Пневмонія', 1).values
x_stan_1 = StandardScaler().fit_transform(mainData_1)

#split
#%%
from sklearn.model_selection import train_test_split
X_train, X_test, Y_train, Y_test = train_test_split(x_stan_1, diagnoses_1, test_size=0.3, random_state=22)
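# Optionally, passing stratify=diagnoses_1 to train_test_split keeps the class
# ratios equal in both splits, which helps with three unbalanced classes.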

#train
#%%
from sklearn.linear_model import LinearRegression, LogisticRegression
linearRegressor = LinearRegression()
logisticRegressor = LogisticRegression()

linearRegressor.fit(X_train, Y_train)
logisticRegressor.fit(X_train, Y_train.ravel())
print(linearRegressor.intercept_)
print(linearRegressor.coef_)
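
# Caveat: linear regression treats the diagnosis codes 0/1/2 as an ordinal scale and
# predicts continuous scores; they would need rounding and clipping before a direct
# comparison with the classifiers below.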

#testing linearRegressor
#%%
y_pred = linearRegressor.predict(X_test)
df_test_lin_reg = pd.DataFrame({'Фактичний': Y_test.flatten(), 'Передбачуваний': y_pred.flatten()})

#MAE MSE R2 linearRegressor
#%%
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
LinRegMAE = mean_absolute_error(Y_test.flatten(), y_pred.flatten())
LinRegMSE = mean_squared_error(Y_test.flatten(), y_pred.flatten())
LinRegR2 = r2_score(Y_test.flatten(), y_pred.flatten())
print('mean_absolute_error: ', LinRegMAE)
print('mean_squared_error: ', LinRegMSE)
print('r2_score: ', LinRegR2)

#testing logisticRegressor
#%%
y_pred_log_reg = logisticRegressor.predict(X_test)
df_test_log_reg = pd.DataFrame({'Фактичний': Y_test.flatten(), 'Передбачуваний': y_pred_log_reg.flatten()})

#MAE MSE R2 logisticRegressor
#%%
LogRegMAE = mean_absolute_error(Y_test.flatten(), y_pred_log_reg.flatten())
LogRegMSE = mean_squared_error(Y_test.flatten(), y_pred_log_reg.flatten())
LogRegR2 = r2_score(Y_test.flatten(), y_pred_log_reg.flatten())
print('mean_absolute_error: ', LogRegMAE)
print('mean_squared_error: ', LogRegMSE)
print('r2_score: ', LogRegR2)

#confusion_matrix classification_report
#%%
import seaborn as sn
conf_matrix = metrics.confusion_matrix(Y_test.flatten(), y_pred_log_reg.flatten())
print('logisticRegressor --- classification_report ', metrics.classification_report(Y_test.flatten(), y_pred_log_reg.flatten()))
print('logisticRegressor --- confusion_matrix ', conf_matrix)

#visualizing confusion matrix
sn.set(font_scale=1.4) # for label size
sn.heatmap(conf_matrix, annot=True, annot_kws={"size": 16}) # font size
plt.show()

#PCA Linear Regression (for all data) + original data
#%%

y_predict_all = linearRegressor.predict(x_stan)
f, axes = plt.subplots(1, 2, figsize=(15,7))

scatter(x=pcaDF.loc[:, 'principal component 1'], y=pcaDF.loc[:, 'principal component 2'], ax=axes[0], hue=diagnoses[:, 0])
scatter(x=pcaDF.loc[:, 'principal component 1'], y=pcaDF.loc[:, 'principal component 2'], ax=axes[1], hue=y_predict_all.flatten())

plt.title("Linear Reggression")
plt.show()

#PCA Logistic Regression (for all data) + original data
#%%

y_logistic_predict_all = logisticRegressor.predict(x_stan)
f, axes = plt.subplots(1, 2, figsize=(15, 7))

scatter(x=pcaDF.loc[:, 'principal component 1'], y=pcaDF.loc[:, 'principal component 2'], ax=axes[0], hue=diagnoses[:, 0])
scatter(x=pcaDF.loc[:, 'principal component 1'], y=pcaDF.loc[:, 'principal component 2'], ax=axes[1], hue=y_logistic_predict_all.flatten())

plt.title("Logistic Reggression")
plt.show()

#KNeighborsClassifier
#%%

from sklearn.neighbors import KNeighborsClassifier
K_neigh = KNeighborsClassifier(n_neighbors=3)
K_neigh.fit(X_train, Y_train.flatten())
Y_K_neigh_predict = K_neigh.predict(X_test)
K_neigh_conf_matrix = metrics.confusion_matrix(Y_test.flatten(), Y_K_neigh_predict.flatten())
print('KNeighborsClassifier --- classification_report ', metrics.classification_report(Y_test.flatten(), Y_K_neigh_predict.flatten()))
print('KNeighborsClassifier --- confusion_matrix ', K_neigh_conf_matrix)

#visualizing confusion matrix
sn.set(font_scale=1.4) # for label size
sn.heatmap(K_neigh_conf_matrix, annot=True, annot_kws={"size": 16}) # font size
plt.show()

#DecisionTreeClassifier
#%%

from sklearn.tree import DecisionTreeClassifier
DTC = DecisionTreeClassifier(max_depth=5)
DTC.fit(X_train, Y_train.flatten())
DTC_predict = DTC.predict(X_test)
DTC_conf_matrix = metrics.confusion_matrix(Y_test.flatten(), DTC_predict.flatten())
print('DecisionTreeClassifier --- classification_report ', metrics.classification_report(Y_test.flatten(), DTC_predict.flatten()))
print('DecisionTreeClassifier --- confusion_matrix ', DTC_conf_matrix)

#visualizing confusion matrix
sn.set(font_scale=1.4) # for label size
sn.heatmap(DTC_conf_matrix, annot=True, annot_kws={"size": 16}) # font size
plt.show()

#MLPClassifier
#%%

from sklearn.neural_network import MLPClassifier
MLP = MLPClassifier()
MLP.fit(X_train, Y_train.flatten())
MLP_predict = MLP.predict(X_test)
MLP_conf_matrix = metrics.confusion_matrix(Y_test.flatten(), MLP_predict.flatten())
print('MLPClassifier --- classification_report ', metrics.classification_report(Y_test.flatten(), MLP_predict.flatten()))
print('MLPClassifier --- confusion_matrix ', MLP_conf_matrix)

#visualizing confusion matrix
sn.set(font_scale=1.4) # for label size
sn.heatmap(MLP_conf_matrix, annot=True, annot_kws={"size": 16}) # font size
plt.show()
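
# The single 70/30 split is noisy on a small dataset; an optional 5-fold comparison
# of the three classifiers (a sketch, reusing the preprocessed x_stan_1 / diagnoses_1):
from sklearn.model_selection import cross_val_score
for name, model in [('kNN', K_neigh), ('tree', DTC), ('MLP', MLP)]:
    scores = cross_val_score(model, x_stan_1, diagnoses_1.ravel(), cv=5)
    print(name, 'mean accuracy:', scores.mean())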

#PCA KNeighborsClassifier + original data
#%%

K_neigh_all_predicted = K_neigh.predict(x_stan)
f, axes = plt.subplots(1, 2, figsize=(15,7))

scatter(x=pcaDF.loc[:, 'principal component 1'], y=pcaDF.loc[:, 'principal component 2'], ax=axes[0], hue=diagnoses[:, 0])
scatter(x=pcaDF.loc[:, 'principal component 1'], y=pcaDF.loc[:, 'principal component 2'], ax=axes[1], hue=K_neigh_all_predicted.flatten())

plt.title("KNeighborsClassifier")
plt.show()

#PCA MLPClassifier + original data
#%%

MLP_all_predicted = MLP.predict(x_stan)
f, axes = plt.subplots(1, 2, figsize=(15,7))

scatter(x=pcaDF.loc[:, 'principal component 1'], y=pcaDF.loc[:, 'principal component 2'], ax=axes[0], hue=diagnoses[:, 0])
scatter(x=pcaDF.loc[:, 'principal component 1'], y=pcaDF.loc[:, 'principal component 2'], ax=axes[1], hue=MLP_all_predicted.flatten())

plt.title("MLPClassifier")
plt.show()

#PCA DecisionTreeClassifier + original data
#%%

DTC_all_predicted = DTC.predict(x_stan)
f, axes = plt.subplots(1, 2, figsize=(15,7))

scatter(x=pcaDF.loc[:, 'principal component 1'], y=pcaDF.loc[:, 'principal component 2'], ax=axes[0], hue=diagnoses[:, 0])
scatter(x=pcaDF.loc[:, 'principal component 1'], y=pcaDF.loc[:, 'principal component 2'], ax=axes[1], hue=DTC_all_predicted.flatten())

plt.title("DecisionTreeClassifier")
plt.show()