# Streamlit app: Global Findex (139 countries) — credit-card usage descriptives,
# PCA + KMeans / hierarchical clustering, and a cosine-similarity recommender.
| import pandas as pd | |
| import numpy as np | |
| from sklearn.preprocessing import StandardScaler | |
| from sklearn.preprocessing import LabelEncoder | |
| from sklearn.decomposition import PCA | |
| from sklearn.cluster import KMeans, AgglomerativeClustering | |
| import matplotlib.pyplot as plt | |
| import seaborn as sns | |
| from scipy.cluster.hierarchy import linkage, dendrogram | |
| from sklearn.metrics.pairwise import cosine_similarity | |
| import streamlit as st | |
# ---- Data loading ----------------------------------------------------------
# NOTE: the CSV is deliberately read twice so the descriptives section keeps
# the exact dataframe from the first assignment's code (see banner below).
data_path = 'micro_world_139countries.csv'
data = pd.read_csv(data_path, encoding='ISO-8859-1', low_memory=False)
df = pd.read_csv(data_path, encoding='ISO-8859-1')

# Columns used by the first-assignment descriptives; drop incomplete rows.
subset_cols = ['economycode', 'age', 'fin7', 'fin8', 'fin8a', 'fin8b',
               'fin22a', 'fin24', 'fin34a', 'anydigpayment', 'fin30',
               'inc_q', 'educ', 'urbanicity_f2f', 'emp_in']
subset_df = df[subset_cols].dropna()

# Credit-card usage (fin8) summed per country, then expressed as a share of
# the worldwide total in percent.
cc_usage_country = subset_df.groupby('economycode')['fin8'].sum()
total_usage = cc_usage_country.sum()
total_usage_prc = cc_usage_country.div(total_usage).mul(100)
st.title('Welcome to my humble analysis :sunglasses:')

# ---- Refresh ---------------------------------------------------------------
# FIX: st.experimental_rerun() was deprecated and later removed from
# Streamlit; prefer st.rerun() when it exists and fall back on old versions.
if st.button('Refresh Page'):
    if hasattr(st, 'rerun'):
        st.rerun()
    else:
        st.experimental_rerun()
#===================================================================================================
#DESCRIPTIVES 1st ASSIGNMENT #there are two csv reading so that it fits the 1st assignment's code...
#===================================================================================================
if st.button("CREDIT CARD USAGE DESCRIPTIVES"):
    # BUG FIX: this figure was previously built but never rendered (and never
    # closed), so each click leaked an orphaned matplotlib figure in the
    # long-running Streamlit process. Render it, then close it explicitly.
    fig_cc = plt.figure(figsize=(12, 6))
    plt.bar(total_usage_prc.index, total_usage_prc.values, color='green')
    plt.xlabel('Country', color='blue')
    plt.ylabel('Credit Card Usage (%)')
    plt.title('Credit Card Usage by Country %', color='blue')
    plt.xticks(rotation=90)
    plt.tight_layout()
    st.pyplot(fig_cc)
    plt.close(fig_cc)

    # Series -> dataframe so min/max rows can be located with idxmin/idxmax.
    df_percentage = total_usage_prc.reset_index()
    df_percentage.columns = ['economycode', 'percentage']

    # Countries with the lowest / highest credit-card usage share.
    min_prc_country = df_percentage.loc[df_percentage['percentage'].idxmin()]
    max_prc_country = df_percentage.loc[df_percentage['percentage'].idxmax()]
    st.write(f"The minimum CC usage is in: {min_prc_country['economycode']} {min_prc_country['percentage']:.2f}%")
    st.write(f"The maximum CC usage is in: {max_prc_country['economycode']} {max_prc_country['percentage']:.2f}%")

    # Full per-country table plus an interactive bar chart.
    st.table(df_percentage)
    st.bar_chart(total_usage_prc)
#=========================================================================================================
#CLUSTERING
#=========================================================================================================
# First-pass imputation demo. As the original comments admit, this result is
# superseded below (selected_columns is redefined later); kept to mirror the
# original notebook flow.
selected_columns = ['age', 'inc_q', 'fin44a', 'fin44b', 'fin44c', 'fin44d',
                    'borrowed', 'saved', 'account_fin', 'anydigpayment',
                    'internetaccess']
# Work on a copy: fillna(..., inplace=True) on a slice of `data` triggers
# pandas chained-assignment warnings and may not propagate to `data` anyway.
selected_columns = data[selected_columns].copy()
# BUG FIX: the old code filled EVERY column's NaNs with the mean of `age`;
# fill each column with its own mean instead (the stated intent).
selected_columns = selected_columns.fillna(selected_columns.mean())
selected_columns.isnull().sum()  # sanity check: should now be all zeros
# Feature matrix actually used for PCA / clustering.
features = ['age', 'inc_q', 'fin44a', 'fin44b', 'fin44c', 'fin44d',
            'borrowed', 'saved', 'account_fin', 'anydigpayment',
            'internetaccess']
X = data[features].copy()  # copy so imputation does not mutate `data`
# BUG FIX: the old code filled every column's NaNs with the *age* mean
# (e.g. binary flags got a value around the mean age); impute each feature
# with its own column mean instead.
X = X.fillna(X.mean())
X.isna().sum()  # sanity check: no missing values remain

# Standardize features to zero mean / unit variance before PCA.
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)
# Full PCA to inspect how much variance each component explains.
pca = PCA()
X_pca = pca.fit_transform(X_scaled)
explained_variance = pca.explained_variance_ratio_

with st.expander("Explained Variance"):
    # BUG FIX: plt.show() is a no-op inside a Streamlit app, so this expander
    # previously rendered nothing — draw the figure with st.pyplot instead.
    fig_var = plt.figure(figsize=(12, 7))
    plt.bar(range(len(explained_variance)), explained_variance,
            alpha=0.7, align='center', color='teal')
    plt.ylabel('Explained Variance Ratio', fontsize=14)
    plt.xlabel('Principal Components', fontsize=14)
    plt.title('PCA: Explained Variance for Each Component', fontsize=16)
    plt.xticks(fontsize=12)
    plt.yticks(fontsize=12)
    plt.tight_layout()
    plt.grid(axis='y')
    st.pyplot(fig_var)
    plt.close(fig_var)
# Keep the first two principal components as the clustering space.
pca = PCA(n_components=2)
X_pca_2d = pca.fit_transform(X_scaled)

# PERF FIX: the original refit KMeans for k = 1..10 on every Streamlit rerun
# just to collect inertias for an elbow plot that was commented out. k = 6
# was already chosen from that analysis, so the loop is removed; re-add it
# only if the elbow plot is needed again.
kmeans = KMeans(n_clusters=6, random_state=42)
kmeans.fit(X_pca_2d)
labels = kmeans.labels_  # one cluster id per row of the full feature matrix
# Take a fixed 1,000-row sample (keeps the linkage / similarity steps below
# tractable; both scale super-linearly with the number of rows).
sample_data = X.sample(n=1000, random_state=42)
selected_columns = sample_data.columns  # name reuse: now the sample's columns

# Ordinal-encode age on the sample (the other columns are left as-is).
le = LabelEncoder()
sample_data['age'] = le.fit_transform(sample_data['age'])

# Standardize the sample, then project it onto its first two principal
# components for hierarchical clustering and similarity computations.
scaler = StandardScaler()
X_sample_scaled = scaler.fit_transform(sample_data[selected_columns])
pca = PCA(n_components=2)
X_pca_2d = pca.fit_transform(X_sample_scaled)

# Ward linkage matrix (feeds the dendrogram, which is currently disabled).
linked = linkage(X_pca_2d, method='ward')
# Agglomerative (Ward) clustering of the 2-D PCA sample into 6 clusters.
hierarchical = AgglomerativeClustering(n_clusters=6, metric='euclidean', linkage='ward')
hier_clusters = hierarchical.fit_predict(X_pca_2d)

# Pairwise cosine similarity between the sampled points in PCA space.
similarity_matrix = cosine_similarity(X_pca_2d)
# GENERALIZED: index/columns follow the matrix size instead of a hard-coded
# 1000, so changing the sample size above cannot break this dataframe.
n_points = len(similarity_matrix)
similarity_df = pd.DataFrame(similarity_matrix,
                             index=range(n_points), columns=range(n_points))
# Small 10x10 corner of the matrix — the only size that renders cleanly in
# the heatmap without glitching.
sub_simi_df = similarity_df.iloc[:10, :10]
def get_recommendations(index, similarity_df, top_n=5):
    """Return the labels of the ``top_n`` records most similar to ``index``.

    Parameters
    ----------
    index : hashable
        Column label of the query record in ``similarity_df``.
    similarity_df : pandas.DataFrame
        Square symmetric similarity matrix (same labels on both axes).
    top_n : int, default 5
        Number of neighbours to return, best first.
    """
    # Scores of every record against the query, most similar first.
    sim_scores = similarity_df[index].sort_values(ascending=False)
    # BUG FIX: the self-similarity score is 1.0 (the old comment claimed 0),
    # and with tied scores it was not guaranteed to sort first — so slicing
    # off position 0 could drop a real neighbour. Drop the query label itself.
    sim_scores = sim_scores.drop(labels=[index], errors='ignore')
    return sim_scores.iloc[:top_n].index.tolist()
# Demo of the recommender: the 500 records most similar to record 0.
recommended_indices = get_recommendations(0, similarity_df, top_n=500)

# Attach the k-means label to each row and profile clusters by feature means.
X['cluster'] = kmeans.labels_
cluster_analysis = X.groupby('cluster').mean().round(3)

# BUG FIX: `cluster_analysis['economy'] = data['economy']` aligned cluster
# ids 0..5 with the first six survey *rows*, labelling each cluster with an
# arbitrary respondent's economy. Use each cluster's most frequent economy.
cluster_analysis['economy'] = data['economy'].groupby(X['cluster']).agg(
    lambda s: s.mode().iat[0] if not s.mode().empty else None)

# Reorder the columns so 'economy' comes first for readability.
cols = ['economy'] + [col for col in cluster_analysis.columns if col != 'economy']
cluster_analysis = cluster_analysis[cols]
print(cluster_analysis)
# Two quick matplotlib summaries of the clusters (script-mode diagnostics;
# the Streamlit versions of these plots are rendered further down).
plt.figure(figsize=(12, 6))

# First subplot: average age per cluster.
# BUG FIX: this subplot draws y='age' but was titled/labelled "Average Income
# Quartile"; labels now match the plotted variable (consistent with the
# corrected duplicate of this plot later in the file).
plt.subplot(1, 2, 1)  # 1 row, 2 columns, 1st subplot
sns.barplot(data=X, x='cluster', y='age', palette='dark')
plt.title('Average Age by Cluster')
plt.xlabel('Cluster')
plt.ylabel('Average Age')
plt.xticks(rotation=0)
plt.grid(axis='y')

# Second subplot: financial worry about medical bills per cluster.
plt.subplot(1, 2, 2)  # 1 row, 2 columns, 2nd subplot
sns.barplot(data=X, x='cluster', y='fin44b', palette='dark')
plt.title('Financial Worries about Medical Bills by Cluster')
plt.xlabel('Cluster')
plt.ylabel('Financial Worries about Medical Bills')
plt.xticks(rotation=0)
plt.grid(axis='y')
plt.show()
with st.expander("Start", expanded=False):
    # Variance explained by the two retained (sample) principal components.
    st.title('PCA: Explained Variance for Each Component')
    st.bar_chart(pca.explained_variance_ratio_)

    # Cosine-similarity heatmap.
    # BUG FIX: the full 1000x1000 matrix was rendered even though sub_simi_df
    # (10x10) was built precisely so the heatmap fits on screen — use it here.
    st.title("HEATMAP")
    fig_heat = plt.figure(figsize=(10, 8))
    sns.heatmap(sub_simi_df, cmap='coolwarm')
    plt.title('Cosine Similarity Matrix Heatmap')
    plt.xlabel('Index')
    plt.ylabel('Index')
    plt.tight_layout()
    st.pyplot(fig_heat)
    plt.close(fig_heat)

    # Per-cluster feature means (economy listed first).
    st.title("Cluster Analysis Table")
    st.table(cluster_analysis)

    st.title("Explanation of Cluster0 as an example:")
    st.write("""
- **Age**: ~48
- **inc_q** (income quartile): 3.7 (~4) belongs to the 20% of the middle class
- **fin44a** (financially worried about old age): not so worried about financial status about old age (value: 2.8)
- **fin44b** (financially worried about medical bills): not so worried about medical bills (value: 2.8)
- **fin44c** (financially worried about bills): not worried at all about bills (value: 2.95)
- **fin44d** (financially worried about education): not worried at all about educational expenses (value: 3.1)
- **borrowed** (borrowed money in the past year): A value of 0.53 means that in that cluster there are the 53% of people who have borrowed
- **saved** (saved money in the past year): A value of 0.8 means that in that cluster there are the 80% of people who have saved
- **account_fin** (owns an account at a financial institution): A value of 0.99 means that in that cluster there are the 99% of people who own an account at a financial institution
- **anydigpayment** (if the person made any digital payments): A value of 0.99 means that in that cluster there are the 99% of people who made any digital payments
*We can see that the older people between 40 and 50 yrs old are not so worried or worried at all about medical costs than the youth in Afghanistan. In contrast with individuals between 30-40 in cluster4 are very worried about the medical expenses*""")

    st.title('2 Plots showing Average Income Quartiles and Medical Financial Worries based on the Age:')
    fig, axes = plt.subplots(1, 2, figsize=(12, 6))
    # 1st plot: average age per cluster.
    sns.barplot(data=X, x='cluster', y='age', palette='dark', ax=axes[0])
    axes[0].set_title('Average Age by Cluster')
    axes[0].set_xlabel('Cluster')
    axes[0].set_ylabel('Average Age')
    axes[0].grid(axis='y')
    # 2nd plot: financial worry about medical bills per cluster.
    sns.barplot(data=X, x='cluster', y='fin44b', palette='dark', ax=axes[1])
    axes[1].set_title('Financial Worries about Medical Bills by Cluster')
    axes[1].set_xlabel('Cluster')
    axes[1].set_ylabel('Financial Worries about Medical Bills')
    axes[1].grid(axis='y')
    plt.tight_layout()
    st.pyplot(fig)
    plt.close(fig)