# Streamlit app: Global Findex credit-card usage descriptives + clustering.
# (Hugging Face Spaces page residue removed from the top of this file.)
import pandas as pd
import numpy as np
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import LabelEncoder
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans, AgglomerativeClustering
import matplotlib.pyplot as plt
import seaborn as sns
from scipy.cluster.hierarchy import linkage, dendrogram
from sklearn.metrics.pairwise import cosine_similarity
import streamlit as st
# --- Data loading -------------------------------------------------------
# Global Findex micro data; ISO-8859-1 because the file contains non-UTF-8
# country names. low_memory=False parses each column in one pass instead of
# chunk-wise dtype guessing.
data_path = 'micro_world_139countries.csv'
data = pd.read_csv(data_path, encoding='ISO-8859-1', low_memory=False)
# BUG FIX: the original parsed the same ~13 MB CSV a second time into `df`
# "so that it fits the 1st assignment's code"; alias the loaded frame instead.
df = data
subset_cols = ['economycode', 'age', 'fin7', 'fin8', 'fin8a', 'fin8b', 'fin22a', 'fin24', 'fin34a', 'anydigpayment', 'fin30', 'inc_q', 'educ', 'urbanicity_f2f', 'emp_in']
subset_df = df[subset_cols].dropna()
# Each country's share (%) of total credit-card usage (fin8).
cc_usage_country = subset_df.groupby('economycode')['fin8'].sum()
total_usage = cc_usage_country.sum()
total_usage_prc = (cc_usage_country / total_usage) * 100
st.title('Welcome to my humble analysis :sunglasses:')
# --- Manual refresh -----------------------------------------------------
# st.experimental_rerun() was deprecated and later removed from Streamlit;
# use st.rerun() when available and fall back for older installs.
if st.button('Refresh Page'):
    if hasattr(st, 'rerun'):
        st.rerun()
    else:
        st.experimental_rerun()
#===================================================================================================
# DESCRIPTIVES, 1st ASSIGNMENT
#===================================================================================================
if st.button("CREDIT CARD USAGE DESCRIPTIVES"):
    # Bar chart of each country's share of total credit-card usage.
    fig = plt.figure(figsize=(12, 6))
    plt.bar(total_usage_prc.index, total_usage_prc.values, color='green')
    plt.xlabel('Country', color='blue')
    plt.ylabel('Credit Card Usage (%)')
    plt.title('Credit Card Usage by Country %', color='blue')
    plt.xticks(rotation=90)
    plt.tight_layout()
    # BUG FIX: the figure was built but never rendered (and leaked); show it
    # through Streamlit and release it afterwards.
    st.pyplot(fig)
    plt.close(fig)
    # Tidy frame for the min/max lookup and the table below.
    df_percentage = total_usage_prc.reset_index()
    df_percentage.columns = ['economycode', 'percentage']
    # Countries with the smallest / largest usage share.
    min_prc_country = df_percentage.loc[df_percentage['percentage'].idxmin()]
    max_prc_country = df_percentage.loc[df_percentage['percentage'].idxmax()]
    st.write(f"The minimum CC usage is in: {min_prc_country['economycode']} {min_prc_country['percentage']:.2f}%")
    st.write(f"The maximum CC usage is in: {max_prc_country['economycode']} {max_prc_country['percentage']:.2f}%")
    # CC per country table
    st.table(df_percentage)
    st.bar_chart(total_usage_prc)
#=========================================================================================================
# CLUSTERING
#=========================================================================================================
# NOTE(review): this `selected_columns` frame is never used afterwards (the
# name is rebound at the sampling step below); kept for parity with the
# original notebook. Work on an explicit copy so fillna does not write into
# a view of `data`.
selected_columns = ['age', 'inc_q', 'fin44a', 'fin44b', 'fin44c', 'fin44d',
                    'borrowed', 'saved', 'account_fin', 'anydigpayment',
                    'internetaccess']
selected_columns = data[selected_columns].copy()
# BUG FIX: the original filled NaNs in *every* column with the mean of
# 'age'; fill only 'age' with its own mean.
selected_columns['age'] = selected_columns['age'].fillna(selected_columns['age'].mean())
# Feature matrix used for PCA + clustering.
features = ['age', 'inc_q', 'fin44a', 'fin44b', 'fin44c', 'fin44d',
            'borrowed', 'saved', 'account_fin', 'anydigpayment',
            'internetaccess']
# .copy() so the in-place fill below cannot warn about / write into `data`.
X = data[features].copy()
# BUG FIX: the original filled NaNs in all columns with the mean of 'age',
# so binary indicators (borrowed, saved, ...) got values near the average
# age. Fill each column with its own mean instead.
X.fillna(X.mean(), inplace=True)
# Standardise so every feature contributes comparably to PCA/KMeans.
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)
# Full PCA to inspect how much variance each component explains.
pca = PCA()
X_pca = pca.fit_transform(X_scaled)
explained_variance = pca.explained_variance_ratio_
with st.expander("Explained Variance"):
    fig = plt.figure(figsize=(12, 7))
    plt.bar(range(len(explained_variance)), explained_variance, alpha=0.7, align='center', color='teal')
    plt.ylabel('Explained Variance Ratio', fontsize=14)
    plt.xlabel('Principal Components', fontsize=14)
    plt.title('PCA: Explained Variance for Each Component', fontsize=16)
    plt.xticks(fontsize=12)
    plt.yticks(fontsize=12)
    plt.tight_layout()
    plt.grid(axis='y')
    # BUG FIX: plt.show() is a no-op inside a Streamlit app; render the
    # figure through Streamlit and release it.
    st.pyplot(fig)
    plt.close(fig)
# Reduce the standardised features to the first two principal components;
# clustering and the (removed) scatter plots work in this 2-D space.
pca = PCA(n_components=2)
X_pca_2d = pca.fit_transform(X_scaled)
# Elbow method: record the KMeans inertia for k = 1..10. (The elbow plot
# itself was commented out in the original script.)
clusters = [
    KMeans(n_clusters=k, random_state=0).fit(X_pca_2d).inertia_
    for k in range(1, 11)
]
# Final model: six clusters on the 2-D projection, seeded for
# reproducibility.
kmeans = KMeans(n_clusters=6, random_state=42)
kmeans.fit(X_pca_2d)
labels = kmeans.labels_
# Hierarchical clustering is run on a fixed 1 000-row sample — the full
# dendrogram / similarity matrix would be far too heavy.
sample_data = X.sample(n=1000, random_state=42).copy()
# Rebinds the earlier `selected_columns` name to the sample's column index.
selected_columns = sample_data.columns
# NOTE(review): label-encoding 'age' replaces each age with its rank among
# the distinct ages present in the sample — kept to reproduce the original
# output, though scaling the raw ages would be more conventional.
le = LabelEncoder()
sample_data['age'] = le.fit_transform(sample_data['age'])
# Fresh scaler fitted on the sample only.
scaler = StandardScaler()
X_sample_scaled = scaler.fit_transform(sample_data[selected_columns])
# Project the sample onto its first two principal components.
# (This rebinds `pca` and `X_pca_2d` to the sample-based versions.)
pca = PCA(n_components=2)
X_pca_2d = pca.fit_transform(X_sample_scaled)
# Ward linkage matrix (input for a dendrogram) plus the matching
# six-cluster agglomerative model on the same 2-D points.
linked = linkage(X_pca_2d, method='ward')
hierarchical = AgglomerativeClustering(n_clusters=6, metric='euclidean', linkage='ward')
hier_clusters = hierarchical.fit_predict(X_pca_2d)
# Pairwise cosine similarity between the sampled, PCA-reduced records.
similarity_matrix = cosine_similarity(X_pca_2d)
# GENERALISED: derive the label range from the matrix itself instead of
# hard-coding 1000, so changing the sample size cannot break this.
n_samples = similarity_matrix.shape[0]
similarity_df = pd.DataFrame(similarity_matrix, index=range(n_samples), columns=range(n_samples))
# 10x10 corner of the matrix: small enough to render in a heatmap without
# glitching.
sub_simi_df = similarity_df.iloc[:10, :10]
def get_recommendations(index, similarity_df, top_n=5):
    """Return the labels of the ``top_n`` records most similar to ``index``.

    Parameters
    ----------
    index : hashable
        Column label of the query record in ``similarity_df``.
    similarity_df : pandas.DataFrame
        Square similarity matrix whose rows and columns share labels.
    top_n : int, default 5
        Number of neighbours to return.

    Returns
    -------
    list
        Row labels of the most similar records, most similar first.
    """
    # Most similar records come first.
    sim_scores = similarity_df[index].sort_values(ascending=False)
    # Skip position 0 — the record's similarity to itself (cosine ~1.0,
    # not 0 as the original comment claimed) — and keep the next top_n.
    sim_scores = sim_scores.iloc[1:top_n + 1]
    return sim_scores.index.tolist()
# Sanity-check the recommender: the 500 nearest neighbours of record 0.
recommended_indices = get_recommendations(0, similarity_df, top_n=500)
#print(recommended_indices)
#print(f"Recommended records for record 0: {recommended_indices}")
#print("Details of recommended records:")
#print(X.iloc[recommended_indices])
# Attach the full-data KMeans labels to X and average each feature per cluster.
X['cluster'] = kmeans.labels_
cluster_analysis = X.groupby('cluster').mean().round(3)
#print(cluster_analysis)
# NOTE(review): cluster_analysis is indexed by cluster id (0-5), so this
# assignment aligns with the *first six rows* of data['economy'] — i.e. the
# economy of rows 0..5 of the raw file, not a per-cluster economy. Looks
# unintended; confirm whether a per-cluster mode was meant.
cluster_analysis['economy'] = data['economy']
cols = ['economy'] + [col for col in cluster_analysis.columns if col != 'economy']
cluster_analysis = cluster_analysis[cols] #reorder the columns so 'economy' is the first one
print(cluster_analysis)
#print(cols)
# Console/notebook version of the two cluster bar charts. (The Streamlit
# page rebuilds the same figure with st.pyplot further down; plt.show() is
# a no-op when the script runs under `streamlit run`.)
plt.figure(figsize=(12, 6))
# First plot: average age per cluster.
plt.subplot(1, 2, 1)  # 1 row, 2 columns, 1st subplot
sns.barplot(data=X, x='cluster', y='age', palette='dark')
# BUG FIX: this subplot shows 'age' but was titled/labelled
# "Average Income Quartile"; the labels now match the data (as in the
# corrected Streamlit version of the same figure).
plt.title('Average Age by Cluster')
plt.xlabel('Cluster')
plt.ylabel('Average Age')
plt.xticks(rotation=0)
plt.grid(axis='y')
# Second plot: average medical-bill worry score per cluster.
plt.subplot(1, 2, 2)  # 1 row, 2 columns, 2nd subplot
sns.barplot(data=X, x='cluster', y='fin44b', palette='dark')
plt.title('Financial Worries about Medical Bills by Cluster')
plt.xlabel('Cluster')
plt.ylabel('Financial Worries about Medical Bills')
plt.xticks(rotation=0)
plt.grid(axis='y')
plt.show()
with st.expander("Start", expanded=False):
    st.title('PCA: Explained Variance for Each Component')
    # `pca` currently holds the 2-component fit on the 1 000-row sample.
    st.bar_chart(pca.explained_variance_ratio_)
    st.title("HEATMAP")
    # BUG FIX: the original rendered the full 1000x1000 similarity matrix,
    # which (per the author's own note) did not fit the screen. Use the
    # 10x10 subset `sub_simi_df` that was prepared exactly for this.
    fig_hm = plt.figure(figsize=(10, 8))
    sns.heatmap(sub_simi_df, cmap='coolwarm')
    plt.title('Cosine Similarity Matrix Heatmap')
    plt.xlabel('Index')
    plt.ylabel('Index')
    plt.tight_layout()
    st.pyplot(fig_hm)
    plt.close(fig_hm)
    st.title("Cluster Analysis Table")
    st.table(cluster_analysis)
    st.title("Explanation of Cluster0 as an example:")
    st.write("""
- **Age**: ~48
- **inc_q** (income quartile): 3.7 (~4) belongs to the 20% of the middle class
- **fin44a** (financially worried about old age): not so worried about financial status about old age (value: 2.8)
- **fin44b** (financially worried about medical bills): not so worried about medical bills (value: 2.8)
- **fin44c** (financially worried about bills): not worried at all about bills (value: 2.95)
- **fin44d** (financially worried about education): not worried at all about educational expenses (value: 3.1)
- **borrowed** (borrowed money in the past year): A value of 0.53 means that in that cluster there are the 53% of people who have borrowed
- **saved** (saved money in the past year): A value of 0.8 means that in that cluster there are the 80% of people who have saved
- **account_fin** (owns an account at a financial institution): A value of 0.99 means that in that cluster there are the 99% of people who own an account at a financial institution
- **anydigpayment** (if the person made any digital payments): A value of 0.99 means that in that cluster there are the 99% of people who made any digital payments
*We can see that the older people between 40 and 50 yrs old are not so worried or worried at all about medical costs than the youth in Afghanistan. In contrast with individuals between 30-40 in cluster4 are very worried about the medical expenses*""")
    st.title('2 Plots showing Average Income Quartiles and Medical Financial Worries based on the Age:')
    fig, axes = plt.subplots(1, 2, figsize=(12, 6))
    # 1st plot: average age per cluster.
    sns.barplot(data=X, x='cluster', y='age', palette='dark', ax=axes[0])
    axes[0].set_title('Average Age by Cluster')
    axes[0].set_xlabel('Cluster')
    axes[0].set_ylabel('Average Age')
    axes[0].grid(axis='y')
    # 2nd plot: average medical-bill worry score per cluster.
    sns.barplot(data=X, x='cluster', y='fin44b', palette='dark', ax=axes[1])
    axes[1].set_title('Financial Worries about Medical Bills by Cluster')
    axes[1].set_xlabel('Cluster')
    axes[1].set_ylabel('Financial Worries about Medical Bills')
    axes[1].grid(axis='y')
    plt.tight_layout()
    st.pyplot(fig)