#!/usr/bin/env python3
"""
Quick analysis of NMRGym balanced datasets without expensive scaffold computation
"""
import pickle
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from collections import Counter
from rdkit import Chem
import json
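# The pickled datasets are lists of record dicts. Judging from the field
# accesses below (this is an assumption, not a documented schema), each
# record holds at least:
#   'smiles'    : str                - SMILES string of the molecule
#   'h_shift'   : sequence or None   - 1H chemical shifts, when available
#   'c_shift'   : sequence or None   - 13C chemical shifts, when available
#   'fg_onehot' : length-22 array    - functional-group multi-hot vector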
# Set style
sns.set_style("whitegrid")
plt.rcParams['font.size'] = 10
plt.rcParams['figure.dpi'] = 300
# Functional group names (indices 0-21, 22 classes)
FG_NAMES = [
    "Alcohol", "Carboxylic Acid", "Ester", "Ether", "Aldehyde", "Ketone",
    "Alkene", "Alkyne", "Benzene", "Primary Amine", "Secondary Amine",
    "Tertiary Amine", "Amide", "Cyano", "Fluorine", "Chlorine",
    "Bromine", "Iodine", "Sulfonamide", "Sulfone", "Sulfide", "Phosphoric Acid"
]
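# Each record's 'fg_onehot' vector is assumed to align index-for-index with
# FG_NAMES, so summing the vectors over a split yields per-group counts.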
def load_dataset(pkl_path):
    """Load a pickle file"""
    print(f"Loading {pkl_path}...")
    with open(pkl_path, "rb") as f:
        return pickle.load(f)
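# Note: pickle.load can execute arbitrary code embedded in the file, so these
# datasets should only be loaded from trusted sources.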
def get_element_counts(smiles):
    """Get element counts from SMILES (counting implicit hydrogens)"""
    try:
        mol = Chem.MolFromSmiles(smiles)
        if mol is None:
            return {}
        element_counts = {}
        for atom in mol.GetAtoms():
            symbol = atom.GetSymbol()
            element_counts[symbol] = element_counts.get(symbol, 0) + 1
            # Hydrogens are implicit in most SMILES, so count them explicitly;
            # otherwise the 'H' bar in the element plot would always be zero.
            element_counts['H'] = element_counts.get('H', 0) + atom.GetTotalNumHs()
        return element_counts
    except Exception:
        return {}
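# Example, for reference: ethanol has two carbons, one oxygen, and six
# implicit hydrogens, so get_element_counts("CCO") -> {'C': 2, 'H': 6, 'O': 1}.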
def analyze_dataset(dataset, name):
    """Analyze a single dataset (quick version without scaffold computation)"""
    print(f"\n{'='*60}")
    print(f"Analyzing {name}")
    print(f"{'='*60}")
    stats = {
        'name': name,
        'total_records': len(dataset),
        'unique_smiles': 0,
        'functional_groups': np.zeros(len(FG_NAMES)),
        'elements': Counter(),
        'h_spectra': 0,
        'c_spectra': 0,
    }
    unique_smiles = set()
    for i, record in enumerate(dataset):
        if (i + 1) % 1000 == 0:
            print(f"  Processed {i+1}/{len(dataset)} records...")
        smiles = record['smiles']
        unique_smiles.add(smiles)
        # Count spectra: a record counts only if the shift list exists and is non-empty
        if 'h_shift' in record and record['h_shift'] is not None and len(record['h_shift']) > 0:
            stats['h_spectra'] += 1
        if 'c_shift' in record and record['c_shift'] is not None and len(record['c_shift']) > 0:
            stats['c_spectra'] += 1
        # Accumulate functional-group multi-hot vectors
        if 'fg_onehot' in record:
            stats['functional_groups'] += record['fg_onehot']
        # Element counts
        elem_counts = get_element_counts(smiles)
        for elem, count in elem_counts.items():
            stats['elements'][elem] += count
    stats['unique_smiles'] = len(unique_smiles)
    print(f"Total records: {stats['total_records']:,}")
    print(f"Unique SMILES: {stats['unique_smiles']:,}")
    print(f"¹H NMR spectra: {stats['h_spectra']:,}")
    print(f"¹³C NMR spectra: {stats['c_spectra']:,}")
    print("\nTop 5 elements:")
    for elem, count in stats['elements'].most_common(5):
        print(f"  {elem}: {count:,}")
    return stats
def plot_functional_groups(train_stats, val_stats, test_stats, output_path):
    """Plot functional group distribution"""
    fig, ax = plt.subplots(figsize=(14, 6))
    x = np.arange(len(FG_NAMES))
    width = 0.25
    train_fg = train_stats['functional_groups']
    val_fg = val_stats['functional_groups']
    test_fg = test_stats['functional_groups']
    ax.bar(x - width, train_fg, width, label='Train', alpha=0.8)
    ax.bar(x, val_fg, width, label='Val', alpha=0.8)
    ax.bar(x + width, test_fg, width, label='Test', alpha=0.8)
    ax.set_xlabel('Functional Group')
    ax.set_ylabel('Count')
    ax.set_title('Functional Group Distribution Across Datasets')
    ax.set_xticks(x)
    ax.set_xticklabels(FG_NAMES, rotation=45, ha='right')
    ax.legend()
    ax.grid(axis='y', alpha=0.3)
    plt.tight_layout()
    plt.savefig(output_path, dpi=300, bbox_inches='tight')
    plt.close()
    print(f"\nSaved: {output_path}")
def plot_element_distribution(train_stats, val_stats, test_stats, output_path):
    """Plot element distribution for common elements"""
    # Focus on common organic elements
    common_elements = ['C', 'H', 'O', 'N', 'F', 'Cl', 'Br', 'S', 'P', 'I']
    fig, ax = plt.subplots(figsize=(12, 6))
    # Get counts for each element
    train_counts = [train_stats['elements'].get(e, 0) for e in common_elements]
    val_counts = [val_stats['elements'].get(e, 0) for e in common_elements]
    test_counts = [test_stats['elements'].get(e, 0) for e in common_elements]
    x = np.arange(len(common_elements))
    width = 0.25
    ax.bar(x - width, train_counts, width, label='Train', alpha=0.8)
    ax.bar(x, val_counts, width, label='Val', alpha=0.8)
    ax.bar(x + width, test_counts, width, label='Test', alpha=0.8)
    ax.set_xlabel('Element')
    ax.set_ylabel('Total Count')
    ax.set_title('Element Distribution Across Datasets')
    ax.set_xticks(x)
    ax.set_xticklabels(common_elements)
    ax.legend()
    ax.grid(axis='y', alpha=0.3)
    # Log scale keeps low-abundance elements (halogens, P, S) visible next to C/H
    ax.set_yscale('log')
    plt.tight_layout()
    plt.savefig(output_path, dpi=300, bbox_inches='tight')
    plt.close()
    print(f"Saved: {output_path}")
def plot_dataset_overview(train_stats, val_stats, test_stats, output_path):
    """Plot overview of dataset statistics"""
    fig, axes = plt.subplots(2, 2, figsize=(14, 10))
    # 1. Total records and unique SMILES
    ax = axes[0, 0]
    datasets = ['Train', 'Val', 'Test']
    total_records = [train_stats['total_records'], val_stats['total_records'], test_stats['total_records']]
    unique_smiles = [train_stats['unique_smiles'], val_stats['unique_smiles'], test_stats['unique_smiles']]
    x = np.arange(len(datasets))
    width = 0.35
    ax.bar(x - width/2, total_records, width, label='Total Records', alpha=0.8)
    ax.bar(x + width/2, unique_smiles, width, label='Unique SMILES', alpha=0.8)
    ax.set_ylabel('Count')
    ax.set_title('Dataset Size Comparison')
    ax.set_xticks(x)
    ax.set_xticklabels(datasets)
    ax.legend()
    ax.grid(axis='y', alpha=0.3)
    # Add value labels on bars
    for i, (tr, us) in enumerate(zip(total_records, unique_smiles)):
        ax.text(i - width/2, tr, f'{tr:,}', ha='center', va='bottom', fontsize=8)
        ax.text(i + width/2, us, f'{us:,}', ha='center', va='bottom', fontsize=8)
    # 2. Duplication ratio: fraction of records whose SMILES repeats within the split
    ax = axes[0, 1]
    duplication_ratio = [1 - (u / t) if t > 0 else 0 for u, t in zip(unique_smiles, total_records)]
    bars = ax.bar(datasets, duplication_ratio, alpha=0.8, color='coral')
    ax.set_ylabel('Duplication Ratio')
    ax.set_title('Data Duplication (1 - Unique/Total)')
    ax.grid(axis='y', alpha=0.3)
    ax.set_ylim(0, max(duplication_ratio) * 1.2 if max(duplication_ratio) > 0 else 1)
    for bar, ratio in zip(bars, duplication_ratio):
        height = bar.get_height()
        ax.text(bar.get_x() + bar.get_width() / 2., height,
                f'{ratio:.2%}',
                ha='center', va='bottom', fontsize=9)
    # 3. NMR spectra types
    ax = axes[1, 0]
    h_spectra = [train_stats['h_spectra'], val_stats['h_spectra'], test_stats['h_spectra']]
    c_spectra = [train_stats['c_spectra'], val_stats['c_spectra'], test_stats['c_spectra']]
    x = np.arange(len(datasets))
    width = 0.35
    ax.bar(x - width/2, h_spectra, width, label='¹H NMR', alpha=0.8)
    ax.bar(x + width/2, c_spectra, width, label='¹³C NMR', alpha=0.8)
    ax.set_ylabel('Count')
    ax.set_title('NMR Spectra Types')
    ax.set_xticks(x)
    ax.set_xticklabels(datasets)
    ax.legend()
    ax.grid(axis='y', alpha=0.3)
    # 4. Top 5 elements (train set)
    ax = axes[1, 1]
    top_elements = train_stats['elements'].most_common(5)
    elements = [e[0] for e in top_elements]
    counts = [e[1] for e in top_elements]
    ax.bar(elements, counts, alpha=0.8, color='skyblue')
    ax.set_ylabel('Total Count')
    ax.set_title('Top 5 Elements (Train Set)')
    ax.grid(axis='y', alpha=0.3)
    ax.set_yscale('log')
    plt.tight_layout()
    plt.savefig(output_path, dpi=300, bbox_inches='tight')
    plt.close()
    print(f"Saved: {output_path}")
def main():
    # File paths
    train_path = "/gemini/code/NMRGym/NMRGym_train_balanced.pkl"
    val_path = "/gemini/code/NMRGym/NMRGym_val_balanced.pkl"
    test_path = "/gemini/code/NMRGym/NMRGym_test_balanced.pkl"
    # Load datasets
    train_data = load_dataset(train_path)
    val_data = load_dataset(val_path)
    test_data = load_dataset(test_path)
    # Analyze each dataset
    train_stats = analyze_dataset(train_data, "Train (Balanced)")
    val_stats = analyze_dataset(val_data, "Val (Balanced)")
    test_stats = analyze_dataset(test_data, "Test (Balanced)")
    # Generate visualizations
    print("\n" + "=" * 60)
    print("Generating visualizations...")
    print("=" * 60)
    plot_dataset_overview(train_stats, val_stats, test_stats,
                          "/gemini/code/NMRGym/dataset_overview.png")
    plot_functional_groups(train_stats, val_stats, test_stats,
                           "/gemini/code/NMRGym/functional_groups.png")
    plot_element_distribution(train_stats, val_stats, test_stats,
                              "/gemini/code/NMRGym/element_distribution.png")
    # Save statistics as JSON
    summary = {
        'train': {
            'total_records': train_stats['total_records'],
            'unique_smiles': train_stats['unique_smiles'],
            'h_spectra': train_stats['h_spectra'],
            'c_spectra': train_stats['c_spectra'],
        },
        'val': {
            'total_records': val_stats['total_records'],
            'unique_smiles': val_stats['unique_smiles'],
            'h_spectra': val_stats['h_spectra'],
            'c_spectra': val_stats['c_spectra'],
        },
        'test': {
            'total_records': test_stats['total_records'],
            'unique_smiles': test_stats['unique_smiles'],
            'h_spectra': test_stats['h_spectra'],
            'c_spectra': test_stats['c_spectra'],
        }
    }
    with open('/gemini/code/NMRGym/dataset_stats.json', 'w') as f:
        json.dump(summary, f, indent=2)
    print("\nSaved: /gemini/code/NMRGym/dataset_stats.json")
    # Print final summary table
    print("\n" + "=" * 60)
    print("FINAL SUMMARY")
    print("=" * 60)
    print(f"{'Dataset':<15} {'Records':>10} {'Unique SMILES':>15} {'¹H NMR':>10} {'¹³C NMR':>10}")
    print("-" * 70)
    for name, stats in [('Train', train_stats), ('Val', val_stats), ('Test', test_stats)]:
        print(f"{name:<15} {stats['total_records']:>10,} {stats['unique_smiles']:>15,} "
              f"{stats['h_spectra']:>10,} {stats['c_spectra']:>10,}")
    total_records = train_stats['total_records'] + val_stats['total_records'] + test_stats['total_records']
    total_unique = train_stats['unique_smiles'] + val_stats['unique_smiles'] + test_stats['unique_smiles']
    total_h = train_stats['h_spectra'] + val_stats['h_spectra'] + test_stats['h_spectra']
    total_c = train_stats['c_spectra'] + val_stats['c_spectra'] + test_stats['c_spectra']
    print("-" * 70)
    print(f"{'Total':<15} {total_records:>10,} {total_unique:>15,} "
          f"{total_h:>10,} {total_c:>10,}")
    print("=" * 60 + "\n")
if __name__ == "__main__":
    main()