#!/usr/bin/env python3
"""
Download script for structured3d dataset chunks from Hugging Face
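
Usage:
    python download.py [REPO_ID] [--token TOKEN]

If REPO_ID is omitted, DEFAULT_REPO_ID below is used. The --token value (or
the HF_TOKEN environment variable) is optional for public repositories.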
"""
import os
import sys
from pathlib import Path
try:
    from huggingface_hub import hf_hub_download, list_repo_files
except ImportError:
    print("Error: huggingface_hub not installed")
    print("Install with: pip install huggingface_hub")
    sys.exit(1)

DATASET_NAME = "structured3d"
DEFAULT_REPO_ID = f"your-username/{DATASET_NAME}-dataset"
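
# Expected repository layout (inferred from the filters in download_chunks below):
#   structured3d/structured3d_part_000, structured3d/structured3d_part_001, ...
#   structured3d/merge.sh, structured3d/extract.sh  (helper scripts)
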
def download_chunks(repo_id, token=None):
"""Download structured3d chunks from Hugging Face."""
try:
# List files in the repository
files = list_repo_files(repo_id=repo_id, repo_type="dataset", token=token)
# Filter chunk files
chunk_files = [f for f in files if f.startswith(f"{DATASET_NAME}/{DATASET_NAME}_part_")]
if not chunk_files:
print(f"Error: No chunks found in {repo_id}")
print(f"Expected files like {DATASET_NAME}/{DATASET_NAME}_part_000")
return False
print(f"Found {len(chunk_files)} chunks to download")
print(f"Warning: This will download ~307GB of data. Ensure you have enough disk space!")
response = input("Continue with download? (y/N): ")
if response.lower() != 'y':
print("Download cancelled.")
return False
# Create chunks directory
chunks_dir = Path("chunks")
chunks_dir.mkdir(exist_ok=True)
# Download each chunk
for i, file_path in enumerate(sorted(chunk_files)):
chunk_name = Path(file_path).name
local_path = chunks_dir / chunk_name
print(f"Downloading {chunk_name} ({i+1}/{len(chunk_files)})...")
try:
hf_hub_download(
repo_id=repo_id,
repo_type="dataset",
filename=file_path,
local_dir=".",
token=token
)
# Move to chunks directory
downloaded_path = Path(file_path)
if downloaded_path.exists():
downloaded_path.rename(local_path)
except Exception as e:
print(f" ✗ Error downloading {chunk_name}: {e}")
continue
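
        # A failed chunk is logged and skipped so one bad transfer does not
        # abort the whole run; re-running the script will fetch it again.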

        # Download helper scripts
        helper_files = [f for f in files if f.startswith(f"{DATASET_NAME}/") and f.endswith(('.sh', '.py'))]
        for file_path in helper_files:
            script_name = Path(file_path).name
            if script_name != "download.py":  # Don't overwrite ourselves
                print(f"Downloading {script_name}...")
                try:
                    hf_hub_download(
                        repo_id=repo_id,
                        repo_type="dataset",
                        filename=file_path,
                        local_dir=".",
                        token=token
                    )
                    # Move to current directory and make executable
                    downloaded_path = Path(file_path)
                    if downloaded_path.exists():
                        downloaded_path.rename(script_name)
                        if script_name.endswith('.sh'):
                            os.chmod(script_name, 0o755)
                except Exception as e:
                    print(f"  ✗ Error downloading {script_name}: {e}")

        # Clean up empty directories
        dataset_dir = Path(DATASET_NAME)
        if dataset_dir.exists() and not any(dataset_dir.iterdir()):
            dataset_dir.rmdir()
print(f"\n✓ Download complete!")
print(f"Downloaded {len(chunk_files)} chunks to chunks/ directory")
print("\nNext steps:")
print("1. Run ./merge.sh to reassemble the original file")
print("2. Run ./extract.sh to extract contents")
print("\nWarning: Extraction will require additional ~307GB of disk space!")
        return True
    except Exception as e:
        print(f"Error accessing repository {repo_id}: {e}")
        return False

def main():
    import argparse
    parser = argparse.ArgumentParser(description=f"Download {DATASET_NAME} chunks from Hugging Face")
    parser.add_argument("repo_id", nargs="?", default=DEFAULT_REPO_ID, help="Hugging Face repository ID")
    parser.add_argument("--token", help="Hugging Face token (or set HF_TOKEN env var)")
    args = parser.parse_args()

    # Get token (optional for public repos)
    token = args.token or os.getenv("HF_TOKEN")

    print(f"Downloading from: {args.repo_id}")
    success = download_chunks(
        repo_id=args.repo_id,
        token=token
    )
    if not success:
        sys.exit(1)

if __name__ == "__main__":
    main()