Commit b3e1b1c
Parent(s): a12ee73
Download model weights in Dockerfile. Large revision of README to include submission instructions. Add challenge-cli.py to make uploading via the Dyff API easier.
Files changed:
- Dockerfile (+6 -1)
- README.md (+150 -40)
- challenge-cli.py (+293 -0)
- upload_model.py (+0 -93)
Dockerfile
CHANGED

@@ -8,6 +8,12 @@ RUN python3 -m pip install --no-cache-dir --upgrade pip setuptools wheel
 
 WORKDIR /app/
 
+# Download models during build instead of copying from local
+COPY scripts/model_download.bash /tmp/model_download.bash
+RUN python3 -m pip install --no-cache-dir huggingface-hub && \
+    bash /tmp/model_download.bash && \
+    rm /tmp/model_download.bash
+
 COPY requirements.cpu.txt ./
 RUN python3 -m pip install --no-cache-dir -r ./requirements.cpu.txt
 
@@ -15,7 +21,6 @@ COPY requirements.torch.cpu.txt ./
 RUN python3 -m pip install --no-cache-dir -r ./requirements.torch.cpu.txt
 
 COPY app ./app
-COPY models ./models
 COPY main.py ./
 
 EXPOSE 8000
README.md
CHANGED

@@ -2,9 +2,127 @@
 license: apache-2.0
 ---
 
-#
-
+# Example Submission for the SAFE: Image Edit Detection and Localization Challenge 2025
+
+This project provides a starting point for implementing a submission to the [SAFE: Image Edit Detection and Localization Challenge 2025](https://app.dyff.io/challenges/dc509a8c771b492b90c43012fde9a04f). You do not need to use this code to participate in the challenge.
+
+# How to participate
+
+Visit the [challenge home page](https://app.dyff.io/challenges/dc509a8c771b492b90c43012fde9a04f) and sign up using the linked registration form.
+
+# How to make a submission
+
+The infrastructure for the challenge runs on [DSRI's Dyff platform](https://app.dyff.io). Submissions to the challenge must be in the form of a containerized web service that serves a simple JSON HTTP API. If you're comfortable building a Docker image yourself, you can create a submission from a built image using the [Dyff client](https://docs.dyff.io/python-api/dyff.client/). Or, you can create a Docker [HuggingFace Space](https://huggingface.co/new-space?sdk=docker) and create submissions from the space using a [webform](https://challenge.dyff.io/submit).
+
+## General considerations
+
+* Your submission will run **without Internet access** during evaluation. All of the files required to run your submission must be packaged along with it. You can either include files in the Docker image, or upload the files as a separate package and mount them in your application container during execution.
+
+## Submitting a Docker HuggingFace Space
+
+These are the steps to prepare a HF Space for making submissions to the challenge:
+
+1. Create a new HuggingFace [**Organization**](https://huggingface.co/organizations/new) (**not a user account**) for your challenge team. **The length of your combined Organization name + Space name must be less than 47 characters** due to a limitation of the HuggingFace API.
+2. Add the [official SAFE Challenge user account](https://huggingface.co/safe-challenge-2025-submissions) as a Member of your organization with `read` permissions. **Make sure you are adding the correct user account;** the account name is `safe-challenge-2025-submissions`. This allows our infrastructure to pull the Docker image built by your Space.
+3. Create a new `Space` within your `Organization`. The Space must use the [Docker SDK](https://huggingface.co/new-space?sdk=docker). **Private Spaces are OK and they will work with the submission process** because you granted `read` access in the previous step. **The length of your combined Organization name + Space name must be less than 47 characters** due to a limitation of the HuggingFace API.
+4. Create a file called `DYFF_TEAM` in the root directory of your HF Space. The contents of the file should be your Team ID (not your Account ID); your Team ID is a 32-character hexadecimal string. This file allows our infrastructure to verify that your Team controls this HF Space.
+5. Create a `Dockerfile` in your Space that builds your challenge submission image.
+6. Run the Space; this will build the Docker image.
+7. When you're ready to submit, use the [submission webform](https://challenge.dyff.io/submit) and enter the URL of your Space and the branch that you want to submit.
+
+### Handling large models
+
+There is a size limitation on Space repositories. If your submission contains large files (such as neural network weights), it may be too large to store in the space. In this case, you need to fetch your files from somewhere else **during the Docker build process**.
+
+This means that your Dockerfile should contain something like this:
+
+```
+COPY download-my-model.sh ./
+RUN ./download-my-model.sh
+```
+
+### Handling private models
+
+If access credentials are required to download your model files, you should provide them using the [Secrets feature](https://huggingface.co/docs/hub/spaces-overview#managing-secrets) of HuggingFace Spaces. **Do not hard-code credentials in your Dockerfile or anywhere else in your Space or Organization!**
+
+Access the secrets as described in the [Secrets > Buildtime section](https://huggingface.co/docs/hub/spaces-sdks-docker#secrets). Remember that you can't download files at run-time because your system will not have access to the Internet.
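For example, a build step along these lines reads a Space secret named `HF_TOKEN` (an assumed name) only during the build, so the token is never baked into the image; `download-my-model.sh` is the hypothetical script from the snippet above:

```
# syntax=docker/dockerfile:1
# (the syntax directive above must be the first line of the Dockerfile)
COPY download-my-model.sh ./
# The secret is mounted at /run/secrets/HF_TOKEN for this RUN step only
RUN --mount=type=secret,id=HF_TOKEN \
    HF_TOKEN="$(cat /run/secrets/HF_TOKEN)" ./download-my-model.sh
```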
+
+## Submitting using the Dyff API
+
+If you need more flexibility than HuggingFace Spaces allow, or if you just prefer to work with CLI tools and scripts, you can create submissions using the Dyff API.
+
+In the [terminology of Dyff](https://docs.dyff.io/tutorial/), the thing that you're submitting is an `InferenceService`. You can think of an `InferenceService` as a recipe for spinning up a Docker container that runs an HTTP server that serves an inference API. To create a new submission, you need to upload the Docker image that the service should run, and, optionally, a volume of files such as neural network weights that will be mounted in the container.
+
+### Install the Dyff SDK
+
+You need Python 3.10+ (3.12 recommended). We recommend you install into a virtual environment:
+
+```
+python3 -m venv venv
+source venv/bin/activate
+```
+
+Then, install the Dyff SDK:
+
+```
+python3 -m pip install --upgrade dyff
+```
+
+### Prepare the submission data
+
+Before creating a submission, you need to build the Docker image you want to submit locally. For example, running the `make docker-build` command in this repository will build a Docker image in your local Docker daemon with the name `safe-challenge-2025/example-submission:latest`. You can check that the image exists using the `docker images` command:
+
+```
+$ docker images
+REPOSITORY                              TAG     IMAGE ID      CREATED      SIZE
+safe-challenge-2025/example-submission  latest  b86a46d856f0  3 hours ago  1.86GB
+...
+```
+
+If your submission includes large data files such as neural network weights, we recommend that you upload these separately from the Docker image and then arrange for them to be mounted in the running container at run-time. You can upload a local directory recursively to the Dyff platform. Once uploaded, you will get the ID of a Dyff `Model` resource that you can reference when creating your submission.
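As a rough sketch of what that upload looks like with the SDK directly (this is essentially what `challenge-cli.py upload-submission` does for you, minus the status polling; the account ID and the `./models` path are placeholders):

```
from pathlib import Path

from dyff.client import Client
from dyff.schema.platform import ModelResources

dyffapi = Client()  # reads DYFF_API_TOKEN from the environment

volume = Path("./models")  # placeholder: local directory of model weights
model = dyffapi.models.create_from_volume(
    volume, name="model_volume", account="<your account ID>", resources=ModelResources()
)
dyffapi.models.upload_volume(model, volume)  # recursive upload of the directory
print(f'model.id: "{model.id}"')  # pass this ID to --model on later submissions
```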
+
+### Use the `challenge-cli` tool
+
+This repository contains a CLI script that simplifies the submission process. Usage is like this:
+
+```
+$ python3 challenge-cli.py
+Usage: challenge-cli.py [OPTIONS] COMMAND [ARGS]...
+
+Options:
+  --help  Show this message and exit.
+
+Commands:
+  submit
+  upload-submission
+```
+
+You create a new submission in two steps. First, upload the submission files:
+
+```
+DYFF_API_TOKEN=<your token> python3 challenge-cli.py upload-submission [OPTIONS]
+```
+
+Notice that we're providing an access token via an environment variable.
+
+This command creates a Dyff `Artifact` resource corresponding to your Docker image, an optional Dyff `Model` resource containing your uploaded model files, and a Dyff `InferenceService` resource that references the `Artifact` and the `Model`.
+
+You need to specify your Account ID and the names and paths of the resources you want to upload. You can also use the `--artifact` and `--model` flags to provide the IDs of an `Artifact` or `Model` that already exists instead of creating a new one. For example, if you always use the same Docker image but you mount different model weights in it for different submissions, you can create the Docker image `Artifact` once, and then reference its ID in `--artifact`.
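For example, an invocation using the flags defined in `challenge-cli.py` might look like this (all values are placeholders except the image name produced by `make docker-build`; the mount path is only an example):

```
DYFF_API_TOKEN=<your token> python3 challenge-cli.py upload-submission \
  --account <your account ID> \
  --name my-detector \
  --image safe-challenge-2025/example-submission:latest \
  --volume ./models \
  --volume-mount /app/models
```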
+
+After uploading the submission files, you create the actual `Submission` resource with:
+
+```
+DYFF_API_TOKEN=<your token> python3 challenge-cli.py submit [OPTIONS]
+```
+
+When submitting, you reference the ID of the `InferenceService` you created in the previous step. You also provide your Account ID, your Team ID for the challenge, and the Task ID that you're submitting to.
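For example (all IDs are placeholders; the Challenge ID defaults to the SAFE challenge, so you normally don't need to pass `--challenge`):

```
DYFF_API_TOKEN=<your token> python3 challenge-cli.py submit \
  --account <your account ID> \
  --team <your team ID> \
  --task <task ID> \
  --service <inference service ID>
```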
+
+
+# How to implement a detector
+
+To implement a new detector that you can submit to the challenge, you need to implement an HTTP server that serves the required JSON API for inference requests. This repository contains a template that you can use as a starting point for implementing a detector in Python. You should be able to adapt this template easily to support common model formats such as neural networks built with PyTorch.
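To make the shape of the task concrete, here is a minimal sketch of such a server, assuming FastAPI (which matches the `uvicorn main:app` entry point used elsewhere in this README) and placeholder request/response fields; the schemas the challenge actually requires are defined in `app/api/models.py` (see also `cat.json` and `response.json`):

```
from fastapi import FastAPI
from pydantic import BaseModel

app = FastAPI()


class PredictionRequest(BaseModel):  # placeholder schema, not the real one
    image: str  # e.g., a base64-encoded image


class PredictionResponse(BaseModel):  # placeholder schema, not the real one
    score: float  # e.g., probability that the image was edited


@app.post("/predict", response_model=PredictionResponse)
def predict(request: PredictionRequest) -> PredictionResponse:
    # Run your detector here; this stub just returns a constant score.
    return PredictionResponse(score=0.5)
```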
+
+You are also free to build detectors with any other technologies and software stacks that you want, but you may have to figure out packaging on your own.
 
 ## Quick Start
 
@@ -70,34 +188,36 @@ Example response:
 
 ```
 example-submission/
-├── main.py
+├── main.py                          # Entry point
 ├── app/
 │   ├── core/
-│   │   ├── app.py
-│   │   └── logging.py
+│   │   ├── app.py                   # <= INSTANTIATE YOUR DETECTOR HERE
+│   │   └── logging.py
 │   ├── api/
-│   │   ├── models.py
-│   │   ├── controllers.py
+│   │   ├── models.py                # Request/response schemas
+│   │   ├── controllers.py           # Business logic
 │   │   └── routes/
-│   │       └── prediction.py
+│   │       └── prediction.py        # POST /predict
 │   └── services/
-│       ├── base.py
-│       └── inference.py
+│       ├── base.py                  # <= YOUR DETECTOR IMPLEMENTS THIS INTERFACE
+│       └── inference.py             # Example service based on ResNet-18
 ├── models/
 │   └── microsoft/
-│       └── resnet-18/
+│       └── resnet-18/               # Model weights and config
 ├── scripts/
-│   ├── model_download.bash
-│   ├── generate_test_datasets.py
-│   └── test_datasets.py
+│   ├── model_download.bash          # Downloads resnet-18
+│   ├── generate_test_datasets.py    # Creates test datasets
+│   └── test_datasets.py             # Runs inference on test datasets
 ├── Dockerfile
-├── .env.example
-├── cat.json
+├── .env.example                     # Environment config template
+├── cat.json                         # An example /predict request object
 ├── makefile
-├── prompt.sh
-├── requirements.in
-├── requirements.txt
-├──
+├── prompt.sh                        # Script that makes a /predict request
+├── requirements.cpu.in
+├── requirements.cpu.txt
+├── requirements.torch.cpu.in
+├── requirements.torch.cpu.txt
+├── response.json                    # An example /predict response object
 └──
 ```
 
@@ -169,6 +289,16 @@ models/
 └── (other files)
 ```
 
+## GPU inference
+
+The default configuration in this repo runs the model on CPU and does not contain the necessary dependencies for using GPUs.
+
+To enable GPU inference, you need to:
+
+1. Base your Docker image on an image that contains the CUDA system packages
+2. Install the GPU version of PyTorch and its dependencies
+3. Use the `.to()` function as necessary in the `load_model()` and `predict()` functions to move model weights and input data to and from the CUDA device (see the sketch below)
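A minimal sketch of step 3, assuming the example ResNet-18 is loaded through the Hugging Face `transformers` API (adapt it to however your service actually loads its weights):

```
import torch
from transformers import AutoImageProcessor, AutoModelForImageClassification

DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")


def load_model(model_dir: str = "models/microsoft/resnet-18"):
    # Load the packaged weights and move them to the GPU (falls back to CPU).
    processor = AutoImageProcessor.from_pretrained(model_dir)
    model = AutoModelForImageClassification.from_pretrained(model_dir).to(DEVICE)
    model.eval()
    return processor, model


def predict(processor, model, image):
    # Move the input tensors to the same device as the model before inference.
    inputs = processor(images=image, return_tensors="pt").to(DEVICE)
    with torch.no_grad():
        outputs = model(**inputs)
    return outputs.logits.softmax(dim=-1).cpu()  # back to CPU for serialization
```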
+
 ## Configuration
 
 Settings are managed via environment variables or a `.env` file. See `.env.example` for all available options.
@@ -196,26 +326,6 @@ export MODEL_NAME="google/vit-base-patch16-224"
 uvicorn main:app --reload
 ```
 
-## Deployment
-
-**Development:**
-```bash
-uvicorn main:app --reload
-```
-
-**Production:**
-```bash
-gunicorn main:app -w 4 -k uvicorn.workers.UvicornWorker --bind 0.0.0.0:8000
-```
-
-The service runs on CPU by default. For GPU inference, install CUDA-enabled PyTorch and modify your service to move tensors to the GPU device.
-
-**Docker:**
-- Multi-stage build keeps the image small
-- Runs as non-root user (`appuser`)
-- Python dependencies installed in user site-packages
-- Model files baked into the image
-
 ## What Happens When You Start the Server
 
 ```
challenge-cli.py
ADDED

@@ -0,0 +1,293 @@
# SPDX-FileCopyrightText: 2025 UL Research Institutes
# SPDX-License-Identifier: Apache-2.0

import functools
import time
from datetime import datetime, timedelta, timezone
from pathlib import Path

import click

from dyff.client import Client
from dyff.schema.platform import *
from dyff.schema.requests import *

from app.api.models import PredictionResponse

# ----------------------------------------------------------------------------


def _wait_for_status(
    get_entity_fn, target_status: str | list[str], *, timeout: timedelta
) -> str:
    # Poll the entity until it reaches one of the target statuses, tolerating
    # 404s while the entity is still being created.
    if isinstance(target_status, str):
        target_status = [target_status]
    then = datetime.now(timezone.utc)
    while True:
        try:
            status = get_entity_fn().status
            if status in target_status:
                return status
        except Exception as ex:
            if hasattr(ex, "status"):
                if ex.status != 404:  # pyright: ignore
                    raise
            elif hasattr(ex, "status_code"):
                if ex.status_code != 404:  # pyright: ignore
                    raise
            else:
                raise
        if (datetime.now(timezone.utc) - then) >= timeout:
            break
        time.sleep(5)
    raise AssertionError("timeout")


def _common_options(f):
    @click.option(
        "--account",
        type=str,
        required=True,
        help="Your account ID",
        metavar="ID",
    )
    @functools.wraps(f)
    def wrapper(*args, **kwargs):
        return f(*args, **kwargs)

    return wrapper


@click.group()
def cli():
    pass


@cli.command()
@_common_options
@click.option(
    "--name",
    type=str,
    required=True,
    help="The name of your detector model. For display and querying purposes only.",
)
@click.option(
    "--image",
    type=str,
    default=None,
    help="The Docker image to upload (e.g., 'some/image:latest')."
    " Must exist in your local Docker daemon."
    " Required if --artifact is not specified.",
)
@click.option(
    "--endpoint",
    type=str,
    default="predict",
    help="The endpoint to call on your service to make a prediction.",
)
@click.option(
    "--volume",
    type=click.Path(
        exists=True, file_okay=False, readable=True, resolve_path=True, path_type=Path
    ),
    default=None,
    help="A local directory path containing files to upload and mount in the running Docker container."
    " You should use this if your submission includes large files like neural network weights.",
)
@click.option(
    "--volume-mount",
    type=click.Path(exists=False, path_type=Path),
    default=None,
    help="The path to mount your uploaded directory in the running Docker container."
    " Must be an absolute path."
    " Required if --volume is specified.",
)
@click.option(
    "--artifact",
    "artifact_id",
    type=str,
    default=None,
    help="The ID of the Artifact (i.e., Docker image) to use in the submission, if it already exists."
    " You can pass the artifact.id from a previous invocation.",
    metavar="ID",
)
@click.option(
    "--model",
    "model_id",
    type=str,
    default=None,
    help="The ID of the Model (i.e., neural network weights) to use in the submission, if it already exists."
    " You can pass the model.id from a previous invocation.",
    metavar="ID",
)
def upload_submission(
    account: str,
    name: str,
    image: str | None,
    endpoint: str,
    volume: Path | None,
    volume_mount: Path | None,
    artifact_id: str | None,
    model_id: str | None,
) -> None:
    dyffapi = Client()

    # Upload the image
    if artifact_id is None:
        if image is None:
            raise click.UsageError("--image is required when --artifact is not specified")

        # Create an Artifact resource
        click.echo("creating Artifact ...")
        artifact = dyffapi.artifacts.create(ArtifactCreateRequest(account=account))
        click.echo(f"artifact.id: \"{artifact.id}\"")
        _wait_for_status(
            lambda: dyffapi.artifacts.get(artifact.id),
            "WaitingForUpload",
            timeout=timedelta(seconds=30),
        )

        # Push the image from the local Docker daemon
        click.echo("pushing Artifact ...")
        dyffapi.artifacts.push(artifact, source=f"docker-daemon:{image}")
        time.sleep(5)

        # Indicate that we're done pushing
        dyffapi.artifacts.finalize(artifact.id)
        _wait_for_status(
            lambda: dyffapi.artifacts.get(artifact.id),
            "Ready",
            timeout=timedelta(seconds=30),
        )

        click.echo("... done")
    else:
        artifact = dyffapi.artifacts.get(artifact_id)
        assert artifact is not None

    model: Model | None = None
    if model_id is None:
        if volume is not None:
            if volume_mount is None:
                raise click.UsageError("--volume-mount is required when --volume is used")

            click.echo("creating Model from local directory ...")

            model = dyffapi.models.create_from_volume(
                volume, name="model_volume", account=account, resources=ModelResources()
            )
            click.echo(f"model.id: \"{model.id}\"")
            _wait_for_status(
                lambda: dyffapi.models.get(model.id),
                "WaitingForUpload",
                timeout=timedelta(seconds=30),
            )

            click.echo("uploading Model ...")
            dyffapi.models.upload_volume(model, volume)
            _wait_for_status(
                lambda: dyffapi.models.get(model.id),
                "Ready",
                timeout=timedelta(seconds=30),
            )

            click.echo("... done")
        else:
            model = None
    else:
        model = dyffapi.models.get(model_id)
        assert model is not None

    # Create a runnable InferenceService
    if volume_mount is not None:
        if model is None:
            raise click.UsageError("--volume-mount requires --volume or --model")
        if not volume_mount.is_absolute():
            raise click.UsageError("--volume-mount must be an absolute path")
        volumeMounts = [
            VolumeMount(
                kind=VolumeMountKind.data,
                name="model",
                mountPath=volume_mount,
                data=VolumeMountData(
                    source=EntityIdentifier.of(model),
                ),
            ),
        ]
    else:
        volumeMounts = None

    # Don't change this
    service_request = InferenceServiceCreateRequest(
        account=account,
        name=name,
        model=None,
        runner=InferenceServiceRunner(
            kind=InferenceServiceRunnerKind.CONTAINER,
            imageRef=EntityIdentifier.of(artifact),
            resources=ModelResources(),
            volumeMounts=volumeMounts,
        ),
        interface=InferenceInterface(
            endpoint=endpoint,
            outputSchema=DataSchema.make_output_schema(PredictionResponse),
        ),
    )
    click.echo("creating InferenceService ...")
    service = dyffapi.inferenceservices.create(service_request)
    click.echo(f"service.id: \"{service.id}\"")
    click.echo("... done")


@cli.command()
@_common_options
@click.option(
    "--task",
    "task_id",
    type=str,
    required=True,
    help="The Task ID to submit to.",
    metavar="ID",
)
@click.option(
    "--team",
    "team_id",
    type=str,
    required=True,
    help="The Team ID making the submission.",
    metavar="ID",
)
@click.option(
    "--service",
    "service_id",
    type=str,
    required=True,
    help="The InferenceService ID to submit.",
    metavar="ID",
)
@click.option(
    "--challenge",
    "challenge_id",
    type=str,
    default="dc509a8c771b492b90c43012fde9a04f",
    help="The Challenge ID to submit to.",
    metavar="ID",
)
def submit(
    account: str, task_id: str, team_id: str, service_id: str, challenge_id: str
) -> None:
    dyffapi = Client()

    challenge = dyffapi.challenges.get(challenge_id)
    challengetask = challenge.tasks[task_id]

    team = dyffapi.teams.get(team_id)

    service = dyffapi.inferenceservices.get(service_id)

    submission = dyffapi.challenges.submit(
        challenge.id,
        challengetask.id,
        SubmissionCreateRequest(
            account=account,
            team=team.id,
            submission=EntityIdentifier(kind="InferenceService", id=service.id),
        ),
    )
    click.echo(submission.model_dump_json(indent=2))
    click.echo(f"submission.id: \"{submission.id}\"")


if __name__ == "__main__":
    cli(show_default=True)
upload_model.py
DELETED

@@ -1,93 +0,0 @@
# SPDX-FileCopyrightText: 2025 UL Research Institutes
# SPDX-License-Identifier: Apache-2.0

import sys
import time
from pathlib import Path

import click

from dyff.client import Client
from dyff.schema.platform import *
from dyff.schema.requests import *

from app.api.models import PredictionResponse

# ----------------------------------------------------------------------------

WORKDIR = Path(__file__).resolve().parent


@click.command()
@click.option(
    "--account",
    type=str,
    required=True,
    help="Your account ID",
)
@click.option(
    "--name",
    type=str,
    required=True,
    help="The name of your detector model. For display and querying purposes only.",
)
@click.option(
    "--image",
    type=str,
    required=True,
    help="The Docker image to upload. Must exist in your local Docker deamon.",
)
@click.option(
    "--endpoint",
    type=str,
    default="predict",
    help="The endpoint to call on your model to make a prediction.",
)
def main(account: str, name: str, image: str, endpoint: str) -> None:
    dyffapi = Client()

    # You can set these to a known ID to skip that step
    artifact_id = None
    service_id = None

    # Upload the image
    if artifact_id is None:
        # Create an Artifact resource
        artifact = dyffapi.artifacts.create(ArtifactCreateRequest(account=account))
        click.echo(f"artifact_id = \"{artifact.id}\"")
        time.sleep(5)
        # Push the image from the local Docker daemon
        dyffapi.artifacts.push(artifact, source=f"docker-daemon:{image}")
        time.sleep(5)
        # Indicate that we're done pushing
        dyffapi.artifacts.finalize(artifact.id)
    else:
        artifact = dyffapi.artifacts.get(artifact_id)
        assert artifact is not None

    # Create a runnable InferenceService
    if service_id is None:
        # Don't change this
        service_request = InferenceServiceCreateRequest(
            account=account,
            name=name,
            model=None,
            runner=InferenceServiceRunner(
                kind=InferenceServiceRunnerKind.CONTAINER,
                imageRef=EntityIdentifier.of(artifact),
                resources=ModelResources(),
            ),
            interface=InferenceInterface(
                endpoint=endpoint,
                outputSchema=DataSchema.make_output_schema(PredictionResponse),
            ),
        )
        service = dyffapi.inferenceservices.create(service_request)
        click.echo(f"service_id = \"{service.id}\"")
    else:
        service = dyffapi.inferenceservices.get(service_id)
        assert service is not None


if __name__ == "__main__":
    main()