jessehostetler committed
Commit be5bf87 · 1 Parent(s): 33241cf

Fix Docker image not working in Kubernetes. Fix int width not set in input schema. Add makefile and test image.

.gitignore CHANGED
@@ -1,2 +1,3 @@
  models/
- venv/
+ venv/
+ **/__pycache__

Dockerfile CHANGED
@@ -1,26 +1,20 @@
- FROM python:3.12-slim as builder
-
- WORKDIR /build
-
- COPY requirements.txt .
- RUN pip install --no-cache-dir --user -r requirements.txt
-
  FROM python:3.12-slim

- WORKDIR /app
+ ENV PYTHONDONTWRITEBYTECODE="1" \
+     PYTHONUNBUFFERED="1"

- RUN useradd -m -u 1000 appuser
+ # hadolint ignore=DL3013
+ RUN python3 -m pip install --no-cache-dir --upgrade pip setuptools wheel

- COPY --from=builder --chown=appuser:appuser /root/.local /home/appuser/.local
- COPY --chown=appuser:appuser app ./app
- COPY --chown=appuser:appuser models ./models
- COPY --chown=appuser:appuser main.py .
+ WORKDIR /app/

- USER appuser
+ COPY requirements.txt ./
+ RUN python3 -m pip install --no-cache-dir -r ./requirements.txt

- ENV PATH=/home/appuser/.local/bin:$PATH \
-     PYTHONUNBUFFERED=1
+ COPY app ./app
+ COPY models ./models
+ COPY main.py ./

  EXPOSE 8000

- CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000"]
+ ENTRYPOINT ["python3", "-m", "uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000"]

app/api/models.py CHANGED
@@ -6,6 +6,8 @@ from typing import Optional

  import pydantic

+ from dyff.schema.base import int32
+

  class ImageData(pydantic.BaseModel):
      """Image data model for base64 encoded images."""
@@ -39,11 +41,11 @@ class LocalizationMask(pydantic.BaseModel):
      final byte isn't "full", then convert the byte array to base64.
      """

-     width: int = pydantic.Field(
+     width: int32() = pydantic.Field(
          description="The width of the mask."
      )

-     height: int = pydantic.Field(
+     height: int32() = pydantic.Field(
          description="The height of the mask."
      )

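A note on the `int32()` annotation above: it comes from `dyff.schema.base`, whose implementation is not part of this diff. The sketch below is only an illustration, using plain pydantic v2 and a hypothetical `Int32` alias and `MaskSketch` model, of how a fixed-width integer field can be declared so that the generated JSON schema carries explicit 32-bit bounds; it is not the dyff implementation.

from typing import Annotated

import pydantic

# Hypothetical stand-in for dyff.schema.base.int32(): an int constrained to the
# 32-bit signed range, so the generated schema advertises explicit bounds.
Int32 = Annotated[int, pydantic.Field(ge=-(2**31), le=2**31 - 1)]

class MaskSketch(pydantic.BaseModel):
    width: Int32 = pydantic.Field(description="The width of the mask.")
    height: Int32 = pydantic.Field(description="The height of the mask.")

# The property schema now includes the bounds alongside the description, e.g.
# 'minimum': -2147483648 and 'maximum': 2147483647 with 'type': 'integer'.
print(MaskSketch.model_json_schema()["properties"]["width"])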
 
app/services/inference.py CHANGED
@@ -68,9 +68,11 @@ class ResNetInferenceService(InferenceService[ImageRequest, PredictionResponse])
          inputs = self.processor(image, return_tensors="pt")

          with torch.no_grad():
-             logits = self.model(**inputs).logits  # pyright: ignore
+             logits = self.model(**inputs).logits.squeeze()  # pyright: ignore

-         logprobs = torch.nn.functional.log_softmax(logits[:len(Labels)], dim=-1).tolist()
+         # Convert to expected output format. This is for demonstration purposes
+         # and obviously will not perform well on the actual task.
+         logprobs = torch.nn.functional.log_softmax(logits[:len(Labels)]).tolist()
          mask_bytes = random.randbytes((width*height + 7) // 8)
          mask_bits = base64.b64encode(mask_bytes).decode("utf-8")

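A quick shape check (with hypothetical sizes; the real `Labels` enum is not shown in this diff) of why the added `.squeeze()` matters: the classifier returns logits of shape (batch, num_classes), so slicing with `[:len(Labels)]` before squeezing would trim the batch axis instead of selecting the first `len(Labels)` class logits.

import torch

num_classes, num_labels = 1000, 4      # hypothetical: ImageNet head vs. task labels
logits = torch.randn(1, num_classes)   # model output: (batch=1, num_classes)

# Without squeeze, the slice acts on the batch axis and trims nothing.
print(logits[:num_labels].shape)       # torch.Size([1, 1000])

# With squeeze, the slice selects the first num_labels class logits.
squeezed = logits.squeeze()
print(squeezed[:num_labels].shape)     # torch.Size([4])

logprobs = torch.nn.functional.log_softmax(squeezed[:num_labels], dim=-1).tolist()
print(len(logprobs))                   # 4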
 
cat.json ADDED
The diff for this file is too large to render. See raw diff
 
makefile ADDED
@@ -0,0 +1,15 @@
+ .PHONY: download
+ download:
+ 	bash scripts/model_download.bash
+
+ .PHONY: build
+ build:
+ 	docker build -t safe-challenge-2025/example-submission:latest .
+
+ .PHONY: run
+ run:
+ 	docker run -d --name example-submission -p 8000:8000 safe-challenge-2025/example-submission:latest
+
+ .PHONY: stop
+ stop:
+ 	docker stop example-submission && docker rm example-submission

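After `make run`, a quick way to confirm the container is serving is to hit the app over HTTP. The sketch below assumes FastAPI's default interactive docs route at /docs is enabled, which is not shown in this diff.

import urllib.request

# Smoke test: expect HTTP 200 from the running container once uvicorn is up.
with urllib.request.urlopen("http://localhost:8000/docs", timeout=10) as response:
    print(response.status)
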
prompt.sh ADDED
@@ -0,0 +1,6 @@
+ #!/bin/bash
+
+ curl -X POST http://localhost:8000/predict \
+   -H "Content-Type: application/json" \
+   -d @"$1"
+
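prompt.sh posts a JSON file (for example, the cat.json added in this commit) to the /predict endpoint. An equivalent sketch in Python, assuming the file passed on the command line already contains a complete request body:

import json
import sys

import requests  # already pinned in requirements.txt

with open(sys.argv[1], "r", encoding="utf-8") as f:
    payload = json.load(f)

# Same call as the curl command in prompt.sh.
response = requests.post("http://localhost:8000/predict", json=payload, timeout=60)
response.raise_for_status()
print(json.dumps(response.json(), indent=2))
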
requirements.in CHANGED
@@ -1,3 +1,5 @@
+ dyff
+
  # Web framework
  fastapi==0.104.1
  uvicorn[standard]==0.24.0

requirements.txt CHANGED
@@ -1,58 +1,242 @@
- #
- # This file is autogenerated by pip-compile with Python 3.12
- # by the following command:
- #
- # pip-compile requirements.in
- #
  annotated-types==0.7.0
  # via pydantic
  anyio==3.7.1
  # via
  # fastapi
  # starlette
  # watchfiles
- certifi==2025.8.3
- # via requests
- charset-normalizer==3.4.3
  # via requests
- click==8.2.1
  # via uvicorn
  fastapi==0.104.1
  # via -r requirements.in
- filelock==3.19.1
  # via
  # huggingface-hub
  # torch
  # transformers
- fsspec==2025.7.0
  # via
  # huggingface-hub
  # torch
  h11==0.16.0
- # via uvicorn
- hf-xet==1.1.8
  # via huggingface-hub
- httptools==0.6.4
  # via uvicorn
- huggingface-hub==0.34.4
  # via
  # tokenizers
  # transformers
- idna==3.10
  # via
  # anyio
  # requests
  jinja2==3.1.6
- # via torch
- markupsafe==3.0.2
- # via jinja2
  mpmath==1.3.0
  # via sympy
  networkx==3.5
  # via torch
- numpy==2.3.2
  # via
  # -r requirements.in
  # pandas
  # transformers
  nvidia-cublas-cu12==12.8.4.1

+ # This file was autogenerated by uv via the following command:
+ # uv pip compile requirements.in
+ absl-py==2.3.1
+ # via dyff-client
  annotated-types==0.7.0
  # via pydantic
  anyio==3.7.1
  # via
  # fastapi
+ # httpx
+ # jupyter-server
  # starlette
  # watchfiles
+ argon2-cffi==25.1.0
+ # via jupyter-server
+ argon2-cffi-bindings==25.1.0
+ # via argon2-cffi
+ arrow==1.4.0
+ # via isoduration
+ asttokens==3.0.0
+ # via stack-data
+ async-lru==2.0.5
+ # via jupyterlab
+ attrs==25.4.0
+ # via
+ # jsonschema
+ # referencing
+ azure-core==1.36.0
+ # via dyff-client
+ babel==2.17.0
+ # via jupyterlab-server
+ beautifulsoup4==4.14.2
+ # via
+ # dyff-audit
+ # nbconvert
+ bleach==6.3.0
+ # via nbconvert
+ canonicaljson==2.0.0
+ # via dyff-schema
+ certifi==2025.11.12
+ # via
+ # httpcore
+ # httpx
+ # requests
+ cffi==2.0.0
+ # via argon2-cffi-bindings
+ charset-normalizer==3.4.4
  # via requests
+ click==8.3.0
  # via uvicorn
+ comm==0.2.3
+ # via ipykernel
+ debugpy==1.8.17
+ # via ipykernel
+ decorator==5.2.1
+ # via ipython
+ defusedxml==0.7.1
+ # via nbconvert
+ dnspython==2.8.0
+ # via email-validator
+ # dyff==0.36.1
+ # via -r requirements.in
+ dyff-audit==0.16.1
+ # via dyff
+ dyff-client==0.23.5
+ # via
+ # dyff
+ # dyff-audit
+ dyff-schema==0.39.1
+ # via
+ # dyff
+ # dyff-audit
+ # dyff-client
+ email-validator==2.3.0
+ # via dyff-schema
+ executing==2.2.1
+ # via stack-data
  fastapi==0.104.1
  # via -r requirements.in
+ fastjsonschema==2.21.2
+ # via nbformat
+ filelock==3.20.0
  # via
  # huggingface-hub
  # torch
  # transformers
+ fqdn==1.5.1
+ # via jsonschema
+ fsspec==2025.10.0
  # via
  # huggingface-hub
  # torch
+ google-i18n-address==3.1.1
+ # via dyff-schema
  h11==0.16.0
+ # via
+ # httpcore
+ # uvicorn
+ hf-xet==1.2.0
  # via huggingface-hub
+ httpcore==1.0.9
+ # via httpx
+ httptools==0.7.1
  # via uvicorn
+ httpx==0.28.1
+ # via
+ # dyff-client
+ # jupyterlab
+ huggingface-hub==0.36.0
  # via
  # tokenizers
  # transformers
+ hypothesis==6.147.0
+ # via
+ # dyff-schema
+ # hypothesis-jsonschema
+ hypothesis-jsonschema==0.23.1
+ # via dyff-schema
+ idna==3.11
  # via
  # anyio
+ # email-validator
+ # httpx
+ # jsonschema
  # requests
+ ipykernel==7.1.0
+ # via jupyterlab
+ ipython==9.7.0
+ # via ipykernel
+ ipython-pygments-lexers==1.1.1
+ # via ipython
+ isodate==0.7.2
+ # via dyff-client
+ isoduration==20.11.0
+ # via jsonschema
+ jedi==0.19.2
+ # via ipython
  jinja2==3.1.6
+ # via
+ # jupyter-server
+ # jupyterlab
+ # jupyterlab-server
+ # nbconvert
+ # torch
+ json5==0.12.1
+ # via jupyterlab-server
+ jsonpath-ng==1.7.0
+ # via
+ # dyff-client
+ # dyff-schema
+ jsonpointer==3.0.0
+ # via jsonschema
+ jsonschema==4.25.1
+ # via
+ # hypothesis-jsonschema
+ # jupyter-events
+ # jupyterlab-server
+ # nbformat
+ jsonschema-specifications==2025.9.1
+ # via jsonschema
+ jupyter-client==8.6.3
+ # via
+ # ipykernel
+ # jupyter-server
+ # nbclient
+ jupyter-core==5.9.1
+ # via
+ # ipykernel
+ # jupyter-client
+ # jupyter-server
+ # jupyterlab
+ # nbclient
+ # nbconvert
+ # nbformat
+ jupyter-events==0.12.0
+ # via jupyter-server
+ jupyter-lsp==2.3.0
+ # via jupyterlab
+ jupyter-server==2.17.0
+ # via
+ # jupyter-lsp
+ # jupyterlab
+ # jupyterlab-server
+ # notebook
+ # notebook-shim
+ jupyter-server-terminals==0.5.3
+ # via jupyter-server
+ jupyterlab==4.4.10
+ # via notebook
+ jupyterlab-pygments==0.3.0
+ # via nbconvert
+ jupyterlab-server==2.28.0
+ # via
+ # jupyterlab
+ # notebook
+ lark==1.3.1
+ # via rfc3987-syntax
+ lxml==6.0.2
+ # via dyff-audit
+ markupsafe==3.0.3
+ # via
+ # jinja2
+ # nbconvert
+ matplotlib-inline==0.2.1
+ # via
+ # ipykernel
+ # ipython
+ mistune==3.1.4
+ # via nbconvert
  mpmath==1.3.0
  # via sympy
+ nbclient==0.10.2
+ # via nbconvert
+ nbconvert==7.16.6
+ # via
+ # dyff-audit
+ # jupyter-server
+ nbformat==5.10.4
+ # via
+ # dyff-audit
+ # jupyter-server
+ # nbclient
+ # nbconvert
+ nest-asyncio==1.6.0
+ # via ipykernel
  networkx==3.5
  # via torch
+ notebook==7.4.7
+ # via dyff-audit
+ notebook-shim==0.2.4
+ # via
+ # jupyterlab
+ # notebook
+ numpy==1.26.4
  # via
  # -r requirements.in
+ # dyff-audit
+ # dyff-client
+ # dyff-schema
  # pandas
  # transformers
  nvidia-cublas-cu12==12.8.4.1

@@ -82,7 +266,7 @@ nvidia-cusparse-cu12==12.5.8.93
  # torch
  nvidia-cusparselt-cu12==0.7.1
  # via torch
- nvidia-nccl-cu12==2.27.3
  # via torch
  nvidia-nvjitlink-cu12==12.8.93
  # via

  # torch
  nvidia-cusparselt-cu12==0.7.1
  # via torch
+ nvidia-nccl-cu12==2.27.5
  # via torch
  nvidia-nvjitlink-cu12==12.8.93
  # via

@@ -90,91 +274,233 @@ nvidia-nvjitlink-cu12==12.8.93
  # nvidia-cusolver-cu12
  # nvidia-cusparse-cu12
  # torch
  nvidia-nvtx-cu12==12.8.90
  # via torch
  packaging==25.0
  # via
  # huggingface-hub
  # transformers
- pandas==2.3.2
- # via -r requirements.in
- pillow==10.1.0
- # via -r requirements.in
- pyarrow==21.0.0
  # via -r requirements.in
  pydantic==2.5.0
  # via
  # -r requirements.in
  # fastapi
  # pydantic-settings
  pydantic-core==2.14.1
  # via pydantic
  pydantic-settings==2.0.3
  # via -r requirements.in
  python-dateutil==2.9.0.post0
- # via pandas
  python-dotenv==0.21.0
  # via
  # -r requirements.in
  # pydantic-settings
  # uvicorn
  python-multipart==0.0.6
  # via -r requirements.in
  pytz==2025.2
  # via pandas
- pyyaml==6.0.2
  # via
  # huggingface-hub
  # transformers
  # uvicorn
- regex==2025.7.34
  # via transformers
  requests==2.32.5
  # via
  # -r requirements.in
  # huggingface-hub
  # transformers
  safetensors==0.6.2
  # via transformers
  six==1.17.0
- # via python-dateutil
  sniffio==1.3.1
  # via anyio
  starlette==0.27.0
  # via fastapi
  sympy==1.14.0
  # via torch
- tokenizers==0.15.2
  # via transformers
- torch==2.8.0
  # via -r requirements.in
  tqdm==4.67.1
  # via
  # huggingface-hub
  # transformers
- transformers==4.35.2
  # via -r requirements.in
- triton==3.4.0
  # via torch
  typing-extensions==4.15.0
  # via
  # fastapi
  # huggingface-hub
  # pydantic
  # pydantic-core
  # torch
  tzdata==2025.2
- # via pandas
  urllib3==2.5.0
  # via requests
- uvicorn[standard]==0.24.0
  # via -r requirements.in
- uvloop==0.21.0
  # via uvicorn
- watchfiles==1.1.0
  # via uvicorn
  websockets==15.0.1
- # via uvicorn
-
- # The following packages are considered to be unsafe in a requirements file:
- # setuptools

  # nvidia-cusolver-cu12
  # nvidia-cusparse-cu12
  # torch
+ nvidia-nvshmem-cu12==3.3.20
+ # via torch
  nvidia-nvtx-cu12==12.8.90
  # via torch
  packaging==25.0
  # via
  # huggingface-hub
+ # ipykernel
+ # jupyter-events
+ # jupyter-server
+ # jupyterlab
+ # jupyterlab-server
+ # nbconvert
  # transformers
+ pandas==2.3.3
+ # via
+ # -r requirements.in
+ # dyff-audit
+ # dyff-client
+ pandocfilters==1.5.1
+ # via nbconvert
+ parso==0.8.5
+ # via jedi
+ pexpect==4.9.0
+ # via ipython
+ pillow==12.0.0
  # via -r requirements.in
+ platformdirs==4.5.0
+ # via jupyter-core
+ ply==3.11
+ # via jsonpath-ng
+ prometheus-client==0.23.1
+ # via jupyter-server
+ prompt-toolkit==3.0.52
+ # via ipython
+ psutil==7.1.3
+ # via ipykernel
+ ptyprocess==0.7.0
+ # via
+ # pexpect
+ # terminado
+ pure-eval==0.2.3
+ # via stack-data
+ pyarrow==22.0.0
+ # via
+ # -r requirements.in
+ # dyff-audit
+ # dyff-client
+ # dyff-schema
+ pycparser==2.23
+ # via cffi
  pydantic==2.5.0
  # via
  # -r requirements.in
+ # dyff-audit
+ # dyff-client
+ # dyff-schema
  # fastapi
  # pydantic-settings
  pydantic-core==2.14.1
  # via pydantic
  pydantic-settings==2.0.3
  # via -r requirements.in
+ pygments==2.19.2
+ # via
+ # ipython
+ # ipython-pygments-lexers
+ # nbconvert
  python-dateutil==2.9.0.post0
+ # via
+ # arrow
+ # jupyter-client
+ # pandas
  python-dotenv==0.21.0
  # via
  # -r requirements.in
  # pydantic-settings
  # uvicorn
+ python-json-logger==4.0.0
+ # via jupyter-events
  python-multipart==0.0.6
  # via -r requirements.in
  pytz==2025.2
  # via pandas
+ pyyaml==6.0.3
  # via
  # huggingface-hub
+ # jupyter-events
  # transformers
  # uvicorn
+ pyzmq==27.1.0
+ # via
+ # ipykernel
+ # jupyter-client
+ # jupyter-server
+ referencing==0.37.0
+ # via
+ # jsonschema
+ # jsonschema-specifications
+ # jupyter-events
+ regex==2025.11.3
  # via transformers
  requests==2.32.5
  # via
  # -r requirements.in
+ # azure-core
+ # google-i18n-address
  # huggingface-hub
+ # jupyterlab-server
  # transformers
+ rfc3339-validator==0.1.4
+ # via
+ # jsonschema
+ # jupyter-events
+ rfc3986-validator==0.1.1
+ # via
+ # jsonschema
+ # jupyter-events
+ rfc3987-syntax==1.1.0
+ # via jsonschema
+ rpds-py==0.28.0
+ # via
+ # jsonschema
+ # referencing
+ ruamel-yaml==0.18.16
+ # via dyff-audit
+ ruamel-yaml-clib==0.2.14
+ # via ruamel-yaml
  safetensors==0.6.2
  # via transformers
+ send2trash==1.8.3
+ # via jupyter-server
+ setuptools==80.9.0
+ # via
+ # jupyterlab
+ # torch
  six==1.17.0
+ # via
+ # python-dateutil
+ # rfc3339-validator
  sniffio==1.3.1
  # via anyio
+ sortedcontainers==2.4.0
+ # via hypothesis
+ soupsieve==2.8
+ # via beautifulsoup4
+ stack-data==0.6.3
+ # via ipython
  starlette==0.27.0
  # via fastapi
  sympy==1.14.0
  # via torch
+ terminado==0.18.1
+ # via
+ # jupyter-server
+ # jupyter-server-terminals
+ tinycss2==1.4.0
+ # via bleach
+ tokenizers==0.22.1
  # via transformers
+ torch==2.9.1
  # via -r requirements.in
+ tornado==6.5.2
+ # via
+ # ipykernel
+ # jupyter-client
+ # jupyter-server
+ # jupyterlab
+ # notebook
+ # terminado
  tqdm==4.67.1
  # via
+ # dyff-client
  # huggingface-hub
  # transformers
+ traitlets==5.14.3
+ # via
+ # ipykernel
+ # ipython
+ # jupyter-client
+ # jupyter-core
+ # jupyter-events
+ # jupyter-server
+ # jupyterlab
+ # matplotlib-inline
+ # nbclient
+ # nbconvert
+ # nbformat
+ transformers==4.57.1
  # via -r requirements.in
+ triton==3.5.1
  # via torch
  typing-extensions==4.15.0
  # via
+ # azure-core
+ # beautifulsoup4
  # fastapi
  # huggingface-hub
  # pydantic
  # pydantic-core
+ # referencing
  # torch
  tzdata==2025.2
+ # via
+ # arrow
+ # pandas
+ uri-template==1.3.0
+ # via jsonschema
  urllib3==2.5.0
  # via requests
+ uvicorn==0.24.0
  # via -r requirements.in
+ uvloop==0.22.1
  # via uvicorn
+ watchfiles==1.1.1
  # via uvicorn
+ wcwidth==0.2.14
+ # via prompt-toolkit
+ webcolors==25.10.0
+ # via jsonschema
+ webencodings==0.5.1
+ # via
+ # bleach
+ # tinycss2
+ websocket-client==1.9.0
+ # via jupyter-server
  websockets==15.0.1
+ # via
+ # dyff-client
+ # uvicorn
 
scripts/model_download.bash CHANGED
@@ -1,8 +1,10 @@
+ mkdir -p "models/microsoft/resnet-18"
+
  python - <<'PY'
  from huggingface_hub import snapshot_download
  snapshot_download(
      repo_id="microsoft/resnet-18",
-     local_dir="models/resnet-18",
+     local_dir="models/microsoft/resnet-18",
      local_dir_use_symlinks=False  # copies files; safer for containers
  )
  PY
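
The new download path mirrors the Hugging Face repo id (models/microsoft/resnet-18 instead of models/resnet-18). A small sketch of loading the snapshot from that local directory with transformers; this assumes the service reads the model from this path, which is not shown in this diff:

from transformers import AutoImageProcessor, AutoModelForImageClassification

# Load the locally downloaded snapshot rather than fetching from the Hub.
model_dir = "models/microsoft/resnet-18"
processor = AutoImageProcessor.from_pretrained(model_dir)
model = AutoModelForImageClassification.from_pretrained(model_dir)
model.eval()
print(model.config.num_labels)  # 1000 for the ImageNet-pretrained resnet-18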