wolfofbackstreet committed on
Commit
a2549a7
·
verified ·
1 Parent(s): 21fc05b

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +73 -73
app.py CHANGED
@@ -1,74 +1,74 @@
1
- import os
2
- from flask import Flask, request, jsonify, send_file
3
- from optimum.intel.openvino.modeling_diffusion import OVStableDiffusionPipeline
4
- from PIL import Image
5
- import io
6
- import torch
7
- import logging
8
-
9
- # Set up logging
10
- logging.basicConfig(level=logging.INFO)
11
- logger = logging.getLogger(__name__)
12
-
13
- app = Flask(__name__)
14
-
15
- # Set cache directories
16
- os.environ["HF_HOME"] = "/app/cache/huggingface"
17
- os.environ["MPLCONFIGDIR"] = "/app/matplotlib_cache"
18
- os.environ["OPENVINO_TELEMETRY_DIR"] = "/app/openvino_cache"
19
-
20
- # Ensure cache directories exist
21
- for cache_dir in ["/app/cache/huggingface", "/app/matplotlib_cache", "/app/openvino_cache"]:
22
- os.makedirs(cache_dir, exist_ok=True)
23
-
24
- # Load the base pre-converted OpenVINO SDXL model
25
- base_model_id = "rupeshs/hyper-sd-sdxl-1-step-openvino-int8"
26
- try:
27
- pipeline = OVStableDiffusionPipeline.from_pretrained(
28
- base_model_id,
29
- ov_config={"CACHE_DIR": "/app/cache/openvino"},
30
- device="CPU"
31
- )
32
- logger.info("Base model loaded successfully")
33
- except Exception as e:
34
- logger.error(f"Failed to load base model: {str(e)}")
35
- raise
36
-
37
- @app.route('/generate', methods=['POST'])
38
- def generate_image():
39
- try:
40
- # Get parameters from request
41
- data = request.get_json()
42
- prompt = data.get('prompt', 'A futuristic cityscape at sunset, cyberpunk style, 8k')
43
- width = data.get('width', 512)
44
- height = data.get('height', 512)
45
- num_inference_steps = data.get('num_inference_steps', 1)
46
- guidance_scale = data.get('guidance_scale', 1.0)
47
-
48
- # Generate image
49
- image = pipeline(
50
- prompt=prompt,
51
- width=width,
52
- height=height,
53
- num_inference_steps=num_inference_steps,
54
- guidance_scale=guidance_scale
55
- ).images[0]
56
-
57
- # Save image to a bytes buffer
58
- img_io = io.BytesIO()
59
- image.save(img_io, 'PNG')
60
- img_io.seek(0)
61
-
62
- return send_file(
63
- img_io,
64
- mimetype='image/png',
65
- as_attachment=True,
66
- download_name='generated_image.png'
67
- )
68
- except Exception as e:
69
- logger.error(f"Image generation failed: {str(e)}")
70
- return jsonify({'error': str(e)}), 500
71
-
72
- if __name__ == '__main__':
73
- port = int(os.getenv('PORT', 7860))
74
  app.run(host='0.0.0.0', port=port)
 
1
+ import os
2
+ from flask import Flask, request, jsonify, send_file
3
+ from optimum.intel.openvino.modeling_diffusion import OVStableDiffusionPipeline
4
+ from PIL import Image
5
+ import io
6
+ import torch
7
+ import logging
8
+
9
# --- Module-level application setup -----------------------------------------
# Configures logging, creates the Flask app, points all framework caches at
# writable /app paths, and eagerly loads the OpenVINO SDXL pipeline so the
# first request does not pay the model-load cost.

# Set up logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

app = Flask(__name__)

# Set cache directories
# NOTE(review): these env vars are assigned *after* the imports above;
# huggingface_hub may already have read HF_HOME at import time — confirm they
# still take effect, or move these assignments before the imports.
os.environ["HF_HOME"] = "/app/cache/huggingface"
os.environ["MPLCONFIGDIR"] = "/app/matplotlib_cache"
os.environ["OPENVINO_TELEMETRY_DIR"] = "/app/openvino_cache"

# Ensure cache directories exist
for cache_dir in ["/app/cache/huggingface", "/app/matplotlib_cache", "/app/openvino_cache"]:
    os.makedirs(cache_dir, exist_ok=True)

# Load the base pre-converted OpenVINO SDXL model
# (pre-quantized INT8, tuned for 1-step inference on CPU per the model id).
base_model_id = "rupeshs/hyper-sd-sdxl-1-step-openvino-int8"
try:
    pipeline = OVStableDiffusionPipeline.from_pretrained(
        base_model_id,
        ov_config={"CACHE_DIR": "/app/cache/openvino"},  # compiled-model cache
        device="CPU"
    )
    logger.info("Base model loaded successfully")
except Exception as e:
    # Log then re-raise: the service is useless without the model, so fail
    # loudly at startup rather than serving 500s later.
    logger.error(f"Failed to load base model: {str(e)}")
    raise
36
+
37
@app.route('/generate', methods=['POST'])
def generate_image():
    """Generate an image from a JSON request and return it as a PNG download.

    Expected JSON body (all keys optional, defaults shown below):
        prompt (str): text prompt for the diffusion pipeline.
        width (int), height (int): output dimensions, default 512x512.
        num_inference_steps (int): default 1 (the model is a 1-step SDXL).
        guidance_scale (float): default 1.0.

    Returns:
        PNG bytes as a file attachment on success, or a JSON
        ``{'error': ...}`` payload with HTTP 500 on failure.
    """
    try:
        # Get parameters from request.
        # silent=True makes a missing/non-JSON body yield None instead of
        # raising; fall back to {} so the .get() defaults apply.
        data = request.get_json(silent=True) or {}
        prompt = data.get('prompt', 'A futuristic cityscape at sunset, cyberpunk style, 8k')
        width = data.get('width', 512)
        height = data.get('height', 512)
        num_inference_steps = data.get('num_inference_steps', 1)
        guidance_scale = data.get('guidance_scale', 1.0)

        # Generate image — pass the client-supplied parameters through.
        # (Bug fix: these were hard-coded to 768/768/1/1.0, silently
        # ignoring the values parsed from the request above.)
        image = pipeline(
            prompt=prompt,
            width=width,
            height=height,
            num_inference_steps=num_inference_steps,
            guidance_scale=guidance_scale,
        ).images[0]

        # Save image to an in-memory bytes buffer and rewind for send_file.
        img_io = io.BytesIO()
        image.save(img_io, 'PNG')
        img_io.seek(0)

        return send_file(
            img_io,
            mimetype='image/png',
            as_attachment=True,
            download_name='generated_image.png'
        )
    except Exception as e:
        # Broad catch is deliberate at this route boundary: report the
        # failure to the client as JSON rather than a bare 500 page.
        logger.error(f"Image generation failed: {str(e)}")
        return jsonify({'error': str(e)}), 500
71
+
72
if __name__ == '__main__':
    # Honor the platform-provided PORT, defaulting to 7860 when unset.
    listen_port = int(os.environ.get('PORT', 7860))
    app.run(host='0.0.0.0', port=listen_port)