The Space is showing a runtime error. Here is the code in question:
```python
import torch
from transformers import GLPNImageProcessor, GLPNForDepthEstimation

from image_resize import resize_img


def depth_detection(image, pad=16):
    # Load the GLPN NYU depth-estimation model and its image processor
    # (downloaded on every call).
    feature_extractor = GLPNImageProcessor.from_pretrained("vinvino02/glpn-nyu")
    model = GLPNForDepthEstimation.from_pretrained("vinvino02/glpn-nyu")

    # Resize the input image and prepare the model inputs.
    new_img = resize_img(image)
    inputs = feature_extractor(images=new_img, return_tensors="pt")

    # Run inference without tracking gradients.
    with torch.no_grad():
        outputs = model(**inputs)
        predicted_depth = outputs.predicted_depth

    # Convert the depth map to a NumPy array (scaled by 1000) and trim the
    # borders; crop the resized image by the same padding so the two align.
    output = predicted_depth.squeeze().cpu().numpy() * 1000.0
    output = output[pad:-pad, pad:-pad]
    new_image = new_img.crop((pad, pad, new_img.width - pad, new_img.height - pad))

    return new_image, output
```
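
For a quick sanity check outside the Space, the function can be exercised with a small driver like the one below. This is a minimal sketch, not part of the Space's actual code: the file name `test.jpg` and the output file names are placeholders, and it assumes the local `image_resize.resize_img` module (not shown here) is importable and returns a PIL image.

```python
# Minimal local smoke test for depth_detection.
# Assumptions: a placeholder input file "test.jpg" exists and the local
# image_resize module is on the path; output file names are placeholders.
from PIL import Image
import numpy as np

image = Image.open("test.jpg").convert("RGB")      # placeholder input image
cropped, depth = depth_detection(image, pad=16)    # depth: 2-D float array

# Normalise the depth map to 0-255 and save a grayscale preview alongside
# the cropped input so the two can be compared visually.
depth_vis = (depth - depth.min()) / (depth.max() - depth.min() + 1e-8)
Image.fromarray((depth_vis * 255).astype(np.uint8)).save("depth_preview.png")
cropped.save("cropped_input.png")
```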