# upscale.py

import logging
import os
import random
from typing import Annotated, Dict, Tuple, Union

import torch
from fastapi import APIRouter, Depends, File, Form, UploadFile, status
from fastapi.responses import JSONResponse
from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer
from PIL import Image, ImageFile

from app.dependencies import get_pipeline
from app.pipelines.base import Pipeline
from app.routes.utils import (
    HTTPError,
    ImageResponse,
    handle_pipeline_exception,
    http_error,
    image_to_data_url,
)

ImageFile.LOAD_TRUNCATED_IMAGES = True

router = APIRouter()

logger = logging.getLogger(__name__)

# Pipeline specific error handling configuration.
PIPELINE_ERROR_CONFIG: Dict[str, Tuple[Union[str, None], int]] = {
    # Specific error types.
    "OutOfMemoryError": (
        "Out of memory error. Try reducing input image resolution.",
        status.HTTP_500_INTERNAL_SERVER_ERROR,
    )
}
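# The entries above map an exception class name to a (message, status_code) pair;
# handle_pipeline_exception uses these overrides before falling back to the
# default error message passed at the call site.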

RESPONSES = {
    status.HTTP_200_OK: {
        "content": {
            "application/json": {
                "schema": {
                    "x-speakeasy-name-override": "data",
                }
            }
        },
    },
    status.HTTP_400_BAD_REQUEST: {"model": HTTPError},
    status.HTTP_401_UNAUTHORIZED: {"model": HTTPError},
    status.HTTP_500_INTERNAL_SERVER_ERROR: {"model": HTTPError},
}


# TODO: Make model_id and other None properties optional once Go codegen tool supports
# OAPI 3.1 https://github.com/deepmap/oapi-codegen/issues/373
@router.post(
    "/upscale",
    response_model=ImageResponse,
    responses=RESPONSES,
    description="Upscale an image by increasing its resolution.",
    operation_id="genUpscale",
    summary="Upscale",
    tags=["generate"],
    openapi_extra={"x-speakeasy-name-override": "upscale"},
)
@router.post(
    "/upscale/",
    response_model=ImageResponse,
    responses=RESPONSES,
    include_in_schema=False,
)
async def upscale(
    prompt: Annotated[
        str,
        Form(description="Text prompt(s) to guide upscaled image generation."),
    ],
    image: Annotated[
        UploadFile,
        File(description="Uploaded image to modify with the pipeline."),
    ],
    model_id: Annotated[
        str,
        Form(description="Hugging Face model ID used for upscaled image generation."),
    ] = "",
    safety_check: Annotated[
        bool,
        Form(
            description=(
                "Perform a safety check to estimate if generated images could be "
                "offensive or harmful."
            )
        ),
    ] = True,
    seed: Annotated[int, Form(description="Seed for random number generation.")] = None,
    num_inference_steps: Annotated[
        int,
        Form(
            description=(
                "Number of denoising steps. More steps usually lead to higher quality "
                "images but slower inference. Modulated by strength."
            )
        ),
    ] = 75,  # NOTE: Hardcoded due to varying pipeline values.
    pipeline: Pipeline = Depends(get_pipeline),
    token: HTTPAuthorizationCredentials = Depends(HTTPBearer(auto_error=False)),
):
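    # Require a valid bearer token whenever AUTH_TOKEN is set in the environment.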
    auth_token = os.environ.get("AUTH_TOKEN")
    if auth_token:
        if not token or token.credentials != auth_token:
            return JSONResponse(
                status_code=status.HTTP_401_UNAUTHORIZED,
                headers={"WWW-Authenticate": "Bearer"},
                content=http_error("Invalid bearer token."),
            )
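
    # Reject requests for a model other than the one this pipeline was loaded with.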
    if model_id != "" and model_id != pipeline.model_id:
        return JSONResponse(
            status_code=status.HTTP_400_BAD_REQUEST,
            content=http_error(
                f"pipeline configured with {pipeline.model_id} but called with "
                f"{model_id}."
            ),
        )
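
    # Fall back to a random 32-bit seed when no non-zero seed is provided.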
    seed = seed or random.randint(0, 2**32 - 1)

    image = Image.open(image.file).convert("RGB")
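
    # Run the upscale pipeline; it is expected to return the generated images
    # together with per-image NSFW flags.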
    try:
        images, has_nsfw_concept = pipeline(
            prompt=prompt,
            image=image,
            num_inference_steps=num_inference_steps,
            safety_check=safety_check,
            seed=seed,
        )
    except Exception as e:
        if isinstance(e, torch.cuda.OutOfMemoryError):
            # TODO: Investigate why not all VRAM memory is cleared.
            torch.cuda.empty_cache()

        logger.error(f"Upscale pipeline error: {e}")
        return handle_pipeline_exception(
            e,
            default_error_message="Upscale pipeline error.",
            custom_error_config=PIPELINE_ERROR_CONFIG,
        )
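
    # Pair the single seed with the generated image for the response payload.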
    seeds = [seed]

    # TODO: Return None once Go codegen tool supports optional properties
    # OAPI 3.1 https://github.com/deepmap/oapi-codegen/issues/373
    output_images = [
        {"url": image_to_data_url(img), "seed": sd, "nsfw": nsfw or False}
        for img, sd, nsfw in zip(images, seeds, has_nsfw_concept)
    ]

    return {"images": output_images}
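

# Example client call (illustrative sketch only, not part of this module): the
# endpoint accepts multipart form data, so it could be exercised with the
# `requests` library roughly as below. The host, port, and token value are
# assumptions about how the server is run.
#
#   import requests
#
#   with open("input.png", "rb") as f:
#       resp = requests.post(
#           "http://localhost:8000/upscale",
#           headers={"Authorization": "Bearer <AUTH_TOKEN>"},
#           data={"prompt": "a sharp, detailed photograph", "seed": 42},
#           files={"image": ("input.png", f, "image/png")},
#       )
#   resp.raise_for_status()
#   print(resp.json()["images"][0]["seed"])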