diff --git a/runner/Dockerfile b/runner/Dockerfile
index f4138e8e..327c4c92 100644
--- a/runner/Dockerfile
+++ b/runner/Dockerfile
@@ -1,6 +1,6 @@
 # Based on https://github.com/huggingface/api-inference-community/blob/main/docker_images/diffusers/Dockerfile
-FROM nvidia/cuda:12.1.1-cudnn8-runtime-ubuntu20.04
+FROM nvidia/cuda:12.4.1-cudnn-runtime-ubuntu22.04
 
 LABEL maintainer="Yondon Fu "
 
 # Add any system dependency here
diff --git a/runner/app/main.py b/runner/app/main.py
index c4dd6b8e..8c54a051 100644
--- a/runner/app/main.py
+++ b/runner/app/main.py
@@ -48,7 +48,7 @@ def load_pipeline(pipeline: str, model_id: str) -> any:
             from app.pipelines.audio_to_text import AudioToTextPipeline
 
             return AudioToTextPipeline(model_id)
-        case "FILMPipeline":
+        case "frame-interpolation":
             from app.pipelines.frame_interpolation import FILMPipeline
 
             return FILMPipeline(model_id)
@@ -80,7 +80,7 @@ def load_route(pipeline: str) -> any:
             from app.routes import audio_to_text
 
             return audio_to_text.router
-        case "FILMPipeline":
+        case "frame-interpolation":
             from app.routes import frame_interpolation
 
             return frame_interpolation.router
diff --git a/runner/app/pipelines/frame_interpolation.py b/runner/app/pipelines/frame_interpolation.py
index 755afee7..3086ed57 100644
--- a/runner/app/pipelines/frame_interpolation.py
+++ b/runner/app/pipelines/frame_interpolation.py
@@ -1,4 +1,5 @@
 import torch
+import os
 from torchvision.transforms import v2
 from tqdm import tqdm
 import bisect
@@ -10,7 +11,7 @@ class FILMPipeline:
     model: torch.jit.ScriptModule
 
     def __init__(self, model_id: str):
-        self.model_id = model_id
+        model_id = os.environ.get("MODEL_ID", "")
         model_dir = get_model_dir()  # Get the directory where models are stored
         model_path = f"{model_dir}/{model_id}"  # Construct the full path to the model file
diff --git a/runner/app/pipelines/upscale.py b/runner/app/pipelines/upscale.py
index 6fd3cefe..360f4c10 100644
--- a/runner/app/pipelines/upscale.py
+++ b/runner/app/pipelines/upscale.py
@@ -1,5 +1,7 @@
 import logging
 import os
+import time
+from compel import Compel, ReturnedEmbeddingsType
 from typing import List, Optional, Tuple
 
 import PIL
@@ -114,7 +116,29 @@ def __call__(
         if num_inference_steps is None or num_inference_steps < 1:
             kwargs.pop("num_inference_steps", None)
 
-        output = self.ldm(prompt, image=image, **kwargs)
+        # Try different prompt embedding configurations for different models.
+        try:
+            compel_proc = Compel(tokenizer=self.ldm.tokenizer, text_encoder=self.ldm.text_encoder)
+            prompt_embeds = compel_proc(prompt)
+            output = self.ldm(prompt_embeds=prompt_embeds, image=image, **kwargs)
+        except Exception as e:
+            logging.info(f"Failed to generate prompt embeddings: {e}. Using prompt and pooled embeddings.")
+
+            try:
+                compel_proc = Compel(tokenizer=[self.ldm.tokenizer, self.ldm.tokenizer_2],
+                                     text_encoder=[self.ldm.text_encoder, self.ldm.text_encoder_2],
+                                     returned_embeddings_type=ReturnedEmbeddingsType.PENULTIMATE_HIDDEN_STATES_NON_NORMALIZED,
+                                     requires_pooled=[False, True])
+                prompt_embeds, pooled_prompt_embeds = compel_proc(prompt)
+                output = self.ldm(
+                    prompt_embeds=prompt_embeds,
+                    pooled_prompt_embeds=pooled_prompt_embeds,
+                    image=image,
+                    **kwargs
+                )
+            except Exception as e:
+                logging.info(f"Failed to generate prompt and pooled embeddings: {e}. Trying normal prompt.")
+                output = self.ldm(prompt, image=image, **kwargs)
 
         if safety_check:
             _, has_nsfw_concept = self._safety_checker.check_nsfw_images(output.images)
diff --git a/runner/app/pipelines/utils/utils.py b/runner/app/pipelines/utils/utils.py
index 5c9ad6a5..9de0b0c2 100644
--- a/runner/app/pipelines/utils/utils.py
+++ b/runner/app/pipelines/utils/utils.py
@@ -193,7 +193,7 @@ def video_shredder(video_data, is_file_path=True) -> np.ndarray:
         # Handle in-memory video input
         # Create a temporary file to store in-memory video data
         with tempfile.NamedTemporaryFile(delete=False, suffix='.mp4') as temp_file:
-            temp_file.write(video_data.getvalue())
+            temp_file.write(video_data)
            temp_file_path = temp_file.name
 
         # Open the temporary video file
diff --git a/runner/app/routes/frame_interpolation.py b/runner/app/routes/frame_interpolation.py
index 6abf6a66..d11458e9 100644
--- a/runner/app/routes/frame_interpolation.py
+++ b/runner/app/routes/frame_interpolation.py
@@ -4,6 +4,7 @@
 import os
 import torch
 import glob
+import cv2
 from typing import Annotated, Optional
 from fastapi import APIRouter, Depends, File, Form, UploadFile, status
 from fastapi.responses import JSONResponse
@@ -12,8 +13,8 @@
 from app.dependencies import get_pipeline
 from app.pipelines.frame_interpolation import FILMPipeline
-from app.pipelines.utils.utils import DirectoryReader, DirectoryWriter, get_torch_device, get_model_dir
-from app.routes.util import HTTPError, ImageResponse, http_error, image_to_data_url
+from app.pipelines.utils.utils import DirectoryReader, DirectoryWriter, get_torch_device, video_shredder
+from app.routes.util import HTTPError, VideoResponse, http_error, image_to_data_url
 
 ImageFile.LOAD_TRUNCATED_IMAGES = True
 
@@ -27,18 +28,16 @@
     status.HTTP_500_INTERNAL_SERVER_ERROR: {"model": HTTPError},
 }
 
-@router.post("/frame_interpolation", response_model=ImageResponse, responses=RESPONSES)
+@router.post("/frame-interpolation", response_model=VideoResponse, responses=RESPONSES)
 @router.post(
-    "/frame_interpolation/",
-    response_model=ImageResponse,
+    "/frame-interpolation/",
+    response_model=VideoResponse,
     responses=RESPONSES,
     include_in_schema=False,
 )
 async def frame_interpolation(
-    model_id: Annotated[str, Form()],
-    image1: Annotated[UploadFile, File()]=None,
-    image2: Annotated[UploadFile, File()]=None,
-    image_dir: Annotated[str, Form()]="",
+    model_id: Annotated[str, Form()] = "",
+    video: Annotated[UploadFile, File()]=None,
     inter_frames: Annotated[int, Form()] = 2,
     token: HTTPAuthorizationCredentials = Depends(HTTPBearer(auto_error=False)),
 ):
@@ -51,10 +50,9 @@ async def frame_interpolation(
             content=http_error("Invalid bearer token"),
         )
 
-    # Initialize FILMPipeline
     film_pipeline = FILMPipeline(model_id)
-    film_pipeline.to(device=get_torch_device(),dtype=torch.float16)
+    film_pipeline.to(device=get_torch_device(), dtype=torch.float16)
 
     # Prepare directories for input and output
     temp_input_dir = "temp_input"
@@ -63,31 +61,21 @@
     os.makedirs(temp_output_dir, exist_ok=True)
 
     try:
-        if os.path.isdir(image_dir):
-            if image1 and image2:
-                logger.info("Both directory and individual images provided. Directory will be used, and images will be ignored.")
-            reader = DirectoryReader(image_dir)
-        else:
-            if not (image1 and image2):
-                return JSONResponse(
-                    status_code=status.HTTP_400_BAD_REQUEST,
-                    content=http_error("Either a directory or two images must be provided."),
-                )
-
-            image1_path = os.path.join(temp_input_dir, "0.png")
-            image2_path = os.path.join(temp_input_dir, "1.png")
-
-            with open(image1_path, "wb") as f:
-                f.write(await image1.read())
-            with open(image2_path, "wb") as f:
-                f.write(await image2.read())
-
-            reader = DirectoryReader(temp_input_dir)
+        # Extract frames from video
+        video_data = await video.read()
+        frames = video_shredder(video_data, is_file_path=False)
+        # Save frames to temporary directory
+        for i, frame in enumerate(frames):
+            frame_path = os.path.join(temp_input_dir, f"{i}.png")
+            cv2.imwrite(frame_path, frame)
+
+        # Create DirectoryReader and DirectoryWriter
+        reader = DirectoryReader(temp_input_dir)
         writer = DirectoryWriter(temp_output_dir)
 
+        # Perform interpolation
         film_pipeline(reader, writer, inter_frames=inter_frames)
-
         writer.close()
         reader.reset()
 
@@ -96,8 +84,8 @@
         for frame_path in sorted(glob.glob(os.path.join(temp_output_dir, "*.png"))):
             frame = Image.open(frame_path)
             output_frames.append(frame)
-
-        output_images = [{"url": image_to_data_url(frame),"seed":0, "nsfw":False} for frame in output_frames]
+        # Wrap output frames in a list of batches (with a single batch in this case)
+        output_images = [[{"url": image_to_data_url(frame), "seed": 0, "nsfw": False} for frame in output_frames]]
 
     except Exception as e:
         logger.error(f"FILMPipeline error: {e}")
@@ -106,15 +94,13 @@
             status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
             content=http_error("FILMPipeline error"),
         )
-
     finally:
         # Clean up temporary directories
         for file_path in glob.glob(os.path.join(temp_input_dir, "*")):
             os.remove(file_path)
         os.rmdir(temp_input_dir)
-
         for file_path in glob.glob(os.path.join(temp_output_dir, "*")):
             os.remove(file_path)
         os.rmdir(temp_output_dir)
 
-    return {"images": output_images}
+    return {"frames": output_images}
diff --git a/runner/openapi.json b/runner/openapi.json
index 2e2bb4e7..e55aa404 100644
--- a/runner/openapi.json
+++ b/runner/openapi.json
@@ -332,7 +332,80 @@
         ]
       }
     },
-    "/frame_interpolation": {
+    "/llm-generate": {
+      "post": {
+        "summary": "Llm Generate",
+        "operationId": "llm_generate",
+        "requestBody": {
+          "content": {
+            "application/x-www-form-urlencoded": {
+              "schema": {
+                "$ref": "#/components/schemas/Body_llm_generate_llm_generate_post"
+              }
+            }
+          },
+          "required": true
+        },
+        "responses": {
+          "200": {
+            "description": "Successful Response",
+            "content": {
+              "application/json": {
+                "schema": {
+                  "$ref": "#/components/schemas/LlmResponse"
+                }
+              }
+            }
+          },
+          "400": {
+            "description": "Bad Request",
+            "content": {
+              "application/json": {
+                "schema": {
+                  "$ref": "#/components/schemas/HTTPError"
+                }
+              }
+            }
+          },
+          "401": {
+            "description": "Unauthorized",
+            "content": {
+              "application/json": {
+                "schema": {
+                  "$ref": "#/components/schemas/HTTPError"
+                }
+              }
+            }
+          },
+          "500": {
+            "description": "Internal Server Error",
+            "content": {
+              "application/json": {
+                "schema": {
+                  "$ref": "#/components/schemas/HTTPError"
+                }
+              }
+            }
+          },
+          "422": {
+            "description": "Validation Error",
+            "content": {
+              "application/json": {
+                "schema": {
+                  "$ref": "#/components/schemas/HTTPValidationError"
+                }
+              }
+            }
+          }
+        },
+        "security": [
+          {
+            "HTTPBearer": []
+          }
+        ]
+      }
+    },
+    "/frame-interpolation": {
       "post": {
"summary": "Frame Interpolation", "operationId": "frame_interpolation", @@ -340,11 +413,15 @@ "content": { "multipart/form-data": { "schema": { - "$ref": "#/components/schemas/Body_frame_interpolation_frame_interpolation_post" + "allOf": [ + { + "$ref": "#/components/schemas/Body_frame_interpolation_frame_interpolation_post" + } + ], + "title": "Body" } } - }, - "required": true + } }, "responses": { "200": { @@ -352,7 +429,7 @@ "content": { "application/json": { "schema": { - "$ref": "#/components/schemas/ImageResponse" + "$ref": "#/components/schemas/VideoResponse" } } } @@ -517,22 +594,13 @@ "properties": { "model_id": { "type": "string", - "title": "Model Id" - }, - "image1": { - "type": "string", - "format": "binary", - "title": "Image1" + "title": "Model Id", + "default": "" }, - "image2": { + "video": { "type": "string", "format": "binary", - "title": "Image2" - }, - "image_dir": { - "type": "string", - "title": "Image Dir", - "default": "" + "title": "Video" }, "inter_frames": { "type": "integer", @@ -541,9 +609,6 @@ } }, "type": "object", - "required": [ - "model_id" - ], "title": "Body_frame_interpolation_frame_interpolation_post" }, "Body_image_to_image_image_to_image_post": { diff --git a/worker/docker.go b/worker/docker.go index 8d7f97e0..315075a3 100644 --- a/worker/docker.go +++ b/worker/docker.go @@ -30,11 +30,13 @@ const containerCreator = "ai-worker" // This only works right now on a single GPU because if there is another container // using the GPU we stop it so we don't have to worry about having enough ports var containerHostPorts = map[string]string{ - "text-to-image": "8000", - "image-to-image": "8001", - "image-to-video": "8002", - "upscale": "8003", - "audio-to-text": "8004", + "text-to-image": "8000", + "image-to-image": "8001", + "image-to-video": "8002", + "upscale": "8003", + "audio-to-text": "8004", + "llm": "8005", + "frame-interpolation": "8006", } type DockerManager struct { diff --git a/worker/runner.gen.go b/worker/runner.gen.go index 788a7e82..85b4b590 100644 --- a/worker/runner.gen.go +++ b/worker/runner.gen.go @@ -39,11 +39,9 @@ type BodyAudioToTextAudioToTextPost struct { // BodyFrameInterpolationFrameInterpolationPost defines model for Body_frame_interpolation_frame_interpolation_post. type BodyFrameInterpolationFrameInterpolationPost struct { - Image1 *openapi_types.File `json:"image1,omitempty"` - Image2 *openapi_types.File `json:"image2,omitempty"` - ImageDir *string `json:"image_dir,omitempty"` InterFrames *int `json:"inter_frames,omitempty"` - ModelId string `json:"model_id"` + ModelId *string `json:"model_id,omitempty"` + Video *openapi_types.File `json:"video,omitempty"` } // BodyImageToImageImageToImagePost defines model for Body_image_to_image_image_to_image_post. @@ -161,11 +159,14 @@ type Chunk struct { Timestamp []interface{} `json:"timestamp"` } +// FrameInterpolationMultipartBody defines parameters for FrameInterpolation. +type FrameInterpolationMultipartBody = BodyFrameInterpolationFrameInterpolationPost + // AudioToTextMultipartRequestBody defines body for AudioToText for multipart/form-data ContentType. type AudioToTextMultipartRequestBody = BodyAudioToTextAudioToTextPost // FrameInterpolationMultipartRequestBody defines body for FrameInterpolation for multipart/form-data ContentType. 
-type FrameInterpolationMultipartRequestBody = BodyFrameInterpolationFrameInterpolationPost +type FrameInterpolationMultipartRequestBody = FrameInterpolationMultipartBody // ImageToImageMultipartRequestBody defines body for ImageToImage for multipart/form-data ContentType. type ImageToImageMultipartRequestBody = BodyImageToImageImageToImagePost @@ -472,7 +473,7 @@ func NewFrameInterpolationRequestWithBody(server string, contentType string, bod return nil, err } - operationPath := fmt.Sprintf("/frame_interpolation") + operationPath := fmt.Sprintf("/frame-interpolation") if operationPath[0] == '/' { operationPath = "." + operationPath } @@ -743,7 +744,7 @@ func (r AudioToTextResponse) StatusCode() int { type FrameInterpolationResponse struct { Body []byte HTTPResponse *http.Response - JSON200 *ImageResponse + JSON200 *VideoResponse JSON400 *HTTPError JSON401 *HTTPError JSON422 *HTTPValidationError @@ -1039,7 +1040,7 @@ func ParseFrameInterpolationResponse(rsp *http.Response) (*FrameInterpolationRes switch { case strings.Contains(rsp.Header.Get("Content-Type"), "json") && rsp.StatusCode == 200: - var dest ImageResponse + var dest VideoResponse if err := json.Unmarshal(bodyBytes, &dest); err != nil { return nil, err } @@ -1326,7 +1327,7 @@ type ServerInterface interface { // (POST /audio-to-text) AudioToText(w http.ResponseWriter, r *http.Request) // Frame Interpolation - // (POST /frame_interpolation) + // (POST /frame-interpolation) FrameInterpolation(w http.ResponseWriter, r *http.Request) // Health // (GET /health) @@ -1356,7 +1357,7 @@ func (_ Unimplemented) AudioToText(w http.ResponseWriter, r *http.Request) { } // Frame Interpolation -// (POST /frame_interpolation) +// (POST /frame-interpolation) func (_ Unimplemented) FrameInterpolation(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusNotImplemented) } @@ -1634,7 +1635,7 @@ func HandlerWithOptions(si ServerInterface, options ChiServerOptions) http.Handl r.Post(options.BaseURL+"/audio-to-text", wrapper.AudioToText) }) r.Group(func(r chi.Router) { - r.Post(options.BaseURL+"/frame_interpolation", wrapper.FrameInterpolation) + r.Post(options.BaseURL+"/frame-interpolation", wrapper.FrameInterpolation) }) r.Group(func(r chi.Router) { r.Get(options.BaseURL+"/health", wrapper.Health) @@ -1658,33 +1659,36 @@ func HandlerWithOptions(si ServerInterface, options ChiServerOptions) http.Handl // Base64 encoded, gzipped, json marshaled Swagger object var swaggerSpec = []string{ - "H4sIAAAAAAAC/+xZWW/bOhb+KwRnHp14aTMZ+C1Jt2C6BI3beSgCg5GObbYSqUtSaX0D//cLHsoSJVOR", - "jTS+QK6fvOgs31m+w0X3NJJpJgUIo+n4nupoASnDr2dXl6+Vksp+z5TMQBkO+CTVc/thuEmAjukHPac9", - "apaZ/aGN4mJOV6seVfBHzhXEdPwNVW56pUppu9STt98hMnTVo+cyXk5ZHnM5NXJq4Jdp/MqkNpugUMZ+", - "mUmVMkPH9JYLppbU84oiG1B7NJUxJFMeW/UYZixPrL6n+cEKkMu4M06Hwot0u2ja0jBTLIUpFwZUJhNm", - "uBTB/8Ip4Smbw/DhnFw6mUBSUHu0hfaoVXsac9WaU9Qlr7gKqtvwXKi6ZmHkGbAy5I2TKW1YzTmoZll3", - "rmSp3CzmTjVpK6xLj5HFl8bPcDnnOY+ZiGCqI2bheFk5PT6pUL4t5Mg1ypUQRJ7eusSgly0q217YB7AM", - "fSyuyN2IHsHAHhUwZ4bfwTRTMs1Mq42PhRy5cnIhU3nqaqCnGaiQwaFnL08JBqjJFagNq14nolkxAwWY", - "MwNZvauHg0HD7FqYXKNwyGgFbq3ZHpdmMzDLabSA6EfNs1E5VK6vUYxcoFhp5lbKBJhAOwA1Ol3b3yFw", - "2igQc7OoORsc/9fztZbYaIcGE7N1VK5tm3zcgkqdLLzjMcjmzzALZ43S/aeC86alUAvg80W9i05OPb13", - "7nlI9TFMfRSnUolD7DaPfoBpGhmOTn0rVpKco2TNmk8AyTVMWT6ftjTGwBvsH60wOcvnpL1Hujk1Otmd", - "UnunyU8eN1IxHIxeVp7+j883NRsU6WBGe3u3MSPPcLCXnw9sMP6O7uyq/enJsxqnuw3EYO0ChX43mVy1", - "7PBjMIwn9tu/FczomP6rX50T+sUhoV/u4psAC3UPWOWrBchXlvAYN06dkLiBVHdha9pbVVheOUslEKYU", - 
"W2IMPtqmgRBuYIlZXKyboI5XG2byelfST/+j/vqHAqF9aLUwVA4C/pFbn0FnUmhoYafeOmMfIObMz5Pb", - "2oTytDF6tF/rOqwAbudpA6/Qs58+GT7a34+arrlKfLkvKunc9ucoo51FRORF5oAHIprAL9NeiGiRix/b", - "FwLF/UJcOP1mIXrUHiD9AC2MzgiNEypAedHVgmgJciKxuldMMRfIUx1Rqj3TFrukf/jp4eS5HR7KXdGO", - "26AiqEZP13s20Nida08ioxp7mVh+mtHxt/uNXN1vQLzxiPxeRugmQOXmnRpo3bJxcn9UooiZTOy/XdS3", - "cThXhaSXqS3Wu69239g+5qrbmjJROy48zfG2Plc1rnjCC1Hh3g+phjcQkJu0G4FsN1atnxS0YWnmh+rh", - "npTPO6AbX9A684JwGDfAI52iXHGzvLZ5dMjtxuUcmAJVXuYiB91fpZGFMRldrfCebSYdpXWkeIbNOaZn", - "grAsS7jrVmIkUbkgZ5ck4xkkXLhirJua30EGoOzzz7kQ6OgOlHa2BsfD44HNlsxAsIzTMX2Bf/VoxswC", - "YffxSvTIyKN16tfnDVsWBHEZry9wJ7Koh80gaGP3vLjKSmFAoFaaJ4ZnTJm+PZgcxcyw6nK7qx23u7Fd", - "1WtoJyH+4ZoNoxoNBg1cXlL737VNz7agamsz+q5X7DqPItB6liekEuvRl78RQrWFD/g/ZzH57Orh/A73", - "4/eLYLlZSMX/hBgdD1/sx3ERLHktDDdLMpGSvGdq7rI+Gv1WEBtnmU04lQgpzzsn+yo+XsQLlpBrUHeg", - "SHUoXI8oXCv94fTtZnXTozpPU6aWa2aTiSTIbavaD9ypt08GXCIua7JPOyB2eguw52FRP4AdpkX7tDgQ", - "dVeiItFInWlI1wXeVeAhEAIEdVcZ9Am73r8s2bbnV35oBUSMBk9xdkNSXnGG5w5SrThgPPHE2eI9x2HO", - "HObMM5kz7sXxRLorkgYp8QVGJynx+LcvUra/YtkzKeuH3gMpD6R8AlI6aiEp7ZF4i4XSu4h7kJKPOyLX", - "r/oOy+GBec+Eeba5G6th8Xq3nXJfCoGnXQGDb5sPzDsw75kwb82ildOyZjQq1T2Vt+AXicxjciHTNBfc", - "LMlbZuAnW9LibTTevetxvx8rYOnR3D09Tgr148iq09XN6q8AAAD///BmUXaaLQAA", + "H4sIAAAAAAAC/+xaX2/juBH/KgTbRye2c5du4bdNercXNLsXrJ3rwyIwGGls8yKRKkkldgN/94JD/aH+", + "RTaSuEDqJ1vScOY3w/kNh5SeaSDjRAoQRtPJM9XBCmKGfz/fXP2ilFT2f6JkAspwwCexXtofw00EdEK/", + "6iUdULNJ7IU2iosl3W4HVMG/U64gpJMfOORuUAwpdBfj5P2fEBi6HdALGW7mLA25nBs5N7A2tatEatME", + "hTL2z0KqmBk6ofdcMLWhnlUUaUAd0FiGEM15aIeHsGBpZMd7I79aAXIV9vrpUHie7uZNVxgWisUw58KA", + "SmTEDJei9V57SFDGyeuKa2clvisrQ351MgUMO3IJ6pWxGdBHHkLPpPyBIm1xrYRwr0h0hZPHbAk28u5P", + "7bI9iMuUh0wEMNcBs3C8KHw6PS9RfsnkyBTlCggije9dJNHKy8G4QpGWQDqEL2AZ+1hQDelH9Kq5FbBk", + "hj/CPFEyTkynjm+ZHLlxcm2q0tjNgZ4noNoUjj19aUzQQU1uQDW0eqmLasUCFGDMDCRVGoxHo5raXJhM", + "UbhNaQkuH9ntl2YLMJt5sILgoWLZqBRK01MUI5coVqi5lzICJlAPQOhbnNrrNnDaKBBLs6oYG53+3bOV", + "SzTSoVbJktwrl7b1krYDlXpZiNWhftnOwkVt6v5Wwvm1Y6JWwJerahadf/LG/eaetw19DVNfxalYYhG7", + "T4MHMHUl47NPvhYrSS5QsqLNJ4DkGuYsXc47EmPkrQTfrDD5nC5Jd470c+rsfH9KHZwmTzyshWI8Ovu5", + "tPQvfN4cWaNIDzO607uLGVEUz5cgQDED1Yt2VsRsPTfyAYSu9GNsTWbu7psv6HtVv402EM9r3eIU75LW", + "pnFADcSJ9ThV4A+aebd3LFz1aemJbdeUpAmutcVvR6f1vyoYfXT8dP6hVrj91qjWuWuZ6N9ms5uOrU4I", + "hvHI/vurggWd0L8Myw3TMNstDYvtTB1gNtwDVtrqAPIHi3iIvWwvJG4g1n3Y6vq8xvofTlMBhCnFNpXW", + "uw1QG25gkVld5klQxasNM2k1K+nv/6R+S4ICL20BfAMt9pFb30EnUmjoYKfeOWJfIeTMj5PrNtvi1FgN", + "tD/XVVgtuK+juBu18p7kGpvKvNKJJX+e6iqT3EpAbvUuhFKefk+d55MPucUjF7uGL0IvnnxQ3+z1q5bw", + "VEW+3K2KevfmKcpopxEReX454C0ezWBtuicpWKXiYffUQnE/tS7d+Hpq2YVwbaor4Nr0emicUAbK867i", + "RIeTM4n5esMUc4681z64bMx3aMX/z7eo5x9th1q03nv22s2urpmzLYndu5pGMqiwl4nN7ws6+fHciNVz", + "A+KdR+RrGaCZFirXj0tB645W0N0oRREzmdm7fdS3fjhTmaQXqR1WcDyF6y5z5RliEag9l9J6ecs377WD", + "x/alNTN/Vzs1fKmiuUrbcGS3smrtxKANixPfVQ/3rHjeA934gtaY54TD2ACPdApSxc1mauPokNtW7AKY", + "AlWc0yMH3a1CycqYhG6tDi4W0lFaB4onmJwT+lkQliQRd9lKjCQqFeTzFUl4AhEXbjLypOaPkAAo+/x7", + "KgQaegSlna7R6fh0ZKMlExAs4XRCf8JbA5ows0LYQzztPjHyJA99voOSuKPjUlyF+dn8TGbzYSMI2tgu", + "HldZKQwIHBWnkeEJU2Zot1onITOsfG/Rl467HcZvq3NoKyHecMmGXp2NRjVcXlCHf2obnl1BVdZmtF2d", + "sWkaBKD1Io1IKTagP78hhHJT0mL/goXku5sPZ3d8GLu3gqVmJRX/D4RoePzTYQxnzpJfhOFmQ2ZSkmum", + "li7qZ2dvCqKxO2vCKUVIsYM7P9Tk4+shwSIyBfUIipTb3LxE4VrpF6cfd9u7AdVpHDO1yZlNZpIgt+3Q", + "YcuLm+7KgEvEVUX2VQWCRVG2wveWir1eOm1rxw82nO9ZN6pL4LFwdBeOI2f35SxyjlRJh8xd4UEM7geh", + "havunIa+Y9b7J0G75vzWdy2DiN7ghs72JsX5bXsJwk1Gttd45+5kh/dqB+5PqqdYxzpzrDNvV2fchwoz", + 
"6U5LaqQsPh55kZT59yMHIWX3K70Dk/K4+B9J+e6kdNRCUkZRfJK/MO2m5HUUf8mFXmKk7/v65Onp6QSZ", + "maoIRCBDd5S4Bz973u0emJv+K5IjM4/MfDtmXkcxKQiGvDSwNjs0sN5Z+c7E3P8Uq3oaf2xTj7z7ILyz", + "yV3rUrNvSropd5sJvG9n2vqJy5F5R+Z9EOblLNq6UVaNxkFVS8WLqstIpiG5lHGcCm425Asz8MQ2NPtg", + "BF+P6clwGCpg8cnSPT2NsuGngR1Ot3fb/wYAAP//yh4IQxgzAAA=", } // GetSwagger returns the content of the embedded swagger specification file