diff --git a/gamechangerml/api/fastapi/routers/search.py b/gamechangerml/api/fastapi/routers/search.py
index cc84597b..72a62951 100644
--- a/gamechangerml/api/fastapi/routers/search.py
+++ b/gamechangerml/api/fastapi/routers/search.py
@@ -77,8 +77,28 @@ async def text_extract_infer(body: dict, extractType: str, response: Response) -
         raise
     return results
 
 
+@router.post("/embedSemanticQuery", status_code=200)
+async def embed_semantic_query(
+    body: dict,
+    response: Response,
+) -> list:
+    """
+    embedSemanticQuery - embeds a query with the txtai embedder and returns the embedding as a list of floats for use by Elasticsearch
+
+    Args:
+        (dict) JSON format of the search query.\n
+        query: (str, required) a string of any length to embed and use for semantic search.\n
+        Example: {"query": "Where is the science and technology reinvention laboratory?"}
+        response: Response class for status codes (part of FastAPI; does not need to be passed by the caller)
+    Returns:
+        results: the query embedding as a list of floats
+    """
+    query_text = body["query"]
+    embeddings = MODELS.semantic_searcher.embed_query(query_text)
+    return list(embeddings.astype(float))
+
-@ router.post("/semanticSearch", status_code=200)
+@router.post("/semanticSearch", status_code=200)
 async def semantic_search(
     body: dict,
     response: Response,
diff --git a/gamechangerml/src/search/sent_transformer/model.py b/gamechangerml/src/search/sent_transformer/model.py
index 3d89cd2d..6a37a449 100644
--- a/gamechangerml/src/search/sent_transformer/model.py
+++ b/gamechangerml/src/search/sent_transformer/model.py
@@ -470,4 +470,9 @@ def search(
 
             return top_results
         else:
-            return []
\ No newline at end of file
+            return []
+
+    def embed_query(self, query_text):
+        """Embed a single query string with the sentence embedder and return the raw embedding."""
+        embeddings = self.embedder.transform(query_text)
+        return embeddings
\ No newline at end of file
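
A minimal sketch of how the new endpoint might be exercised once the ML API is running. The base URL (localhost:5000) and the use of the requests library are assumptions, not part of this diff; adjust to your deployment. The response shape follows the handler above: a bare JSON list of floats.

# Hedged usage sketch; assumes the ML API is reachable at localhost:5000.
import requests

ML_API = "http://localhost:5000"  # assumption; not defined in this diff

resp = requests.post(
    f"{ML_API}/embedSemanticQuery",
    json={"query": "Where is the science and technology reinvention laboratory?"},
)
resp.raise_for_status()

embedding = resp.json()  # a bare JSON list of floats, per the handler above
print(len(embedding), embedding[:5])

On the Elasticsearch side these floats would typically feed a dense_vector field or a script_score/kNN query; that wiring is outside the scope of this diff.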