Add prompt_to_lora_id_mapping adjustment in fix_prompts() on release/v1.19 #250

Merged
37 changes: 32 additions & 5 deletions QEfficient/generation/text_generation_inference.py
@@ -192,32 +192,55 @@ def get_input_prompts(prompt: str, prompts_txt_file_path: str) -> List[str]:
     return prompt


-def fix_prompts(prompt: List[str], batch_size: int, full_batch_size: int = None):
+def fix_prompts(
+    prompt: List[str],
+    batch_size: int,
+    prompt_to_lora_id_mapping: Optional[List[int]] = None,
+    full_batch_size: Optional[int] = None,
+):
     """
-    Adjusts the list of prompts to match the required batch size.
+    Adjusts the list of prompts and prompt_to_lora_id_mapping to match the required batch size.

     ``Mandatory`` Args:
         prompt (List[str]): List of input prompts.
         batch_size (int): The batch size to process at a time.

     ``Optional`` Args:
+        prompt_to_lora_id_mapping (Optional[List[int]]): Mapping that associates each prompt with its LoRA adapter id.
         full_batch_size (Optional[int]): The full batch size if different from batch_size.

     Returns:
         List[str]: Adjusted list of prompts.
+        List[int]: Adjusted prompt_to_lora_id_mapping, returned together with the prompts only when a mapping is supplied.
     """
     exec_batch_size = full_batch_size if full_batch_size is not None else batch_size

     if len(prompt) < exec_batch_size:
         logger.warning("Number of prompts are less than batch size/full batch size, repeating to required batch size")
         prompt = (prompt * (exec_batch_size // len(prompt) + 1))[:exec_batch_size]
+        if prompt_to_lora_id_mapping is not None:
+            logger.warning(
+                "prompt_to_lora_id_mapping is shorter than batch size/full batch size, repeating to required batch size"
+            )
+            prompt_to_lora_id_mapping = (
+                prompt_to_lora_id_mapping * (exec_batch_size // len(prompt_to_lora_id_mapping) + 1)
+            )[:exec_batch_size]
     elif full_batch_size is None and len(prompt) % batch_size != 0:
         logger.warning(
             "Number of prompts are not multiple of batch size, dropping last incomplete batch from given input prompts"
         )
         prompt = prompt[: batch_size * (len(prompt) // batch_size)]
-
-    return prompt
+        if prompt_to_lora_id_mapping is not None:
+            logger.warning(
+                "prompt_to_lora_id_mapping is not a multiple of batch size, dropping last incomplete batch from given mapping"
+            )
+            prompt_to_lora_id_mapping = prompt_to_lora_id_mapping[
+                : batch_size * (len(prompt_to_lora_id_mapping) // batch_size)
+            ]
+
+    if prompt_to_lora_id_mapping is not None:
+        return prompt, prompt_to_lora_id_mapping
+    else:
+        return prompt


 def read_prompts_txt_file(prompts_txt_file_path: str):
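
For context, a minimal sketch of how the adjusted fix_prompts() behaves; it assumes the QEfficient package from this repo is importable, and the sample prompts and adapter ids are invented for illustration:

from QEfficient.generation.text_generation_inference import fix_prompts

# Two prompts, each tagged with a hypothetical LoRA adapter id.
prompts = ["Hello", "Bonjour"]
lora_ids = [0, 1]

# full_batch_size=4 exceeds len(prompts), so both lists are repeated
# in lockstep up to the execution batch size.
prompts, lora_ids = fix_prompts(
    prompts, batch_size=1, prompt_to_lora_id_mapping=lora_ids, full_batch_size=4
)
print(prompts)   # ['Hello', 'Bonjour', 'Hello', 'Bonjour']
print(lora_ids)  # [0, 1, 0, 1]

# Without full_batch_size, a prompt count that is not a multiple of
# batch_size drops the last incomplete batch from both lists.
prompts, lora_ids = fix_prompts(["a", "b", "c"], batch_size=2, prompt_to_lora_id_mapping=[0, 1, 2])
print(prompts)   # ['a', 'b']
print(lora_ids)  # [0, 1]

Because the tuple comes back only when a mapping is supplied, existing callers that never pass prompt_to_lora_id_mapping keep receiving a bare list, which is what makes the change backward compatible.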
@@ -311,7 +334,11 @@ def cloud_ai_100_exec_kv(
     """
     batch_size, ctx_len, full_batch_size = get_compilation_dims(qpc_path)
     prompt: List[str] = get_input_prompts(prompt, prompts_txt_file_path)
-    prompt = fix_prompts(prompt, batch_size, full_batch_size)
+    if prompt_to_lora_id_mapping:
+        prompt, prompt_to_lora_id_mapping = fix_prompts(prompt, batch_size, prompt_to_lora_id_mapping, full_batch_size)
+    else:
+        prompt = fix_prompts(prompt, batch_size, prompt_to_lora_id_mapping, full_batch_size)

     generate_text = TextGeneration(
         tokenizer=tokenizer,
         qpc_path=qpc_path,
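
Finally, a hedged sketch of what an end-to-end call might look like once the mapping flows through; the model name and QPC path are placeholders, and any parameter not visible in this diff is an assumption rather than the confirmed signature:

from transformers import AutoTokenizer

from QEfficient.generation.text_generation_inference import cloud_ai_100_exec_kv

tokenizer = AutoTokenizer.from_pretrained("gpt2")  # placeholder tokenizer/model
cloud_ai_100_exec_kv(
    tokenizer=tokenizer,
    qpc_path="/path/to/compiled/qpc",   # placeholder path to the compiled QPC
    prompt=["Hello", "Bonjour"],
    prompt_to_lora_id_mapping=[0, 1],   # adapter id per prompt; repeated or truncated by fix_prompts
)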