[WIP] Phi3poc #2301
Open
JessicaXYWang wants to merge 26 commits into microsoft:master from JessicaXYWang:phi3poc
Commits (26, all by JessicaXYWang):
f0c2b00  poc
603777a  poc
47ae241  Merge branch 'master' into phi3poc
23f8ca0  rename module
bb5b2b6  Merge branch 'phi3poc' of https://github.com/JessicaXYWang/SynapseML …
f235535  update dependency
f2ab308  Merge branch 'master' into phi3poc
3ee9168  add set device type
b30f168  add Downloader
d760733  remove import
6efa59c  Merge branch 'master' into phi3poc
c7397f3  update lm
e1105fd  Merge branch 'phi3poc' of https://github.com/JessicaXYWang/SynapseML …
e59a981  Merge branch 'master' into phi3poc
ff8ad7f  pyarrow version conflict
56e623d  Merge branch 'phi3poc' of https://github.com/JessicaXYWang/SynapseML …
efa6aa0  update transformers version
2f5338c  add dependency
ff89511  update transformers version
b3dc5da  add phi3 test
c0cd463  test missing transformers library
e3e331c  update databricks test
382a20e  update databricks test
0a0f80c  update db library
eac0293  update doc
7a3e315  format
298 changes: 298 additions & 0 deletions
298
core/src/main/python/synapse/ml/llm/HuggingFaceCausallmTransform.py
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1,298 @@ | ||
from pyspark.ml import Transformer
from pyspark.ml.param.shared import (
    HasInputCol,
    HasOutputCol,
    Param,
    Params,
    TypeConverters,
)
from pyspark.sql import Row
from pyspark.sql.types import StringType, StructType, StructField
from pyspark.ml.util import DefaultParamsReadable, DefaultParamsWritable
from transformers import AutoTokenizer, AutoModelForCausalLM
from pyspark import keyword_only
import re
import os


class _PeekableIterator:
    """Wraps an iterator so upcoming elements can be inspected without being consumed."""

    def __init__(self, iterable):
        self._iterator = iter(iterable)
        self._cache = []

    def __iter__(self):
        return self

    def __next__(self):
        if self._cache:
            return self._cache.pop(0)
        else:
            return next(self._iterator)

    def peek(self, n=1):
        """Peek at the next n elements without consuming them."""
        while len(self._cache) < n:
            try:
                self._cache.append(next(self._iterator))
            except StopIteration:
                break
        if n == 1:
            return self._cache[0] if self._cache else None
        else:
            return self._cache[:n]


class _ModelParam:
    """Holds keyword arguments forwarded to model.generate (e.g. max_new_tokens)."""

    def __init__(self, **kwargs):
        self.param = {}
        self.param.update(kwargs)

    def get_param(self):
        return self.param


class _ModelConfig:
    """Holds keyword arguments forwarded to from_pretrained
    (e.g. local_files_only, trust_remote_code)."""

    def __init__(self, **kwargs):
        self.config = {}
        self.config.update(kwargs)

    def get_config(self):
        return self.config

    def set_config(self, **kwargs):
        self.config.update(kwargs)


def camel_to_snake(text):
    return re.sub(r"(?<!^)(?=[A-Z])", "_", text).lower()


class HuggingFaceCausalLM(
    Transformer, HasInputCol, HasOutputCol, DefaultParamsReadable, DefaultParamsWritable
):
    """Spark Transformer that applies a HuggingFace causal language model to a DataFrame column."""

    modelName = Param(
        Params._dummy(),
        "modelName",
        "model name",
        typeConverter=TypeConverters.toString,
    )
    inputCol = Param(
        Params._dummy(),
        "inputCol",
        "input column",
        typeConverter=TypeConverters.toString,
    )
    outputCol = Param(
        Params._dummy(),
        "outputCol",
        "output column",
        typeConverter=TypeConverters.toString,
    )
    modelParam = Param(
        Params._dummy(), "modelParam", "Model parameters, e.g. max_new_tokens"
    )
    modelConfig = Param(
        Params._dummy(),
        "modelConfig",
        "Model configuration, e.g. local_files_only, trust_remote_code",
    )
    cachePath = Param(
        Params._dummy(),
        "cachePath",
        "Cache path for the model; could be a lakehouse path",
        typeConverter=TypeConverters.toString,
    )
    deviceMap = Param(
        Params._dummy(),
        "deviceMap",
        "Specifies a model parameter for the device map. For GPU usage with models such as Phi 3, set it to 'cuda'.",
        typeConverter=TypeConverters.toString,
    )
    torchDtype = Param(
        Params._dummy(),
        "torchDtype",
        "Specifies a model parameter for the torch dtype. For GPU usage with models such as Phi 3, set it to 'auto'.",
        typeConverter=TypeConverters.toString,
    )

    @keyword_only
    def __init__(
        self,
        modelName=None,
        inputCol=None,
        outputCol=None,
        cachePath=None,
        deviceMap=None,
        torchDtype=None,
    ):
        super(HuggingFaceCausalLM, self).__init__()
        self._setDefault(
            modelName=modelName,
            inputCol=inputCol,
            outputCol=outputCol,
            modelParam=_ModelParam(),
            modelConfig=_ModelConfig(),
            cachePath=None,
            deviceMap=None,
            torchDtype=None,
        )
        kwargs = self._input_kwargs
        self.setParams(**kwargs)

    @keyword_only
    def setParams(
        self,
        modelName=None,
        inputCol=None,
        outputCol=None,
        cachePath=None,
        deviceMap=None,
        torchDtype=None,
    ):
        # keyword_only re-invokes this method with the caller's kwargs, so the
        # signature must accept the same keywords as __init__.
        kwargs = self._input_kwargs
        return self._set(**kwargs)

    def setModelName(self, value):
        return self._set(modelName=value)

    def getModelName(self):
        return self.getOrDefault(self.modelName)

    def setInputCol(self, value):
        return self._set(inputCol=value)

    def getInputCol(self):
        return self.getOrDefault(self.inputCol)

    def setOutputCol(self, value):
        return self._set(outputCol=value)

    def getOutputCol(self):
        return self.getOrDefault(self.outputCol)

    def setModelParam(self, **kwargs):
        param = _ModelParam(**kwargs)
        return self._set(modelParam=param)

    def getModelParam(self):
        return self.getOrDefault(self.modelParam)

    def setModelConfig(self, **kwargs):
        config = _ModelConfig(**kwargs)
        return self._set(modelConfig=config)

    def getModelConfig(self):
        return self.getOrDefault(self.modelConfig)

    def setCachePath(self, value):
        return self._set(cachePath=value)

    def getCachePath(self):
        return self.getOrDefault(self.cachePath)

    def setDeviceMap(self, value):
        return self._set(deviceMap=value)

    def getDeviceMap(self):
        return self.getOrDefault(self.deviceMap)

    def setTorchDtype(self, value):
        return self._set(torchDtype=value)

    def getTorchDtype(self):
        return self.getOrDefault(self.torchDtype)

    def load_model(self):
        """
        Loads the model and tokenizer either from the cache path or the HuggingFace Hub.
        """
        model_name = self.getModelName()
        model_config = self.getModelConfig().get_config()
        device_map = self.getDeviceMap()
        torch_dtype = self.getTorchDtype()

        if device_map:
            model_config["device_map"] = device_map
        if torch_dtype:
            model_config["torch_dtype"] = torch_dtype

        if self.getCachePath():
            hf_cache = self.getCachePath()
            if not os.path.isdir(hf_cache):
                raise NotADirectoryError(f"Directory does not exist: {hf_cache}")

            model = AutoModelForCausalLM.from_pretrained(
                hf_cache, local_files_only=True, **model_config
            )
            tokenizer = AutoTokenizer.from_pretrained(hf_cache, local_files_only=True)
        else:
            model = AutoModelForCausalLM.from_pretrained(model_name, **model_config)
            tokenizer = AutoTokenizer.from_pretrained(model_name)

        return model, tokenizer

    def _predict_single_complete(self, prompt, model, tokenizer):
        param = self.getModelParam().get_param()
        # Move inputs to the model's device, mirroring the chat path below.
        inputs = tokenizer(prompt, return_tensors="pt").input_ids.to(model.device)
        outputs = model.generate(inputs, **param)
        decoded_output = tokenizer.batch_decode(outputs, skip_special_tokens=True)[0]
        return decoded_output

    def _predict_single_chat(self, prompt, model, tokenizer):
        param = self.getModelParam().get_param()
        if isinstance(prompt, list):
            chat = prompt
        else:
            chat = [{"role": "user", "content": prompt}]
        formatted_chat = tokenizer.apply_chat_template(
            chat, tokenize=False, add_generation_prompt=True
        )
        tokenized_chat = tokenizer(
            formatted_chat, return_tensors="pt", add_special_tokens=False
        )
        inputs = {
            key: tensor.to(model.device) for key, tensor in tokenized_chat.items()
        }
        merged_inputs = {**inputs, **param}
        outputs = model.generate(**merged_inputs)
        # Decode only the newly generated tokens, skipping the prompt.
        decoded_output = tokenizer.decode(
            outputs[0][inputs["input_ids"].size(1) :], skip_special_tokens=True
        )
        return decoded_output

    def _process_partition(self, iterator, task):
        """Process each partition of the data."""
        peekable_iterator = _PeekableIterator(iterator)
        # peek() returns None for an empty partition; skip loading the model
        # in that case instead of paying the initialization cost.
        if peekable_iterator.peek() is None:
            return

        model, tokenizer = self.load_model()

        for row in peekable_iterator:
            prompt = row[self.getInputCol()]
            if task == "chat":
                result = self._predict_single_chat(prompt, model, tokenizer)
            elif task == "complete":
                result = self._predict_single_complete(prompt, model, tokenizer)
            row_dict = row.asDict()
            row_dict[self.getOutputCol()] = result
            yield Row(**row_dict)

    def _transform(self, dataset):
        input_schema = dataset.schema
        output_schema = StructType(
            input_schema.fields + [StructField(self.getOutputCol(), StringType(), True)]
        )
        result_rdd = dataset.rdd.mapPartitions(
            lambda partition: self._process_partition(partition, "chat")
        )
        result_df = result_rdd.toDF(output_schema)
        return result_df

    def complete(self, dataset):
        input_schema = dataset.schema
        output_schema = StructType(
            input_schema.fields + [StructField(self.getOutputCol(), StringType(), True)]
        )
        result_rdd = dataset.rdd.mapPartitions(
            lambda partition: self._process_partition(partition, "complete")
        )
        result_df = result_rdd.toDF(output_schema)
        return result_df
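For orientation, a minimal usage sketch of the transformer defined above, assuming a live Spark session and the transformers/torch packages on the cluster (model and column names mirror the quickstart notebook below; this is illustrative, not part of the PR):

from pyspark.sql import SparkSession
from synapse.ml.llm.HuggingFaceCausallmTransform import HuggingFaceCausalLM

spark = SparkSession.builder.getOrCreate()
chat_df = spark.createDataFrame([(1, "What is SynapseML")], ["row_index", "content"])

phi3 = (
    HuggingFaceCausalLM()
    .setModelName("microsoft/Phi-3-mini-4k-instruct")
    .setInputCol("content")
    .setOutputCol("result")
    .setModelParam(max_new_tokens=100)
)

# transform() routes each row through the tokenizer's chat template (the
# "chat" task); complete() runs plain text completion (the "complete" task).
chat_out = phi3.transform(chat_df)
completion_out = phi3.complete(chat_df)
chat_out.show(truncate=False)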
1 change: 1 addition & 0 deletions
1
... Algorithms/Language Model/Quickstart - Apply Phi 3 Model with HuggingFace CausalLM.ipynb
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -0,0 +1 @@ | ||
{"cells":[{"cell_type":"markdown","source":["# Apply Phi3 model with HuggingFace Causal ML"],"metadata":{"nteract":{"transient":{"deleting":false}}},"id":"7a355394-5b22-4c09-8d4f-9467a2fcfce4"},{"cell_type":"markdown","source":["\n","\n","**HuggingFace** is a popular open-source platform that develops computation tools for building application using machine learning. It is widely known for its Transformers library which contains open-source implementation of transformer models for text, image, and audio task.\n","\n","[**Phi 3**](https://azure.microsoft.com/en-us/blog/introducing-phi-3-redefining-whats-possible-with-slms/) is a family of AI models developed by Microsoft, designed to redefine what is possible with small language models (SLMs). Phi-3 models are the most compatable and cost-effective SLMs, [outperforming models of the same size and even larger ones in language](https://news.microsoft.com/source/features/ai/the-phi-3-small-language-models-with-big-potential/?msockid=26355e446adb6dfa06484f956b686c27), reasoning, coding, and math benchmarks. \n","\n","<img src=\"https://pub-66c8c8c5ae474e9a9161c92b21de2f08.r2.dev/2024/04/The-Phi-3-small-language-models-with-big-potential-1.jpg\" alt=\"Phi 3 model performance\" width=\"600\">\n","\n","To make it easier to scale up causal language model prediction on a large dataset, we have integrated [HuggingFace Causal LM](https://huggingface.co/docs/transformers/tasks/language_modeling) with SynapseML. This integration makes it easy to use the Apache Spark distributed computing framework to process large data on text generation tasks.\n","\n","This tutorial shows hot to apply [phi3 model](https://huggingface.co/collections/microsoft/phi-3-6626e15e9585a200d2d761e3) at scale with no extra setting.\n"],"metadata":{"nteract":{"transient":{"deleting":false}},"microsoft":{"language":"python","language_group":"synapse_pyspark"}},"id":"aa35ae52-6a9e-458d-91ee-ae3962ab5b68"},{"cell_type":"code","source":["chats = [\n"," (1, \"fix grammar: helol mi friend\"),\n"," (2, \"What is SynapseML\"),\n"," (3, \"translate to Spanish: hello\"),\n","]\n","\n","chat_df = spark.createDataFrame(chats, [\"row_index\", \"content\"])\n","chat_df.show()"],"outputs":[{"output_type":"display_data","data":{"application/vnd.livy.statement-meta+json":{"spark_pool":null,"statement_id":9,"statement_ids":[9],"state":"finished","livy_statement_state":"available","session_id":"0c9f61cd-1288-4e0e-9c81-e054702855b3","normalized_state":"finished","queued_time":"2025-01-16T17:14:07.1864063Z","session_start_time":null,"execution_start_time":"2025-01-16T17:18:56.122231Z","execution_finish_time":"2025-01-16T17:19:03.4236677Z","parent_msg_id":"11078688-e7a5-4a37-8e95-6485d95aa809"},"text/plain":"StatementMeta(, 0c9f61cd-1288-4e0e-9c81-e054702855b3, 9, Finished, Available, Finished)"},"metadata":{}},{"output_type":"stream","name":"stdout","text":["+---------+--------------------+\n|row_index| content|\n+---------+--------------------+\n| 1|fix grammar: helo...|\n| 2| What is SynapseML|\n| 3|translate to Span...|\n+---------+--------------------+\n\n"]}],"execution_count":3,"metadata":{"microsoft":{"language":"python","language_group":"synapse_pyspark"}},"id":"7e76b540-466f-4ab3-9aa9-da8de5517fc1"},{"cell_type":"markdown","source":["## Define and Apply Phi3 model"],"metadata":{"nteract":{"transient":{"deleting":false}},"microsoft":{"language":"python","language_group":"synapse_pyspark"}},"id":"ac0687e7-6609-4af4-a1a4-c098cb404374"},{"cell_type":"code","source":["from 
synapse.ml.llm.HuggingFaceCausallmTransform import HuggingFaceCausalLM\n","\n","phi3_transformer = (\n"," HuggingFaceCausalLM()\n"," .setModelName(\"microsoft/Phi-3-mini-4k-instruct\")\n"," .setInputCol(\"content\")\n"," .setOutputCol(\"result\")\n"," .setModelParam(max_new_tokens=1000)\n"," .setModelConfig(local_files_only=False, trust_remote_code=True)\n",")\n","result_df = phi3_transformer.transform(chat_df).collect()\n","display(result_df)"],"outputs":[],"execution_count":null,"metadata":{"microsoft":{"language":"python","language_group":"synapse_pyspark"},"collapsed":false,"jupyter":{"outputs_hidden":true},"editable":true,"run_control":{"frozen":false}},"id":"f8db55d9-b89d-420f-80e9-618041def698"},{"cell_type":"markdown","source":["## Use local cache\n","\n","By caching the model, you can reduce initialization time. On Fabric, store the model in a Lakehouse and use setCachePath to load it."],"metadata":{"nteract":{"transient":{"deleting":false}},"microsoft":{"language":"python","language_group":"synapse_pyspark"}},"id":"4c839ac6-f92e-4615-a0c3-977a96231cc6"},{"cell_type":"code","source":["# %%sh\n","# azcopy copy \"https://mmlspark.blob.core.windows.net/huggingface/microsoft/Phi-3-mini-4k-instruct\" \"/lakehouse/default/Files/microsoft/\" --recursive=true"],"outputs":[],"execution_count":null,"metadata":{"microsoft":{"language":"python","language_group":"synapse_pyspark"}},"id":"9bc5edf1-35cb-45d6-b1dc-49a22a01484b"},{"cell_type":"code","source":["# phi3_transformer = (\n","# HuggingFaceCausalLM()\n","# .setCachePath(\"/lakehouse/default/Files/microsoft/Phi-3-mini-4k-instruct\")\n","# .setInputCol(\"content\")\n","# .setOutputCol(\"result\")\n","# .setModelParam(max_new_tokens=1000)\n","# )\n","# result_df = phi3_transformer.transform(chat_df).collect()\n","# display(result_df)"],"outputs":[],"execution_count":null,"metadata":{"microsoft":{"language":"python","language_group":"synapse_pyspark"}},"id":"ee52c891-3be2-48fe-87b3-648e299a794e"}],"metadata":{"kernel_info":{"name":"synapse_pyspark"},"kernelspec":{"name":"synapse_pyspark","language":"Python","display_name":"Synapse PySpark"},"language_info":{"name":"python"},"microsoft":{"language":"python","language_group":"synapse_pyspark","ms_spell_check":{"ms_spell_check_language":"en"}},"nteract":{"version":"[email protected]"},"spark_compute":{"compute_id":"/trident/default","session_options":{"conf":{"spark.synapse.nbs.session.timeout":"1200000"}}},"synapse_widget":{"version":"0.1","state":{}},"dependencies":{"lakehouse":{"default_lakehouse":"cf3f397e-6a87-43ab-b8e0-bb9342e11c7a","default_lakehouse_name":"jessiwang_phi3","default_lakehouse_workspace_id":"4751a5bb-6a44-4164-8b31-c3b6a4cf1f8d"},"environment":{}}},"nbformat":4,"nbformat_minor":5} | ||
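The quickstart does not exercise the GPU-related params, but the transformer also exposes deviceMap and torchDtype. A sketch based on their param docstrings (assumes a CUDA-enabled runtime; not covered in the notebook):

phi3_gpu = (
    HuggingFaceCausalLM()
    .setModelName("microsoft/Phi-3-mini-4k-instruct")
    .setInputCol("content")
    .setOutputCol("result")
    .setModelParam(max_new_tokens=1000)
    .setDeviceMap("cuda")  # per the deviceMap docstring, use "cuda" for GPU
    .setTorchDtype("auto")  # per the torchDtype docstring, use "auto" for GPU
)
result_df = phi3_gpu.transform(chat_df)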
Review comment: before check-in, this style needs to be fixed with black, in the top-level dir.
Review comment: there might already be one in the library to use.