generated from runpod-workers/worker-template
-
Notifications
You must be signed in to change notification settings - Fork 124
Commit
This commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
0.5.3: accept any vLLM argument as an environment variable; refactor and fixes; move away from building a separate image from the vLLM fork
- Loading branch information
1 parent
a08d83f
commit 5bd6f3a
Showing 11 changed files with 209 additions and 223 deletions.
There are no files selected for viewing
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Original file line number | Diff line number | Diff line change |
---|---|---|
@@ -1,27 +1,100 @@ | ||
import os | ||
from huggingface_hub import snapshot_download | ||
import json | ||
import logging | ||
import glob | ||
from shutil import rmtree | ||
from huggingface_hub import snapshot_download | ||
from utils import timer_decorator | ||
|
||
# Root directory for downloaded artifacts; in the image build this is "/".
# NOTE(review): setup_env() appears intended to redirect this to "tmp" for
# local testing — verify the reassignment actually takes effect.
BASE_DIR = "/"
# File patterns that make up a tokenizer: config JSONs plus tokenizer* files.
TOKENIZER_PATTERNS = [["*.json", "tokenizer*"]]
# Weight formats tried in order of preference: safetensors, then .bin, then .pt.
MODEL_PATTERNS = [["*.safetensors"], ["*.bin"], ["*.pt"]]
|
||
def setup_env():
    """Configure a local test environment when TESTING_DOWNLOAD=1.

    Redirects ``BASE_DIR`` to a local ``tmp`` directory and seeds the
    environment variables the rest of the script reads, so the download
    flow can be exercised outside a container build. No-op otherwise.
    """
    # Fix: the original assignment only created a function-local BASE_DIR,
    # leaving the module-level constant at "/" — the global statement makes
    # the test-mode redirect actually visible to the rest of the script.
    global BASE_DIR
    if os.getenv("TESTING_DOWNLOAD") == "1":
        BASE_DIR = "tmp"
        os.makedirs(BASE_DIR, exist_ok=True)
        os.environ.update({
            "HF_HOME": f"{BASE_DIR}/hf_cache",
            "MODEL_NAME": "openchat/openchat-3.5-0106",
            "HF_HUB_ENABLE_HF_TRANSFER": "1",
            "TENSORIZE": "1",
            "TENSORIZER_NUM_GPUS": "1",
            "DTYPE": "auto"
        })
|
||
@timer_decorator
def download(name, revision, type, cache_dir):
    """Download model or tokenizer files from the Hugging Face Hub.

    Tries each pattern set in priority order and returns the snapshot path
    of the first set whose files actually landed on disk.

    Args:
        name: Hub repo id to download.
        revision: Repo revision (branch/tag/commit) or None for default.
        type: "model" or "tokenizer" (selects which pattern sets to try).
              NOTE(review): shadows the builtin `type`; kept for caller
              compatibility.
        cache_dir: Hugging Face cache directory to download into.

    Returns:
        Path to the downloaded snapshot.

    Raises:
        ValueError: on an invalid ``type`` or when no pattern set matched.
    """
    if type == "model":
        # Each model pattern set also pulls tokenizer files so a model
        # download is self-contained.
        pattern_sets = [model_pattern + TOKENIZER_PATTERNS[0] for model_pattern in MODEL_PATTERNS]
    elif type == "tokenizer":
        pattern_sets = TOKENIZER_PATTERNS
    else:
        raise ValueError(f"Invalid type: {type}")
    try:
        for pattern_set in pattern_sets:
            path = snapshot_download(name, revision=revision, cache_dir=cache_dir,
                                     allow_patterns=pattern_set)
            # Verify files matching the pattern were actually materialized.
            for pattern in pattern_set:
                if glob.glob(os.path.join(path, pattern)):
                    logging.info(f"Successfully downloaded {pattern} model files.")
                    return path
    except ValueError as e:
        # Chain the cause so the underlying hub error isn't lost.
        raise ValueError(f"No patterns matching {pattern_sets} found for download.") from e
    # Fix: the original fell through here and silently returned None when
    # every pattern set downloaded nothing; fail loudly instead.
    raise ValueError(f"No patterns matching {pattern_sets} found for download.")
|
||
|
||
# @timer_decorator | ||
# def tensorize_model(model_path): TODO: Add back once tensorizer is ready | ||
# from vllm.engine.arg_utils import EngineArgs | ||
# from vllm.model_executor.model_loader.tensorizer import TensorizerConfig, tensorize_vllm_model | ||
# from torch.cuda import device_count | ||
|
||
# tensorizer_num_gpus = int(os.getenv("TENSORIZER_NUM_GPUS", "1")) | ||
# if tensorizer_num_gpus > device_count(): | ||
# raise ValueError(f"TENSORIZER_NUM_GPUS ({tensorizer_num_gpus}) exceeds available GPUs ({device_count()})") | ||
|
||
# dtype = os.getenv("DTYPE", "auto") | ||
# serialized_dir = f"{BASE_DIR}/serialized_model" | ||
# os.makedirs(serialized_dir, exist_ok=True) | ||
# serialized_uri = f"{serialized_dir}/model{'-%03d' if tensorizer_num_gpus > 1 else ''}.tensors" | ||
|
||
# tensorize_vllm_model( | ||
# EngineArgs(model=model_path, tensor_parallel_size=tensorizer_num_gpus, dtype=dtype), | ||
# TensorizerConfig(tensorizer_uri=serialized_uri) | ||
# ) | ||
# logging.info("Successfully serialized model to %s", str(serialized_uri)) | ||
# logging.info("Removing HF Model files after serialization") | ||
# rmtree("/".join(model_path.split("/")[:-2])) | ||
# return serialized_uri, tensorizer_num_gpus, dtype | ||
|
||
if __name__ == "__main__":
    # Seed test-mode env vars (and BASE_DIR redirect) before reading any of them.
    setup_env()
    cache_dir = os.getenv("HF_HOME")

    model_name = os.getenv("MODEL_NAME")
    if not model_name:
        # Fix: fail fast with an actionable message instead of passing None
        # into download(), which would surface as an opaque hub error.
        raise ValueError("Must specify model name by adding --build-arg MODEL_NAME=<your model's repo>")
    model_revision = os.getenv("MODEL_REVISION") or None
    # Tokenizer defaults to the model's repo and revision unless overridden.
    tokenizer_name = os.getenv("TOKENIZER_NAME") or model_name
    tokenizer_revision = os.getenv("TOKENIZER_REVISION") or model_revision

    model_path = download(model_name, model_revision, "model", cache_dir)

    metadata = {
        "MODEL_NAME": model_path,
        "MODEL_REVISION": os.getenv("MODEL_REVISION"),
        "QUANTIZATION": os.getenv("QUANTIZATION"),
    }

    # TODO: re-enable once tensorizer support is ready.
    # if os.getenv("TENSORIZE") == "1":
    #     serialized_uri, tensorizer_num_gpus, dtype = tensorize_model(model_path)
    #     metadata.update({
    #         "MODEL_NAME": serialized_uri,
    #         "TENSORIZER_URI": serialized_uri,
    #         "TENSOR_PARALLEL_SIZE": tensorizer_num_gpus,
    #         "DTYPE": dtype
    #     })

    tokenizer_path = download(tokenizer_name, tokenizer_revision, "tokenizer", cache_dir)
    metadata.update({
        "TOKENIZER_NAME": tokenizer_path,
        "TOKENIZER_REVISION": tokenizer_revision
    })

    # Persist metadata about the baked-in model/tokenizer for the worker to
    # read at startup; drop keys with empty values.
    with open(f"{BASE_DIR}/local_model_args.json", "w") as f:
        json.dump({k: v for k, v in metadata.items() if v not in (None, "")}, f)
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
Oops, something went wrong.