# Remote vLLM inference config for Llama 3.2 11B Vision Instruct.
#
# Requirements:
# - Run `pip install vllm`
# - Log into HF: `huggingface-cli login`
# - Request access to Llama 3.2: https://huggingface.co/meta-llama/Llama-3.2-11B-Vision-Instruct
#
# Usage:
# oumi infer -i -c configs/recipes/vision/llama3_2_vision/inference/11b_rvllm_infer.yaml
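#
# To point the config at a server running on a different host or port, you can
# override the endpoint from the command line (a sketch, assuming oumi's
# dotted-key CLI overrides; substitute your own host):
#   oumi infer -i -c configs/recipes/vision/llama3_2_vision/inference/11b_rvllm_infer.yaml \
#     --remote_params.api_url "http://<your-host>:6864/v1/chat/completions"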
#
# Sample Python command to start a vLLM server:
#
# Note: for models for which vLLM supports LoRA adapters, adapt the line below:
# export USE_LORA='--enable-lora --lora-modules your_lora_adapter=path/to/your/lora/adapter'
# python -u -m vllm.entrypoints.openai.api_server \
# --port 6864 \
# --model meta-llama/Llama-3.2-11B-Vision-Instruct \
# --trust-remote-code \
# --dtype=bfloat16 \
# --device=cuda \
# --max-model-len 256 \
# --enforce-eager \
# --disable-custom-all-reduce \
# --disable-log-requests \
# --max_num_seqs=2 \
# --enable-chunked-prefill=false \
# --gpu-memory-utilization=0.95 \
# --tensor-parallel-size 1 \
# ${USE_LORA}
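#
# Once the server is up, you can sanity-check the endpoint with a plain
# OpenAI-style chat request (a minimal sketch; the prompt and token limit
# are illustrative):
#
# curl http://localhost:6864/v1/chat/completions \
#   -H "Content-Type: application/json" \
#   -d '{
#         "model": "meta-llama/Llama-3.2-11B-Vision-Instruct",
#         "messages": [{"role": "user", "content": "Say hello in one short sentence."}],
#         "max_tokens": 32
#       }'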
#
# See Also:
# - Documentation: https://oumi.ai/docs/en/latest/user_guides/infer/infer.html
# - Config class: oumi.core.configs.InferenceConfig
# - Config source: https://github.com/oumi-ai/oumi/blob/main/src/oumi/core/configs/inference_config.py
# - Other inference configs: configs/**/inference/
model:
model_name: "meta-llama/Llama-3.2-11B-Vision-Instruct"
torch_dtype_str: "bfloat16"
model_max_length: 1024
chat_template: "llama3-instruct"
trust_remote_code: True
generation:
batch_size: 1
max_new_tokens: 32
remote_params:
  # This address is just an example (it's what you may get when you start a vLLM server locally).
# For details, see https://platform.openai.com/docs/api-reference/chat/create
api_url: "http://localhost:6864/v1/chat/completions"
engine: REMOTE_VLLM