# docker-compose.yml
services:
  vllm-cpu:
    build:
      context: .
      dockerfile: Dockerfile
    image: vllm-cpu-optimized:latest
    container_name: vllm-smollm2
    # Command to serve the model (arguments passed to the entrypoint)
    command:
      - --model
      - ${MODEL_NAME:-HuggingFaceTB/SmolLM2-360M-Instruct}
      - --host
      - "0.0.0.0"
      - --port
      - "8000"
      - --dtype
      - ${DTYPE:-auto}
      - --max-model-len
      - ${MAX_MODEL_LEN:-2048}
      - --max-num-seqs
      - ${MAX_NUM_SEQS:-8}
      - --tensor-parallel-size
      - "1"
      - --pipeline-parallel-size
      - "1"
      - --disable-log-requests
      - --trust-remote-code
    ports:
      - "${VLLM_PORT:-8009}:8000"
    environment:
      # vLLM CPU-specific settings (inherited from the Dockerfile)
      - VLLM_TARGET_DEVICE=cpu
      - VLLM_CPU_KVCACHE_SPACE=${KVCACHE_SPACE:-1}
      # Thread control for optimal CPU performance
      - OMP_NUM_THREADS=${OMP_THREADS:-2}
      - OPENBLAS_NUM_THREADS=1
      - MKL_NUM_THREADS=1
      # HuggingFace settings
      - HF_HOME=/workspace/.cache/huggingface
      - TRANSFORMERS_CACHE=/workspace/.cache/huggingface
      # Optional: HuggingFace token for gated models
      # - HF_TOKEN=${HF_TOKEN}
    volumes:
      # Cache HuggingFace models to avoid re-downloading
      - hf-cache:/workspace/.cache/huggingface
      # Optional: mount a local models directory
      # - ./models:/workspace/models:ro
    # Resource limits optimized for macOS
    deploy:
      resources:
        limits:
          cpus: '${CPU_LIMIT:-4.0}'
          memory: ${MEMORY_LIMIT:-8G}
        reservations:
          cpus: '${CPU_RESERVATION:-2.0}'
          memory: ${MEMORY_RESERVATION:-4G}
    # Health check
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:8000/health"]
      interval: 30s
      timeout: 10s
      retries: 3
      start_period: 60s
    # Restart policy
    restart: unless-stopped
    # Network configuration
    networks:
      - vllm-network
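
  # All of the ${VAR:-default} tunables above can be overridden from a .env
  # file next to this compose file. A minimal sketch (illustrative values
  # only, mirroring the defaults already used above):
  #   MODEL_NAME=HuggingFaceTB/SmolLM2-360M-Instruct
  #   DTYPE=auto
  #   MAX_MODEL_LEN=2048
  #   MAX_NUM_SEQS=8
  #   VLLM_PORT=8009
  #   KVCACHE_SPACE=1
  #   OMP_THREADS=2
  #   CPU_LIMIT=4.0
  #   MEMORY_LIMIT=8G
  #   CPU_RESERVATION=2.0
  #   MEMORY_RESERVATION=4G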
  # Gradio chatbot interface
  chatbot:
    build:
      context: .
      dockerfile: Dockerfile.chatbot
    image: vllm-chatbot:latest
    container_name: vllm-chatbot
    ports:
      - "7860:7860"
    environment:
      - VLLM_BASE_URL=http://vllm-cpu:8000/v1
      - MODEL_NAME=${MODEL_NAME:-HuggingFaceTB/SmolLM2-360M-Instruct}
    depends_on:
      vllm-cpu:
        condition: service_healthy
    restart: unless-stopped
    networks:
      - vllm-network
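
  # To build and start the stack, then follow the server logs (standard
  # Docker Compose CLI; service names are the ones defined in this file):
  #   docker compose up -d --build
  #   docker compose logs -f vllm-cpu
  #   docker compose ps    # chatbot starts only after vllm-cpu reports healthy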
  # Optional: simple web UI for testing (uncomment to enable)
  # webui:
  #   image: ghcr.io/open-webui/open-webui:main
  #   container_name: vllm-webui
  #   ports:
  #     - "3000:8080"
  #   environment:
  #     - OPENAI_API_BASE=http://vllm-cpu:8000/v1
  #     - OPENAI_API_KEY=dummy
  #   depends_on:
  #     - vllm-cpu
  #   networks:
  #     - vllm-network
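
  # If enabled, Open WebUI is published on the host at http://localhost:3000
  # and can be started with `docker compose up -d webui` (its depends_on
  # entry brings up vllm-cpu as well).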
volumes:
  hf-cache:
    driver: local

networks:
  vllm-network:
    driver: bridge
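
# Once vllm-cpu is healthy, it serves the same OpenAI-compatible API the
# chatbot points at via VLLM_BASE_URL. A minimal smoke test from the host,
# assuming the default VLLM_PORT of 8009 (endpoint paths per vLLM's
# OpenAI-compatible server):
#   curl -f http://localhost:8009/health
#   curl http://localhost:8009/v1/chat/completions \
#     -H "Content-Type: application/json" \
#     -d '{"model": "HuggingFaceTB/SmolLM2-360M-Instruct",
#          "messages": [{"role": "user", "content": "Hello"}]}'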