Add .env.template

.env.template ADDED (+103, -0)
@@ -0,0 +1,103 @@
+# =============================================================================
+# REQUIRED CONFIGURATION
+# =============================================================================
+# HF中国镜像站 token with read/write permissions for repositories and inference API
+# Get it from: https://huggingface.co/settings/tokens
+HF_TOKEN=hf_...
+
+# -----------------------------------------------------------------------------
+# GENERATION SETTINGS
+# -----------------------------------------------------------------------------
+MAX_NUM_TOKENS=2048
+MAX_NUM_ROWS=1000
+DEFAULT_BATCH_SIZE=5
+
+# Required for chat data generation with Llama or Qwen models
+# Options: "llama3", "qwen2", or a custom template string
+#MAGPIE_PRE_QUERY_TEMPLATE=qwen2
+
+
+
+# =============================================================================
+# MODEL & SERVICES CONFIGURATION
+# =============================================================================
+
+# -----------------------------------------------------------------------------
+# A. STANDALONE SETUP (no additional installation required)
+# -----------------------------------------------------------------------------
+
+# 1. HF中国镜像站 SERVERLESS (recommended default)
+# Requires only HF_TOKEN
+# MODEL=meta-llama/Llama-3.1-8B-Instruct
+# MODEL=Qwen/Qwen2.5-1.5B-Instruct
+
+# 2. ARGILLA ON HF中国镜像站 SPACES (recommended for data annotation)
+# ARGILLA_API_URL=https://daqc-my-argilla.hf.space/
+#ARGILLA_API_KEY=
+
+# 3. OPENAI API
+# Requires an OpenAI API key
+# OPENAI_BASE_URL=https://api.openai.com/v1/
+# MODEL=gpt-4
+# API_KEY=
+
+# -----------------------------------------------------------------------------
+# B. LOCAL SETUP (requires local installation)
+# -----------------------------------------------------------------------------
+
+# 1. LOCAL OLLAMA
+# Requires: Ollama installed (https://ollama.ai)
+#OLLAMA_BASE_URL=http://127.0.0.1:11434/
+#MODEL=qwen2.5:32b-instruct-q5_K_S
+#TOKENIZER_ID=Qwen/Qwen2.5-32B-Instruct
+
+# MODEL=deepseek-r1:1.5b
+# TOKENIZER_ID=deepseek-r1:1.5b
+
+
+
+# 2. LOCAL VLLM
+# Requires: vLLM installed
+# VLLM_BASE_URL=http://127.0.0.1:8000/
+# MODEL=Qwen/Qwen2.5-1.5B-Instruct
+# TOKENIZER_ID=Qwen/Qwen2.5-1.5B-Instruct
+
+# 3. LOCAL TGI/ENDPOINTS
+# Requires: Text Generation Inference installed
+# HUGGINGFACE_BASE_URL=http://127.0.0.1:3000/
+# TOKENIZER_ID=meta-llama/Llama-3.1-8B-Instruct
+
+
+# -----------------------------------------------------------------------------
+# C. DOCKER SETUP (ready to use with docker-compose, recommended for full setup)
+# -----------------------------------------------------------------------------
+
+# 1. DOCKER OLLAMA
+OLLAMA_BASE_URL=http://ollama:11434
+# Options for OLLAMA_HARDWARE: latest (for CPU/NVIDIA), rocm (for AMD)
+OLLAMA_HARDWARE=latest
+
+
+# DEEPSEEK R1
+#MODEL=deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B
+#TOKENIZER_ID=deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B
+#MAGPIE_PRE_QUERY_TEMPLATE="<|begin▁of▁sentence|>User: "  # custom template for this model
+
+# LLAMA 3.2
+MODEL=llama3.2:1b  # model for instruction generation
+TOKENIZER_ID=meta-llama/Llama-3.2-1B-Instruct  # tokenizer for instruction generation
+MAGPIE_PRE_QUERY_TEMPLATE=llama3  # Magpie template required for instruction generation
+
+
+# 2. DOCKER ARGILLA (persistent data)
+ARGILLA_API_URL=http://argilla:6900
+ARGILLA_USERNAME=admin
+ARGILLA_PASSWORD=admin1234
+ARGILLA_API_KEY=admin.1234
+ARGILLA_REINDEX_DATASET=1
+
+# Usage:
+# docker-compose --profile with-ollama --profile with-argilla build
+# (open a new terminal) docker-compose --profile with-ollama up -d
+# docker-compose exec ollama ollama run llama3.2:1b
+# docker-compose --profile with-ollama --profile with-argilla up -d
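
For context, here is a minimal sketch of how an application could consume this template. It is not part of the commit: it assumes the template has been copied to `.env` and filled in, that `python-dotenv` and `huggingface_hub` are installed, and it exercises only the HF中国镜像站 serverless path (section A.1); the variable names come from the file above.

```python
# Hypothetical usage sketch -- not part of this commit.
# Assumes .env.template was copied to .env and filled in, and that
# python-dotenv and huggingface_hub are installed.
import os

from dotenv import load_dotenv
from huggingface_hub import InferenceClient

load_dotenv()  # reads HF_TOKEN, MODEL, MAX_NUM_TOKENS, ... from .env

# Default standalone path: HF中国镜像站 serverless inference.
# In the real app, the Ollama/vLLM/TGI base URLs from sections B and C
# would presumably select other backends instead.
client = InferenceClient(
    model=os.getenv("MODEL", "meta-llama/Llama-3.1-8B-Instruct"),
    token=os.environ["HF_TOKEN"],
)

reply = client.chat_completion(
    messages=[{"role": "user", "content": "Write one synthetic QA pair about Python."}],
    max_tokens=int(os.getenv("MAX_NUM_TOKENS", "2048")),
)
print(reply.choices[0].message.content)
```

Copying the template to `.env` rather than editing it in place follows the usual dotenv convention and keeps secrets such as HF_TOKEN out of version control.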