# NOTE(review): the lines below are file-browser metadata (path, line count,
# size) accidentally captured along with the script. Commented out so they are
# not executed as shell commands; they should be removed so the shebang on the
# next line is the first line of the file.
# ai-servers/new-api/init-channels.sh
# 161 lines
# 6.3 KiB
# Bash
# Executable file
#!/usr/bin/env bash
# Configures new-api channels and token via the admin API.
# Run once after first boot: ./new-api/init-channels.sh
#
# Requires these env vars (or .env file in project root):
# NEW_API_ACCESS_TOKEN - admin access token (set via INITIAL_ROOT_ACCESS_TOKEN)
# DEEPINFRA_API_KEY
# SILICONFLOW_API_KEY
# OPENROUTER_API_KEY
# GROQ_API_KEY
# CEREBRAS_API_KEY
# OPENWEBUI_API_KEY - token for Open WebUI to authenticate with new-api
set -euo pipefail
SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
ENV_FILE="${SCRIPT_DIR}/../.env"
# Load .env if present
if [[ -f "$ENV_FILE" ]]; then
set -a
# shellcheck disable=SC1090
source "$ENV_FILE"
set +a
fi
API_BASE="${NEW_API_BASE:-http://localhost:4000}"
TOKEN="${NEW_API_ACCESS_TOKEN:?Set NEW_API_ACCESS_TOKEN (from INITIAL_ROOT_ACCESS_TOKEN)}"
# ── Helper ──────────────────────────────────────────────

#######################################
# Create one new-api channel via POST ${API_BASE}/api/channel/.
# Globals:
#   API_BASE (read) - new-api base URL
#   TOKEN    (read) - admin bearer token
# Arguments:
#   $1 - channel display name
#   $2 - new-api channel type id
#   $3 - upstream API key
#   $4 - upstream base URL
#   $5 - priority value
#   $6 - comma-separated model aliases exposed by this channel
#   $7 - JSON object mapping aliases to upstream model names
# Outputs:   progress / error messages to stdout
# Returns:   0 always — failures are reported but not fatal, so under
#            `set -e` the remaining channels are still attempted.
#######################################
create_channel() {
  local name="$1" type="$2" key="$3" base_url="$4" priority="$5" models="$6" model_mapping="$7"

  printf 'Creating channel: %s (priority %s)...\n' "$name" "$priority"

  # Build the JSON payload with python3 so every value is escaped correctly
  # (keys and mappings may contain quotes, slashes, etc.).
  local payload
  payload=$(python3 -c "
import json, sys
print(json.dumps({
    'type': int(sys.argv[1]),
    'name': sys.argv[2],
    'key': sys.argv[3],
    'base_url': sys.argv[4],
    'models': sys.argv[5],
    'model_mapping': sys.argv[6],
    'priority': int(sys.argv[7]),
    'status': 1,
    'group': 'default',
    'weight': 1,
    'auto_ban': 1
}))
" "$type" "$name" "$key" "$base_url" "$models" "$model_mapping" "$priority") || {
    printf ' FAILED (could not build payload)\n'
    return 0
  }

  # Capture transport errors explicitly: curl -s returns non-zero on a
  # connection failure, which would otherwise kill the whole script via
  # `set -e` with no diagnostic, skipping all remaining channels.
  local resp http_code body
  if ! resp=$(curl -s -w "\n%{http_code}" \
      "${API_BASE}/api/channel/" \
      -H "Authorization: Bearer ${TOKEN}" \
      -H "Content-Type: application/json" \
      -d "$payload"); then
    printf ' FAILED (curl transport error talking to %s)\n' "${API_BASE}"
    return 0
  fi

  # -w appended "\n<code>" to the body: the text after the last newline is
  # the HTTP status code, everything before it is the response body.
  http_code=${resp##*$'\n'}
  body=${resp%$'\n'*}

  if [[ "$http_code" == "200" ]]; then
    echo " OK"
  else
    printf ' FAILED (HTTP %s)\n' "$http_code"
    # Show at most the first 500 bytes of the error body.
    printf ' %s' "$body" | head -c 500
    echo
  fi
}
# Wait for new-api to answer its status endpoint before configuring it.
# Polls every 2s, giving up after 30 attempts (~60s).
echo "Waiting for new-api at ${API_BASE}..."
for ((i = 1; i <= 30; i++)); do
  if curl -sf "${API_BASE}/api/status" > /dev/null 2>&1; then
    echo "new-api is ready."
    break
  fi
  # Arithmetic comparison instead of string equality on the counter.
  if (( i == 30 )); then
    echo "ERROR: new-api did not become ready in time."
    exit 1
  fi
  sleep 2
done
# Channel definitions. Argument order for create_channel:
#   name  type  key  base_url  priority  models  model_mapping
# ${VAR:?} aborts the script with an error if that provider key is unset.
# NOTE(review): assumes new-api treats a larger priority value as more
# preferred when several channels serve the same model alias — confirm
# against the new-api channel docs before reordering priorities.
# ── Channel: DeepInfra (priority 1) ────────────────────
create_channel "DeepInfra" 1 \
"${DEEPINFRA_API_KEY:?}" \
"https://api.deepinfra.com/v1/openai" \
1 \
"deepseek-v3.2,deepseek-r1,gpt-oss,gpt-oss-20b,nemotron-super,nemotron-nano,devstral,glm-4.6,glm-4.7,glm-5,kimi-k2,kimi-k2.5" \
'{"deepseek-v3.2":"deepseek-ai/DeepSeek-V3.2","deepseek-r1":"deepseek-ai/DeepSeek-R1","gpt-oss":"openai/gpt-oss-120b","gpt-oss-20b":"openai/gpt-oss-20b","nemotron-super":"nvidia/Llama-3.3-Nemotron-Super-49B-v1.5","nemotron-nano":"nvidia/NVIDIA-Nemotron-Nano-9B-v2","devstral":"mistralai/Devstral-Small-2505","glm-4.6":"zai-org/GLM-4.6","glm-4.7":"zai-org/GLM-4.7","glm-5":"zai-org/GLM-5","kimi-k2":"moonshotai/Kimi-K2-Instruct-0905","kimi-k2.5":"moonshotai/Kimi-K2.5"}'
# ── Channel: SiliconFlow (priority 2) ──────────────────
# NOTE(review): the alias "glm-4.7" maps to "THUDM/GLM-4-32B-0414" here but
# to "zai-org/GLM-4.7" on DeepInfra — looks inconsistent; verify intended.
create_channel "SiliconFlow" 1 \
"${SILICONFLOW_API_KEY:?}" \
"https://api.siliconflow.com/v1" \
2 \
"deepseek-v3.2,glm-4.7,kimi-k2,qwen3-coder,qwen3-coder-30b" \
'{"deepseek-v3.2":"deepseek-ai/DeepSeek-V3.2","glm-4.7":"THUDM/GLM-4-32B-0414","kimi-k2":"moonshotai/Kimi-K2-Instruct-0905","qwen3-coder":"Qwen/Qwen3-Coder-480B-A35B-Instruct","qwen3-coder-30b":"Qwen/Qwen3-Coder-30B-A3B-Instruct"}'
# ── Channel: OpenRouter (priority 3) ───────────────────
create_channel "OpenRouter" 1 \
"${OPENROUTER_API_KEY:?}" \
"https://openrouter.ai/api/v1" \
3 \
"deepseek-v3.2,deepseek-v3-free,kimi-k2.5,minimax-m2.5,gpt-4.1-mini,gpt-4.1,gemini-3-flash-preview,gemini-2.5-pro,claude-sonnet,trinity-large-preview" \
'{"deepseek-v3.2":"deepseek/deepseek-chat-v3-0324","deepseek-v3-free":"deepseek/deepseek-chat-v3-0324:free","kimi-k2.5":"moonshotai/kimi-k2.5","minimax-m2.5":"minimax/minimax-m2.5","gpt-4.1-mini":"openai/gpt-4.1-mini","gpt-4.1":"openai/gpt-4.1","gemini-3-flash-preview":"google/gemini-3-flash-preview","gemini-2.5-pro":"google/gemini-2.5-pro-preview","claude-sonnet":"anthropic/claude-sonnet-4","trinity-large-preview":"arcee-ai/trinity-large-preview"}'
# ── Channel: Groq (priority 1) ─────────────────────────
create_channel "Groq" 1 \
"${GROQ_API_KEY:?}" \
"https://api.groq.com/openai/v1" \
1 \
"llama-3.3-70b" \
'{"llama-3.3-70b":"llama-3.3-70b-versatile"}'
# ── Channel: Cerebras (priority 1) ─────────────────────
# Distinct alias ("llama-3.3-70b-cerebras") so Groq and Cerebras do not
# compete for the same model name despite sharing priority 1.
create_channel "Cerebras" 1 \
"${CEREBRAS_API_KEY:?}" \
"https://api.cerebras.ai/v1" \
1 \
"llama-3.3-70b-cerebras" \
'{"llama-3.3-70b-cerebras":"llama-3.3-70b"}'
# ── Create API token for Open WebUI ────────────────────
# Runs only when OPENWEBUI_API_KEY is set in the environment; otherwise the
# token must be created by hand in the new-api UI.
# NOTE(review): OPENWEBUI_API_KEY is only used as a gate here — the actual
# token key is generated server-side and must be copied from the response.
if [[ -n "${OPENWEBUI_API_KEY:-}" ]]; then
  echo ""
  echo "Creating API token for Open WebUI..."
  # POST /api/token/ with an unlimited-quota token named "open-webui";
  # the JSON body is produced by python3 so it is always well-formed.
  TOKEN_RESP=$(curl --silent "${API_BASE}/api/token/" \
    --header "Authorization: Bearer ${TOKEN}" \
    --header "Content-Type: application/json" \
    --data "$(python3 -c "
import json
print(json.dumps({
'name': 'open-webui',
'remain_quota': 0,
'unlimited_quota': True
}))
")")
  # Print at most the first 500 bytes of the server reply.
  echo "Token response: ${TOKEN_RESP}" | head -c 500
  echo ""
  echo ""
  echo "NOTE: Use the token 'key' from the response above as OPENAI_API_KEY in Open WebUI."
  echo " Or create a token manually in the new-api UI."
fi
# Final summary banner with follow-up instructions. A single here-doc
# replaces the run of echo statements; ${API_BASE} still expands, and the
# escaped "\\" at line ends emit the literal trailing backslashes of the
# example curl command.
cat <<EOF

══════════════════════════════════════
Channel setup complete!

Next steps:
 1. Verify channels at ${API_BASE} (login: root / 123456 — CHANGE THIS)
 2. Test a model:
 curl ${API_BASE}/v1/chat/completions \\
 -H 'Authorization: Bearer <token>' \\
 -H 'Content-Type: application/json' \\
 -d '{"model":"deepseek-v3.2","messages":[{"role":"user","content":"hi"}]}'
 3. Check Open WebUI can see models
══════════════════════════════════════
EOF