Major additions:
- All-in-One Docker image with Ollama + models bundled
- Separate deployment option for existing Ollama installations
- Changed default model from qwen3:8b to qwen3:14b
- Comprehensive deployment documentation

Files added:
- Dockerfile: Basic app-only image
- Dockerfile.allinone: Complete image with Ollama + models
- docker-compose.yml: Easy deployment configuration
- docker-entrypoint.sh: Startup script for all-in-one image
- requirements.txt: Python dependencies
- .dockerignore: Exclude unnecessary files from image

Scripts:
- export-ollama-models.sh: Export models from local Ollama
- build-allinone.sh: Build complete offline-deployable image
- build-and-export.sh: Build and export basic image

Documentation:
- DEPLOYMENT.md: Comprehensive deployment guide
- QUICK_START.md: Quick reference for common tasks

Configuration:
- Updated config.py: DEFAULT_CHAT_MODEL = qwen3:14b
- Updated frontend/opro.html: Page title to 系统提示词优化
36 lines
884 B
Bash
36 lines
884 B
Bash
#!/bin/bash
# docker-entrypoint.sh — startup script for the all-in-one image:
# launches the Ollama server in the background, waits until its HTTP API
# responds, warns about missing required models, then execs the FastAPI app.
set -euo pipefail

echo "Starting Ollama service..."
ollama serve &

# Poll Ollama's API until it answers (max 30 attempts x 2 s = 60 s).
echo "Waiting for Ollama to start..."
ready=0
for i in {1..30}; do
  if curl -sf http://localhost:11434/api/tags > /dev/null 2>&1; then
    echo "Ollama is ready!"
    ready=1
    break
  fi
  echo "Waiting for Ollama... ($i/30)"
  sleep 2
done

# Fail fast if Ollama never became reachable — the original script fell
# through silently and started the app against a dead backend.
if (( ready == 0 )); then
  echo "ERROR: Ollama did not become ready within 60 seconds." >&2
  exit 1
fi

# Query the installed models once, show them, and warn if the required
# chat and embedding models are missing (the app needs both to function).
echo "Checking for models..."
models=$(ollama list)
printf '%s\n' "$models"

if ! grep -q "qwen3:14b" <<<"$models"; then
  echo "WARNING: qwen3:14b model not found!"
  echo "The application requires qwen3:14b to function properly."
fi

if ! grep -q "qwen3-embedding" <<<"$models"; then
  echo "WARNING: qwen3-embedding model not found!"
  echo "The application requires qwen3-embedding:4b for embeddings."
fi

# exec replaces the shell so uvicorn runs as the container's main process
# and receives stop signals (SIGTERM) directly.
echo "Starting FastAPI application..."
exec uvicorn _qwen_xinference_demo.api:app --host 0.0.0.0 --port 8010