From 26f8e0c6484f268f3482fa97d9b79627356b8fb6 Mon Sep 17 00:00:00 2001 From: leehwui Date: Mon, 8 Dec 2025 10:10:38 +0800 Subject: [PATCH] feat: add Docker support for offline deployment with qwen3:14b MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Major additions: - All-in-One Docker image with Ollama + models bundled - Separate deployment option for existing Ollama installations - Changed default model from qwen3:8b to qwen3:14b - Comprehensive deployment documentation Files added: - Dockerfile: Basic app-only image - Dockerfile.allinone: Complete image with Ollama + models - docker-compose.yml: Easy deployment configuration - docker-entrypoint.sh: Startup script for all-in-one image - requirements.txt: Python dependencies - .dockerignore: Exclude unnecessary files from image Scripts: - export-ollama-models.sh: Export models from local Ollama - build-allinone.sh: Build complete offline-deployable image - build-and-export.sh: Build and export basic image Documentation: - DEPLOYMENT.md: Comprehensive deployment guide - QUICK_START.md: Quick reference for common tasks Configuration: - Updated config.py: DEFAULT_CHAT_MODEL = qwen3:14b - Updated frontend/opro.html: Page title to 系统提示词优化 --- .dockerignore | 26 +++ DEPLOYMENT.md | 358 ++++++++++++++++++++++++++++++++++++++++ Dockerfile | 38 +++++ Dockerfile.allinone | 50 ++++++ QUICK_START.md | 117 +++++++++++++ build-allinone.sh | 98 +++++++++++ build-and-export.sh | 37 +++++ config.py | 2 +- docker-compose.yml | 23 +++ docker-entrypoint.sh | 35 ++++ export-ollama-models.sh | 105 ++++++++++++ frontend/opro.html | 4 +- requirements.txt | 7 + 13 files changed, 897 insertions(+), 3 deletions(-) create mode 100644 .dockerignore create mode 100644 DEPLOYMENT.md create mode 100644 Dockerfile create mode 100644 Dockerfile.allinone create mode 100644 QUICK_START.md create mode 100755 build-allinone.sh create mode 100755 build-and-export.sh create mode 100644 docker-compose.yml create mode 100644 docker-entrypoint.sh create mode 100755 export-ollama-models.sh create mode 100644 requirements.txt diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 0000000..5747494 --- /dev/null +++ b/.dockerignore @@ -0,0 +1,26 @@ +__pycache__ +*.pyc +*.pyo +*.pyd +.Python +*.so +*.egg +*.egg-info +dist +build +.git +.gitignore +.vscode +.idea +*.md +!README.md +local_docs +examples +outputs +.DS_Store +*.log +.env +.venv +venv +env + diff --git a/DEPLOYMENT.md b/DEPLOYMENT.md new file mode 100644 index 0000000..7c6060f --- /dev/null +++ b/DEPLOYMENT.md @@ -0,0 +1,358 @@ +# Docker 部署指南 + +本文档说明如何在无外网访问的服务器上部署系统提示词优化工具。 + +## 部署方案 + +本项目提供两种部署方案: + +### 方案 A: All-in-One 镜像(推荐,适用于无外网服务器) + +**优点**: +- 包含所有依赖:应用代码 + Ollama + LLM 模型 +- 一个镜像文件,部署简单 +- 无需在目标服务器上安装任何额外软件(除了 Docker) + +**缺点**: +- 镜像文件很大(10-20GB) +- 传输时间较长 + +### 方案 B: 分离部署(适用于已有 Ollama 的服务器) + +**优点**: +- 镜像文件较小(~500MB) +- 可以复用现有的 Ollama 服务 + +**缺点**: +- 需要在目标服务器上单独安装和配置 Ollama +- 需要手动下载模型 + +--- + +## 方案 A: All-in-One 部署(推荐) + +### 前置要求 + +#### 在开发机器上(有外网访问) + +1. **Docker** 已安装 +2. **Ollama** 已安装并运行 +3. **磁盘空间**:至少 30GB 可用空间 +4. 已下载所需的 Ollama 模型: + - `qwen3:14b` (主模型,~8GB) + - `qwen3-embedding:4b` (嵌入模型,~2GB) + +#### 在目标服务器上(无外网访问) + +1. **Docker** 已安装 +2. 
**磁盘空间**:至少 25GB 可用空间 + +### 部署步骤 + +#### 步骤 1: 下载所需的 Ollama 模型 + +在开发机器上,确保已下载所需模型: + +```bash +# 下载主模型(约 8GB) +ollama pull qwen3:14b + +# 下载嵌入模型(约 2GB) +ollama pull qwen3-embedding:4b + +# 验证模型已下载 +ollama list +``` + +#### 步骤 2: 导出 Ollama 模型 + +```bash +# 运行导出脚本 +./export-ollama-models.sh +``` + +这将创建 `ollama-models/` 目录,包含所有模型文件。 + +#### 步骤 3: 构建 All-in-One Docker 镜像 + +```bash +# 运行构建脚本(推荐) +./build-allinone.sh + +# 或手动构建 +docker build -f Dockerfile.allinone -t system-prompt-optimizer:allinone . +``` + +**注意**:构建过程可能需要 10-30 分钟,取决于机器性能。 + +#### 步骤 4: 导出 Docker 镜像 + +如果使用 `build-allinone.sh`,镜像已自动导出。否则手动导出: + +```bash +# 导出镜像(约 10-20GB) +docker save -o system-prompt-optimizer-allinone.tar system-prompt-optimizer:allinone + +# 验证文件大小 +ls -lh system-prompt-optimizer-allinone.tar +``` + +#### 步骤 5: 传输到目标服务器 + +使用 scp、U盘或其他方式传输镜像文件: + +```bash +# 使用 scp(如果网络可达) +scp system-prompt-optimizer-allinone.tar user@server:/path/ + +# 或使用 rsync(支持断点续传) +rsync -avP --progress system-prompt-optimizer-allinone.tar user@server:/path/ + +# 或使用 U盘/移动硬盘物理传输 +``` + +#### 步骤 6: 在目标服务器上加载镜像 + +```bash +# 加载镜像(需要几分钟) +docker load -i system-prompt-optimizer-allinone.tar + +# 验证镜像已加载 +docker images | grep system-prompt-optimizer +``` + +#### 步骤 7: 启动服务 + +```bash +# 启动容器 +docker run -d \ + --name system-prompt-optimizer \ + -p 8010:8010 \ + -p 11434:11434 \ + -v $(pwd)/outputs:/app/outputs \ + --restart unless-stopped \ + system-prompt-optimizer:allinone + +# 查看启动日志 +docker logs -f system-prompt-optimizer +``` + +**重要**:首次启动需要等待 30-60 秒,Ollama 服务需要初始化。 + +#### 步骤 8: 验证部署 + +```bash +# 等待服务启动(约 30-60 秒) +sleep 60 + +# 健康检查 +curl http://localhost:8010/health + +# 应该返回: +# {"status":"ok","version":"0.1.0"} + +# 检查 Ollama 服务 +curl http://localhost:11434/api/tags + +# 检查可用模型 +curl http://localhost:8010/models + +# 访问 Web 界面 +# 浏览器打开: http://<服务器IP>:8010/ui/opro.html +``` + +--- + +## 方案 B: 分离部署 + +### 前置要求 + +#### 在目标服务器上 + +1. **Docker** 已安装 +2. **Ollama** 服务已安装并运行 +3. 已拉取所需的 Ollama 模型: + - `qwen3:14b` (主模型) + - `qwen3-embedding:4b` (嵌入模型) + +### 部署步骤 + +#### 步骤 1: 构建应用镜像 + +```bash +# 在开发机器上构建 +docker build -t system-prompt-optimizer:latest . + +# 导出镜像 +docker save -o system-prompt-optimizer.tar system-prompt-optimizer:latest +``` + +#### 步骤 2: 传输并加载 + +```bash +# 传输到目标服务器 +scp system-prompt-optimizer.tar user@server:/path/ + +# 在目标服务器上加载 +docker load -i system-prompt-optimizer.tar +``` + +#### 步骤 3: 启动服务 + +```bash +# 使用 Docker Compose +docker-compose up -d + +# 或使用 Docker 命令 +docker run -d \ + --name system-prompt-optimizer \ + -p 8010:8010 \ + -e OLLAMA_HOST=http://host.docker.internal:11434 \ + -v $(pwd)/outputs:/app/outputs \ + --add-host host.docker.internal:host-gateway \ + --restart unless-stopped \ + system-prompt-optimizer:latest +``` + +## 配置说明 + +### 环境变量 + +在 `docker-compose.yml` 或 `docker run` 命令中可以配置以下环境变量: + +- `OLLAMA_HOST`: Ollama 服务地址(默认: `http://host.docker.internal:11434`) +- `PYTHONUNBUFFERED`: Python 输出缓冲(默认: `1`) + +### 端口映射 + +- **8010**: Web 界面和 API 端口 + +### 数据持久化 + +- `./outputs`: 用户反馈日志存储目录(映射到容器内 `/app/outputs`) + +## 故障排查 + +### 1. 无法连接 Ollama 服务 + +**问题**: 容器内无法访问宿主机的 Ollama 服务 + +**解决方案**: +```bash +# 确保使用了 --add-host 参数 +--add-host host.docker.internal:host-gateway + +# 或者直接使用宿主机 IP +-e OLLAMA_HOST=http://192.168.1.100:11434 +``` + +### 2. 
模型不可用(All-in-One 部署) + +**问题**: 容器内模型未正确加载 + +**解决方案**: +```bash +# 进入容器检查 +docker exec -it system-prompt-optimizer bash + +# 在容器内检查模型 +ollama list + +# 如果模型不存在,检查模型目录 +ls -la /root/.ollama/models/ + +# 退出容器 +exit +``` + +如果模型确实丢失,可能需要重新构建镜像。 + +### 3. 模型不可用(分离部署) + +**问题**: Ollama 模型未安装 + +**解决方案**: +```bash +# 在宿主机上拉取模型 +ollama pull qwen3:14b +ollama pull qwen3-embedding:4b + +# 验证模型已安装 +ollama list +``` + +### 4. 容器启动失败 + +**问题**: 端口被占用或权限问题 + +**解决方案**: +```bash +# 检查端口占用 +netstat -tulpn | grep 8010 +netstat -tulpn | grep 11434 + +# 更换端口(All-in-One 需要两个端口) +docker run -p 8011:8010 -p 11435:11434 ... + +# 查看容器日志 +docker logs system-prompt-optimizer +``` + +### 5. 性能问题 + +**问题**: 生成速度慢 + +**解决方案**: +- 确保 Ollama 使用 GPU 加速 +- 使用更小的模型(如 `qwen3:4b`) +- 调整 `config.py` 中的 `GENERATION_POOL_SIZE` + +## 更新部署 + +```bash +# 1. 在开发机器上重新构建镜像 +docker build -t system-prompt-optimizer:latest . + +# 2. 导出新镜像 +docker save -o system-prompt-optimizer-new.tar system-prompt-optimizer:latest + +# 3. 传输到服务器并加载 +docker load -i system-prompt-optimizer-new.tar + +# 4. 重启服务 +docker-compose down +docker-compose up -d + +# 或使用 docker 命令 +docker stop system-prompt-optimizer +docker rm system-prompt-optimizer +docker run -d ... # 使用相同的启动命令 +``` + +## 安全建议 + +1. **网络隔离**: 如果不需要外部访问,只绑定到 localhost + ```bash + -p 127.0.0.1:8010:8010 + ``` + +2. **防火墙**: 配置防火墙规则限制访问 + ```bash + # 只允许特定 IP 访问 + iptables -A INPUT -p tcp --dport 8010 -s 192.168.1.0/24 -j ACCEPT + iptables -A INPUT -p tcp --dport 8010 -j DROP + ``` + +3. **日志管理**: 定期清理日志文件 + ```bash + # 限制 Docker 日志大小 + docker run --log-opt max-size=10m --log-opt max-file=3 ... + ``` + +## 联系支持 + +如有问题,请查看: +- 应用日志: `docker logs system-prompt-optimizer` +- Ollama 日志: `journalctl -u ollama -f` +- API 文档: http://localhost:8010/docs + diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..c9045dd --- /dev/null +++ b/Dockerfile @@ -0,0 +1,38 @@ +FROM python:3.10-slim + +# Set working directory +WORKDIR /app + +# Install system dependencies +RUN apt-get update && apt-get install -y \ + curl \ + && rm -rf /var/lib/apt/lists/* + +# Copy requirements file +COPY requirements.txt . + +# Install Python dependencies +RUN pip install --no-cache-dir -r requirements.txt + +# Copy application code +COPY _qwen_xinference_demo/ ./_qwen_xinference_demo/ +COPY frontend/ ./frontend/ +COPY config.py . + +# Create outputs directory +RUN mkdir -p outputs + +# Expose port +EXPOSE 8010 + +# Set environment variables +ENV PYTHONUNBUFFERED=1 +ENV OLLAMA_HOST=http://host.docker.internal:11434 + +# Health check +HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \ + CMD curl -f http://localhost:8010/health || exit 1 + +# Run the application +CMD ["uvicorn", "_qwen_xinference_demo.api:app", "--host", "0.0.0.0", "--port", "8010"] + diff --git a/Dockerfile.allinone b/Dockerfile.allinone new file mode 100644 index 0000000..4fa0671 --- /dev/null +++ b/Dockerfile.allinone @@ -0,0 +1,50 @@ +FROM python:3.10-slim + +# Set working directory +WORKDIR /app + +# Install system dependencies including curl for Ollama +RUN apt-get update && apt-get install -y \ + curl \ + ca-certificates \ + && rm -rf /var/lib/apt/lists/* + +# Install Ollama +RUN curl -fsSL https://ollama.com/install.sh | sh + +# Copy requirements file +COPY requirements.txt . + +# Install Python dependencies +RUN pip install --no-cache-dir -r requirements.txt + +# Copy application code +COPY _qwen_xinference_demo/ ./_qwen_xinference_demo/ +COPY frontend/ ./frontend/ +COPY config.py . 
+ +# Create necessary directories +RUN mkdir -p outputs /root/.ollama + +# Copy pre-downloaded Ollama models +# This includes qwen3:14b and qwen3-embedding:4b +COPY ollama-models/ /root/.ollama/ + +# Expose ports +EXPOSE 8010 11434 + +# Set environment variables +ENV PYTHONUNBUFFERED=1 +ENV OLLAMA_HOST=http://localhost:11434 + +# Copy startup script +COPY docker-entrypoint.sh /docker-entrypoint.sh +RUN chmod +x /docker-entrypoint.sh + +# Health check +HEALTHCHECK --interval=30s --timeout=10s --start-period=40s --retries=3 \ + CMD curl -f http://localhost:8010/health && curl -f http://localhost:11434/api/tags || exit 1 + +# Run the startup script +ENTRYPOINT ["/docker-entrypoint.sh"] + diff --git a/QUICK_START.md b/QUICK_START.md new file mode 100644 index 0000000..df9dac9 --- /dev/null +++ b/QUICK_START.md @@ -0,0 +1,117 @@ +# 快速开始指南 + +## 离线部署(All-in-One 方案) + +### 在开发机器上(有外网) + +```bash +# 1. 下载模型 +ollama pull qwen3:14b +ollama pull qwen3-embedding:4b + +# 2. 导出模型 +./export-ollama-models.sh + +# 3. 构建并导出 Docker 镜像 +./build-allinone.sh + +# 4. 传输到目标服务器 +# 文件: system-prompt-optimizer-allinone.tar (约 10-20GB) +scp system-prompt-optimizer-allinone.tar user@server:/path/ +``` + +### 在目标服务器上(无外网) + +```bash +# 1. 加载镜像 +docker load -i system-prompt-optimizer-allinone.tar + +# 2. 启动服务 +docker run -d \ + --name system-prompt-optimizer \ + -p 8010:8010 \ + -p 11434:11434 \ + -v $(pwd)/outputs:/app/outputs \ + --restart unless-stopped \ + system-prompt-optimizer:allinone + +# 3. 等待启动(约 60 秒) +sleep 60 + +# 4. 验证 +curl http://localhost:8010/health +curl http://localhost:11434/api/tags + +# 5. 访问界面 +# http://<服务器IP>:8010/ui/opro.html +``` + +## 常用命令 + +```bash +# 查看日志 +docker logs -f system-prompt-optimizer + +# 重启服务 +docker restart system-prompt-optimizer + +# 停止服务 +docker stop system-prompt-optimizer + +# 删除容器 +docker rm -f system-prompt-optimizer + +# 进入容器 +docker exec -it system-prompt-optimizer bash + +# 检查模型 +docker exec -it system-prompt-optimizer ollama list +``` + +## 端口说明 + +- **8010**: Web 界面和 API +- **11434**: Ollama 服务(仅 All-in-One 方案需要暴露) + +## 文件说明 + +- `system-prompt-optimizer-allinone.tar`: 完整镜像(10-20GB) +- `outputs/`: 用户反馈日志目录 + +## 故障排查 + +### 服务无法启动 + +```bash +# 查看日志 +docker logs system-prompt-optimizer + +# 检查端口占用 +netstat -tulpn | grep 8010 +netstat -tulpn | grep 11434 +``` + +### 模型不可用 + +```bash +# 进入容器检查 +docker exec -it system-prompt-optimizer ollama list + +# 应该看到: +# qwen3:14b +# qwen3-embedding:4b +``` + +### 性能慢 + +- 确保服务器有足够的 RAM(建议 16GB+) +- 如果有 GPU,使用支持 GPU 的 Docker 运行时 +- 调整 `config.py` 中的 `GENERATION_POOL_SIZE` + +## 更多信息 + +详细文档请参考: +- `DEPLOYMENT.md`: 完整部署指南 +- `README.md`: 项目说明 +- http://localhost:8010/docs: API 文档 + diff --git a/build-allinone.sh b/build-allinone.sh new file mode 100755 index 0000000..55e154f --- /dev/null +++ b/build-allinone.sh @@ -0,0 +1,98 @@ +#!/bin/bash + +# Build all-in-one Docker image with Ollama and models +# This creates a complete offline-deployable image + +set -e + +IMAGE_NAME="system-prompt-optimizer" +IMAGE_TAG="allinone" +EXPORT_FILE="${IMAGE_NAME}-${IMAGE_TAG}.tar" + +echo "==========================================" +echo "Building All-in-One Docker Image" +echo "==========================================" +echo "" +echo "This will create a Docker image containing:" +echo " - Python application" +echo " - Ollama service" +echo " - qwen3:14b model" +echo " - qwen3-embedding:4b model" +echo "" +echo "WARNING: The final image will be 10-20GB in size!" +echo "" + +# Check if ollama-models directory exists +if [ ! 
-d "ollama-models" ]; then
+    echo "ERROR: ollama-models directory not found!"
+    echo ""
+    echo "Please run ./export-ollama-models.sh first to export the models."
+    exit 1
+fi
+
+echo "✓ Found ollama-models directory"
+echo ""
+
+# Check disk space
+AVAILABLE_SPACE=$(df -h . | awk 'NR==2 {print $4}')
+echo "Available disk space: $AVAILABLE_SPACE"
+echo "Required: ~20GB for build process"
+echo ""
+
+read -p "Continue with build? (y/n) " -n 1 -r
+echo
+if [[ ! $REPLY =~ ^[Yy]$ ]]; then
+    echo "Build cancelled."
+    exit 1
+fi
+
+echo ""
+echo "=========================================="
+echo "Building Docker image..."
+echo "=========================================="
+docker build -f Dockerfile.allinone -t ${IMAGE_NAME}:${IMAGE_TAG} .
+
+echo ""
+echo "=========================================="
+echo "Build complete!"
+echo "=========================================="
+docker images | grep ${IMAGE_NAME}
+
+echo ""
+echo "=========================================="
+echo "Exporting image to ${EXPORT_FILE}..."
+echo "=========================================="
+echo "This will take several minutes..."
+docker save -o ${EXPORT_FILE} ${IMAGE_NAME}:${IMAGE_TAG}
+
+echo ""
+echo "=========================================="
+echo "Export complete!"
+echo "=========================================="
+ls -lh ${EXPORT_FILE}
+
+echo ""
+echo "=========================================="
+echo "Deployment Instructions"
+echo "=========================================="
+echo ""
+echo "1. Transfer ${EXPORT_FILE} to target server:"
+echo "   scp ${EXPORT_FILE} user@server:/path/"
+echo ""
+echo "2. On target server, load the image:"
+echo "   docker load -i ${EXPORT_FILE}"
+echo ""
+echo "3. Run the container:"
+echo "   docker run -d \\"
+echo "     --name system-prompt-optimizer \\"
+echo "     -p 8010:8010 \\"
+echo "     -p 11434:11434 \\"
+echo "     -v \$(pwd)/outputs:/app/outputs \\"
+echo "     --restart unless-stopped \\"
+echo "     ${IMAGE_NAME}:${IMAGE_TAG}"
+echo ""
+echo "4. Access the application:"
+echo "   http://<server-ip>:8010/ui/opro.html"
+echo ""
+echo "See DEPLOYMENT.md for more details."
+
diff --git a/build-and-export.sh b/build-and-export.sh
new file mode 100755
index 0000000..de1475f
--- /dev/null
+++ b/build-and-export.sh
@@ -0,0 +1,37 @@
+#!/bin/bash
+
+# Build and export Docker image for offline deployment
+# Usage: ./build-and-export.sh
+
+set -e
+
+IMAGE_NAME="system-prompt-optimizer"
+IMAGE_TAG="latest"
+EXPORT_FILE="${IMAGE_NAME}.tar"
+
+echo "=========================================="
+echo "Building Docker image..."
+echo "=========================================="
+docker build -t ${IMAGE_NAME}:${IMAGE_TAG} .
+
+echo ""
+echo "=========================================="
+echo "Exporting Docker image to ${EXPORT_FILE}..."
+echo "=========================================="
+docker save -o ${EXPORT_FILE} ${IMAGE_NAME}:${IMAGE_TAG}
+
+echo ""
+echo "=========================================="
+echo "Export complete!"
+echo "=========================================="
+ls -lh ${EXPORT_FILE}
+
+echo ""
+echo "Next steps:"
+echo "1. Transfer ${EXPORT_FILE} to target server"
+echo "2. Transfer docker-compose.yml to target server (optional)"
+echo "3. On target server, run: docker load -i ${EXPORT_FILE}"
+echo "4. On target server, run: docker-compose up -d"
+echo ""
+echo "See DEPLOYMENT.md for detailed instructions."
+ diff --git a/config.py b/config.py index 00fa928..29a9c8b 100644 --- a/config.py +++ b/config.py @@ -7,7 +7,7 @@ APP_CONTACT = {"name": "OPRO Team", "url": "http://127.0.0.1:8010/ui/"} OLLAMA_HOST = "http://127.0.0.1:11434" OLLAMA_GENERATE_URL = f"{OLLAMA_HOST}/api/generate" OLLAMA_TAGS_URL = f"{OLLAMA_HOST}/api/tags" -DEFAULT_CHAT_MODEL = "qwen3:8b" +DEFAULT_CHAT_MODEL = "qwen3:14b" DEFAULT_EMBED_MODEL = "qwen3-embedding:4b" # Xinference diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 0000000..76ec756 --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,23 @@ +version: '3.8' + +services: + app: + build: . + container_name: system-prompt-optimizer + ports: + - "8010:8010" + environment: + - OLLAMA_HOST=http://host.docker.internal:11434 + - PYTHONUNBUFFERED=1 + volumes: + - ./outputs:/app/outputs + restart: unless-stopped + extra_hosts: + - "host.docker.internal:host-gateway" + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:8010/health"] + interval: 30s + timeout: 10s + retries: 3 + start_period: 5s + diff --git a/docker-entrypoint.sh b/docker-entrypoint.sh new file mode 100644 index 0000000..93141c7 --- /dev/null +++ b/docker-entrypoint.sh @@ -0,0 +1,35 @@ +#!/bin/bash + +set -e + +echo "Starting Ollama service..." +ollama serve & + +# Wait for Ollama to be ready +echo "Waiting for Ollama to start..." +for i in {1..30}; do + if curl -s http://localhost:11434/api/tags > /dev/null 2>&1; then + echo "Ollama is ready!" + break + fi + echo "Waiting for Ollama... ($i/30)" + sleep 2 +done + +# Check if models exist, if not, show warning +echo "Checking for models..." +ollama list + +if ! ollama list | grep -q "qwen3:14b"; then + echo "WARNING: qwen3:14b model not found!" + echo "The application requires qwen3:14b to function properly." +fi + +if ! ollama list | grep -q "qwen3-embedding"; then + echo "WARNING: qwen3-embedding model not found!" + echo "The application requires qwen3-embedding:4b for embeddings." +fi + +echo "Starting FastAPI application..." +exec uvicorn _qwen_xinference_demo.api:app --host 0.0.0.0 --port 8010 + diff --git a/export-ollama-models.sh b/export-ollama-models.sh new file mode 100755 index 0000000..6d5991f --- /dev/null +++ b/export-ollama-models.sh @@ -0,0 +1,105 @@ +#!/bin/bash + +# Export Ollama models for offline deployment +# This script copies Ollama models from your local machine +# so they can be bundled into the Docker image +# +# Required models: +# - qwen3:14b (main chat model) +# - qwen3-embedding:4b (embedding model) + +set -e + +MODELS_DIR="ollama-models" +OLLAMA_MODELS_PATH="$HOME/.ollama" + +echo "==========================================" +echo "Exporting Ollama models for offline deployment" +echo "==========================================" + +# Check if Ollama is installed +if ! command -v ollama &> /dev/null; then + echo "ERROR: Ollama is not installed or not in PATH" + exit 1 +fi + +# Check if required models are available +echo "" +echo "Checking for required models..." +MISSING_MODELS=0 + +if ! ollama list | grep -q "qwen3:14b"; then + echo "ERROR: qwen3:14b model not found!" + echo "Please run: ollama pull qwen3:14b" + MISSING_MODELS=1 +fi + +if ! ollama list | grep -q "qwen3-embedding:4b"; then + echo "ERROR: qwen3-embedding:4b model not found!" 
echo "Please run: ollama pull qwen3-embedding:4b"
+    MISSING_MODELS=1
+fi
+
+if [ $MISSING_MODELS -eq 1 ]; then
+    echo ""
+    echo "Please download the required models first:"
+    echo "  ollama pull qwen3:14b"
+    echo "  ollama pull qwen3-embedding:4b"
+    exit 1
+fi
+
+echo "✓ All required models found"
+
+# Check if Ollama directory exists
+if [ ! -d "$OLLAMA_MODELS_PATH" ]; then
+    echo "ERROR: Ollama directory not found at $OLLAMA_MODELS_PATH"
+    exit 1
+fi
+
+# Create export directory
+echo ""
+echo "Creating export directory: $MODELS_DIR"
+rm -rf "$MODELS_DIR"
+mkdir -p "$MODELS_DIR"
+
+echo ""
+echo "Copying Ollama data from $OLLAMA_MODELS_PATH to $MODELS_DIR..."
+echo "This may take several minutes (models are large)..."
+
+# Copy the entire .ollama directory structure
+cp -r "$OLLAMA_MODELS_PATH"/* "$MODELS_DIR/"
+
+echo ""
+echo "=========================================="
+echo "Models exported successfully!"
+echo "=========================================="
+du -sh "$MODELS_DIR"
+
+echo ""
+echo "Directory structure:"
+ls -lh "$MODELS_DIR/"
+
+echo ""
+echo "Models included:"
+if [ -d "$MODELS_DIR/models/manifests/registry.ollama.ai/library" ]; then
+    ls -lh "$MODELS_DIR/models/manifests/registry.ollama.ai/library/"
+fi
+
+echo ""
+echo "=========================================="
+echo "Next steps:"
+echo "=========================================="
+echo "1. Build the all-in-one Docker image:"
+echo "   ./build-allinone.sh"
+echo ""
+echo "2. Or manually:"
+echo "   docker build -f Dockerfile.allinone -t system-prompt-optimizer:allinone ."
+echo ""
+echo "3. Export the image:"
+echo "   docker save -o system-prompt-optimizer-allinone.tar system-prompt-optimizer:allinone"
+echo ""
+echo "4. Transfer to target server:"
+echo "   scp system-prompt-optimizer-allinone.tar user@server:/path/"
+echo ""
+echo "Note: The final Docker image will be very large (10-20GB) due to the models."
+
diff --git a/frontend/opro.html b/frontend/opro.html
index 7e4b4e0..8921df7 100644
--- a/frontend/opro.html
+++ b/frontend/opro.html
@@ -6,7 +6,7 @@
-    <title>OPRO - System Instruction Optimizer</title>
+    <title>系统提示词优化</title>
@@ -509,7 +509,7 @@
               // Header
               React.createElement('div', { className: 'px-4 py-3 border-b border-gray-200 bg-white flex items-center gap-3' },
                 React.createElement('h1', { className: 'text-lg font-normal text-gray-800' },
-                  'OPRO'
+                  '系统提示词优化'
                 ),
                 currentSessionId && React.createElement('div', { className: 'text-sm text-gray-500' },
                   sessions.find(s => s.session_id === currentSessionId)?.session_name || '当前会话'
diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..6e201bf
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,7 @@
+fastapi==0.109.0
+uvicorn==0.27.0
+requests==2.31.0
+numpy==1.26.3
+scikit-learn==1.4.0
+pydantic==2.5.3
+
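
Note (sketch, not part of the patch): DEPLOYMENT.md and docker-compose.yml describe `OLLAMA_HOST` as a configurable environment variable, while the config.py hunk above defines it as a hard-coded constant. If config.py is intended to pick that variable up at runtime, one possible pattern is shown below; the `os.environ` lookup is an assumption and is not implemented by this patch.

```python
# Hypothetical variant of the config.py constants that falls back to the
# patch's defaults when the OLLAMA_HOST environment variable is not set.
import os

OLLAMA_HOST = os.environ.get("OLLAMA_HOST", "http://127.0.0.1:11434")
OLLAMA_GENERATE_URL = f"{OLLAMA_HOST}/api/generate"
OLLAMA_TAGS_URL = f"{OLLAMA_HOST}/api/tags"

DEFAULT_CHAT_MODEL = "qwen3:14b"
DEFAULT_EMBED_MODEL = "qwen3-embedding:4b"
```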