- Changed export-ollama-models.sh to selectively copy only qwen3:14b and qwen3-embedding:4b.
- Parses manifest files to identify the required blob files.
- Significantly reduces Docker image size by excluding unrelated models.
- Added a summary showing which models were skipped.

This prevents accidentally including other models (such as deepseek-r1 or bge-m3) that may exist in the user's Ollama directory but are not needed for the project.

169 lines · 5.1 KiB · Bash · Executable File
#!/usr/bin/env bash
#
# Export Ollama models for offline deployment.
#
# This script copies Ollama models from your local machine
# so they can be bundled into the Docker image.
#
# Required models:
#   - qwen3:14b          (main chat model)
#   - qwen3-embedding:4b (embedding model)

# Exit on any unhandled error (-e) and on use of an unset variable (-u).
# NOTE: `pipefail` is intentionally omitted — the model checks below pipe
# `ollama list` into `grep -q`, and grep's early exit can SIGPIPE the
# producer, which would turn a successful match into a pipeline failure.
set -eu

# Destination directory for the exported model tree (relative to CWD).
readonly MODELS_DIR="ollama-models"
# Default Ollama data directory on this machine.
readonly OLLAMA_MODELS_PATH="$HOME/.ollama"
|
|
|
|
echo "=========================================="
echo "Exporting Ollama models for offline deployment"
echo "=========================================="

# Check if Ollama is installed
if ! command -v ollama &> /dev/null; then
    echo "ERROR: Ollama is not installed or not in PATH"
    exit 1
fi

# Check if required models are available.
# Query `ollama list` once and reuse the output instead of spawning the
# (slow) external command once per model.
echo ""
echo "Checking for required models..."
MISSING_MODELS=0
INSTALLED_MODELS=$(ollama list)

# -F: match the model name as a fixed string, not a regex; -- guards
# against names that start with a dash.
if ! grep -qF -- "qwen3:14b" <<<"$INSTALLED_MODELS"; then
    echo "ERROR: qwen3:14b model not found!"
    echo "Please run: ollama pull qwen3:14b"
    MISSING_MODELS=1
fi

if ! grep -qF -- "qwen3-embedding:4b" <<<"$INSTALLED_MODELS"; then
    echo "ERROR: qwen3-embedding:4b model not found!"
    echo "Please run: ollama pull qwen3-embedding:4b"
    MISSING_MODELS=1
fi

if [ "$MISSING_MODELS" -eq 1 ]; then
    echo ""
    echo "Please download the required models first:"
    echo "  ollama pull qwen3:14b"
    echo "  ollama pull qwen3-embedding:4b"
    exit 1
fi

echo "✓ All required models found"

# Check if Ollama directory exists
if [ ! -d "$OLLAMA_MODELS_PATH" ]; then
    echo "ERROR: Ollama directory not found at $OLLAMA_MODELS_PATH"
    exit 1
fi

# Create export directory structure (start from a clean slate so stale
# blobs from a previous export cannot leak into the image).
echo ""
echo "Creating export directory: $MODELS_DIR"
rm -rf "$MODELS_DIR"
mkdir -p "$MODELS_DIR/models/manifests/registry.ollama.ai/library"
mkdir -p "$MODELS_DIR/models/blobs"

echo ""
echo "Copying only required models (qwen3:14b and qwen3-embedding:4b)..."
echo "This may take several minutes (models are large)..."
|
|
|
|
# Print the unique sha256 digests referenced by an Ollama manifest file,
# one bare (prefix-stripped) 64-hex-char hash per line, sorted.
# A missing/unreadable manifest yields empty output rather than an error.
get_blobs_from_manifest() {
    local manifest_path=$1
    grep -oE 'sha256:[a-f0-9]{64}' "$manifest_path" 2>/dev/null \
        | cut -d: -f2 \
        | sort -u
}
|
|
|
|
# Copy one model's manifest and all blobs it references into $MODELS_DIR.
#
# Globals:   OLLAMA_MODELS_PATH (read), MODELS_DIR (written under)
# Arguments: $1 - model name (e.g. "qwen3"); $2 - tag (e.g. "14b")
# Outputs:   progress/error messages on stdout
# Returns:   0 on success; 1 if the manifest is missing or a blob copy fails.
#            Blobs referenced by the manifest but absent from the local
#            store are reported as warnings (the export would otherwise be
#            silently incomplete).
copy_model() {
    local model_name=$1
    local model_tag=$2
    local manifest_dir="$OLLAMA_MODELS_PATH/models/manifests/registry.ollama.ai/library/$model_name"

    if [ ! -d "$manifest_dir" ]; then
        echo "ERROR: Model manifest not found: $manifest_dir"
        return 1
    fi

    # Guard clause: bail out early if the specific tag manifest is absent.
    if [ ! -f "$manifest_dir/$model_tag" ]; then
        echo "ERROR: Manifest file not found: $manifest_dir/$model_tag"
        return 1
    fi

    echo "  Copying $model_name:$model_tag manifest..."
    mkdir -p "$MODELS_DIR/models/manifests/registry.ollama.ai/library/$model_name"
    cp "$manifest_dir/$model_tag" "$MODELS_DIR/models/manifests/registry.ollama.ai/library/$model_name/"

    # Get all blob hashes referenced in this manifest.
    # Declaration is split from assignment so the substitution's exit
    # status is not masked by `local`.
    echo "  Finding blob files for $model_name:$model_tag..."
    local blob_hashes
    blob_hashes=$(get_blobs_from_manifest "$manifest_dir/$model_tag")
    local blob_count=0
    local blob_hash blob_file

    # Hashes are 64-char hex strings: word-splitting the list is safe.
    for blob_hash in $blob_hashes; do
        blob_file="$OLLAMA_MODELS_PATH/models/blobs/sha256-$blob_hash"
        if [ -f "$blob_file" ]; then
            # Do not suppress cp errors — a failed copy means a broken image.
            cp "$blob_file" "$MODELS_DIR/models/blobs/" || return 1
            blob_count=$((blob_count + 1))
        else
            echo "  WARNING: referenced blob missing locally, skipped: $blob_file"
        fi
    done

    echo "  ✓ $model_name:$model_tag copied ($blob_count blobs)"
}
|
|
|
|
# Export each required model ("name:tag"), aborting on the first failure.
for required_model in "qwen3:14b" "qwen3-embedding:4b"; do
    copy_model "${required_model%%:*}" "${required_model#*:}" || exit 1
done
|
|
|
|
echo ""
echo "=========================================="
echo "Models exported successfully!"
echo "=========================================="
echo ""
echo "Total size:"
du -sh "$MODELS_DIR"

echo ""
echo "Models included:"
if [ -d "$MODELS_DIR/models/manifests/registry.ollama.ai/library" ]; then
    ls -lh "$MODELS_DIR/models/manifests/registry.ollama.ai/library/"
fi

echo ""
echo "Blob files:"
if [ -d "$MODELS_DIR/models/blobs" ]; then
    # Count directory entries with find instead of parsing `ls` output
    # (SC2012); tr strips the leading padding some wc builds emit.
    echo "  Total blobs: $(find "$MODELS_DIR/models/blobs" -mindepth 1 -maxdepth 1 | wc -l | tr -d ' ')"
    du -sh "$MODELS_DIR/models/blobs"
fi

echo ""
echo "=========================================="
echo "Summary"
echo "=========================================="
echo "✓ Only qwen3:14b and qwen3-embedding:4b were exported"
echo ""
echo "Models in your Ollama that were NOT copied:"
# Drop the header line, then filter out the exported models.  Capture the
# result so the "(none)" fallback actually works: the previous
# `... | tail -n +2 || echo` never fired because tail exits 0 even when
# its output is empty.  `|| true` keeps the assignment safe under set -e
# when grep -v filters away every line.
SKIPPED_MODELS=$(ollama list | tail -n +2 | grep -v "qwen3:14b" | grep -v "qwen3-embedding:4b" || true)
if [ -n "$SKIPPED_MODELS" ]; then
    echo "$SKIPPED_MODELS"
else
    echo " (none)"
fi
echo ""
echo "This keeps the Docker image size minimal!"

echo ""
echo "=========================================="
echo "Next steps:"
echo "=========================================="
echo "1. Build the all-in-one Docker image:"
echo "   ./build-allinone.sh"
echo ""
echo "2. Or manually:"
echo "   docker build -f Dockerfile.allinone -t system-prompt-optimizer:allinone ."
echo ""
echo "3. Export the image:"
echo "   docker save -o system-prompt-optimizer-allinone.tar system-prompt-optimizer:allinone"
echo ""
echo "4. Transfer to target server:"
echo "   scp system-prompt-optimizer-allinone.tar user@server:/path/"
echo ""
echo "Note: The final Docker image will be very large (10-20GB) due to the models."
|
|
|