Commit e5582ab0 authored by Alex4309h's avatar Alex4309h

chore: 清理不需要的文件

移除 Docker 部署文件、垃圾文件、原项目图片资源,
更新 .gitignore 和 README
parent 285fbae7
# Version control
.git
.gitignore
.gitattributes
# Documentation
README.md
readme_en.md
docs/
*.md
# Development files
.vscode/
.idea/
*.swp
*.swo
*~
# Python
__pycache__/
*.py[cod]
*$py.class
*.so
.Python
env/
venv/
.venv/
ENV/
env.bak/
venv.bak/
.pytest_cache/
.coverage
.tox/
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/
# Jupyter
.ipynb_checkpoints
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
pip-wheel-metadata/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# Logs
logs/
*.log
npm-debug.log*
yarn-debug.log*
yarn-error.log*
lerna-debug.log*
# Runtime data
pids
*.pid
*.seed
*.pid.lock
# Directory for instrumented libs generated by jscoverage/JSCover
lib-cov
# Coverage directory used by tools like istanbul
coverage
*.lcov
# nyc test coverage
.nyc_output
# Grunt intermediate storage (https://gruntjs.com/creating-plugins#storing-task-files)
.grunt
# Bower dependency directory (https://bower.io/)
bower_components
# node_modules
node_modules/
npm-debug.log*
yarn-debug.log*
yarn-error.log*
# Optional npm cache directory
.npm
# Optional eslint cache
.eslintcache
# Microbundle cache
.rpt2_cache/
.rts2_cache_cjs/
.rts2_cache_es/
.rts2_cache_umd/
# Optional REPL history
.node_repl_history
# Output of 'npm pack'
*.tgz
# Yarn Integrity file
.yarn-integrity
# dotenv environment variables file
.env
.env.test
.env.local
.env.production
# parcel-bundler cache (https://parceljs.org/)
.cache
.parcel-cache
# Next.js build output
.next
# Nuxt.js build / generate output
.nuxt
dist
# Gatsby files
.cache/
public
# Storybook build outputs
.out
.storybook-out
# Temporary folders
tmp/
temp/
# Editor backup files
*~
*.tmp
*.bak
# OS generated files
.DS_Store
.DS_Store?
._*
.Spotlight-V100
.Trashes
ehthumbs.db
Thumbs.db
# Docker
.dockerignore
Dockerfile*
docker-compose*.yml
# Kubernetes
*.yaml
*.yml
k8s/
# Test files
test/
tests/
*_test.py
test_*.py
# Models and large files (should be mounted as volumes)
models/
*.pth
*.bin
*.safetensors
*.ckpt
*.pt
# SSL certificates (should be mounted as volumes)
ssl_certs/
*.pem
*.key
*.crt
# Build artifacts that should be mounted
build/
output/
results/
# GitHub
.github/workflows/
.github/ISSUE_TEMPLATE/
.github/PULL_REQUEST_TEMPLATE.md
# IDE specific
.vscode/
.idea/
*.sublime-project
*.sublime-workspace
# Temporary files
*.tmp
*.temp
*.pid
*.lock
# Backup files
*.backup
*.old
*.orig
\ No newline at end of file
......@@ -59,6 +59,21 @@ src/debug_sdp.log
# Claude Code
.claude/
CLAUDE.md
# Docker & deployment
Dockerfile*
docker-compose*.yml
.dockerignore
coturn-data/
*_docker*.sh
build_and_run.sh
build_cuda128.sh
start_all_services.sh
stop_all_services.sh
# Assets (original project images)
assets/
# OS
.DS_Store
......
FROM nvidia/cuda:12.2.2-cudnn8-devel-ubuntu22.04
LABEL authors="HumanAIGC-Engineering"

# Config file used at build time to resolve optional handler dependencies.
ARG CONFIG_FILE=config/chat_with_minicpm.yaml
ENV DEBIAN_FRONTEND=noninteractive

# Use Tsinghua University APT mirrors for faster downloads in China
RUN sed -i 's/archive.ubuntu.com/mirrors.tuna.tsinghua.edu.cn/g' /etc/apt/sources.list && \
    sed -i 's/security.ubuntu.com/mirrors.tuna.tsinghua.edu.cn/g' /etc/apt/sources.list

# Install Python 3.11 (deadsnakes PPA) and the runtime libraries; clean the
# apt cache in the SAME layer so the lists never persist in the image.
RUN apt-get update && \
    apt-get install -y software-properties-common && \
    add-apt-repository ppa:deadsnakes/ppa && \
    apt-get update && \
    apt-get install -y python3.11 python3.11-dev python3.11-venv python3.11-distutils python3-pip git libgl1 libglib2.0-0 && \
    apt-get clean && \
    rm -rf /var/lib/apt/lists/*

# Make Python 3.11 the default python3 and upgrade pip
RUN update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.11 1 && \
    python3.11 -m ensurepip --upgrade && \
    python3.11 -m pip install --upgrade pip

ARG WORK_DIR=/root/open-avatar-chat
WORKDIR $WORK_DIR

# Copy dependency manifests before the source tree for better layer caching
COPY ./install.py $WORK_DIR/install.py
COPY ./pyproject.toml $WORK_DIR/pyproject.toml
COPY ./src/third_party $WORK_DIR/src/third_party

RUN pip install --no-cache-dir uv && \
    uv venv --python 3.11.11 && \
    uv sync --no-install-workspace

# COPY (not ADD) for plain local files/directories — ADD's extra tar/URL
# behavior is not wanted here.
COPY ./src $WORK_DIR/src
# Script files must be present before installing config dependencies
COPY ./scripts $WORK_DIR/scripts

# Execute pre-config installation script
RUN echo "Using config file: ${CONFIG_FILE}"
COPY $CONFIG_FILE /tmp/build_config.yaml
RUN chmod +x $WORK_DIR/scripts/pre_config_install.sh && \
    $WORK_DIR/scripts/pre_config_install.sh --config /tmp/build_config.yaml

# Install config dependencies (core deps were already synced above)
RUN uv run install.py \
    --config /tmp/build_config.yaml \
    --uv \
    --skip-core

# Execute post-config installation script, then drop the temp config
RUN chmod +x $WORK_DIR/scripts/post_config_install.sh && \
    $WORK_DIR/scripts/post_config_install.sh --config /tmp/build_config.yaml && \
    rm /tmp/build_config.yaml

COPY ./resource $WORK_DIR/resource
COPY ./.env* $WORK_DIR/

WORKDIR $WORK_DIR
# Exec-form entrypoint so the process receives signals from `docker stop`
ENTRYPOINT ["uv", "run", "src/demo.py"]
FROM swr.cn-north-4.myhuaweicloud.com/ddn-k8s/docker.io/nvidia/cuda:12.8.1-cudnn-devel-ubuntu22.04
# Re-declare ARGs after FROM to make them available in subsequent layers
ARG WORK_DIR=/root/open-avatar-chat
# Image metadata with dynamic version
LABEL authors="HumanAIGC-Engineering"
# Environment variables for optimized build and runtime
ENV DEBIAN_FRONTEND=noninteractive \
PYTHONUNBUFFERED=1 \
PYTHONDONTWRITEBYTECODE=1 \
PYTHONOPTIMIZE=1 \
PIP_NO_CACHE_DIR=1 \
PIP_DISABLE_PIP_VERSION_CHECK=1 \
UV_COMPILE_BYTECODE=0 \
WORK_DIR=$WORK_DIR
# =============================================================================
# System Dependencies Installation
# =============================================================================
# Use Tsinghua University mirrors for faster package downloads in China
# Install latest Python 3.11.x and essential system libraries for the application
RUN sed -i 's/archive.ubuntu.com/mirrors.tuna.tsinghua.edu.cn/g' /etc/apt/sources.list && \
sed -i 's/security.ubuntu.com/mirrors.tuna.tsinghua.edu.cn/g' /etc/apt/sources.list && \
apt-get update && \
apt-get install -y software-properties-common && \
add-apt-repository ppa:deadsnakes/ppa && \
apt-get update && \
apt-get install -y \
# Python 3.11 and development tools (latest available 3.11.x version)
python3.11 \
python3.11-dev \
python3.11-venv \
python3.11-distutils \
python3-pip \
# Version control and build tools
git \
git-lfs \
build-essential \
# Graphics and image libraries (required for OpenCV/PIL)
libgl1 \
libglib2.0-0 \
libsm6 \
libxext6 \
libgomp1 \
# Audio processing libraries (required for librosa, soundfile, torchaudio)
libsndfile1 \
ffmpeg \
sox \
libsox-dev \
# Multimedia codec libraries (required for video processing)
libavcodec-dev \
libavformat-dev \
libswscale-dev \
# Additional utilities
curl \
ca-certificates && \
# Configure git lfs
git lfs install && \
# Clean up package manager cache to reduce image size
apt-get clean && \
rm -rf /var/lib/apt/lists/* && \
# Set Python 3.11 as default python3 with higher priority
update-alternatives --install /usr/bin/python3 python3 /usr/bin/python3.11 2 && \
update-alternatives --set python3 /usr/bin/python3.11 && \
# Upgrade pip to latest version with optimizations
python3.11 -m ensurepip --upgrade && \
python3.11 -m pip install --no-cache-dir --upgrade pip setuptools wheel
# Set working directory for the application
WORKDIR $WORK_DIR
# =============================================================================
# Python Dependencies and Virtual Environment Setup
# =============================================================================
# Copy dependency files first for better Docker layer caching
COPY ./install.py $WORK_DIR/install.py
COPY ./pyproject.toml $WORK_DIR/pyproject.toml
# Copy all workspace member pyproject.toml files while preserving directory structure
COPY ./src/handlers/tts/edgetts/pyproject.toml $WORK_DIR/src/handlers/tts/edgetts/pyproject.toml
COPY ./src/handlers/tts/cosyvoice/pyproject.toml $WORK_DIR/src/handlers/tts/cosyvoice/pyproject.toml
COPY ./src/handlers/llm/minicpm/pyproject.toml $WORK_DIR/src/handlers/llm/minicpm/pyproject.toml
COPY ./src/handlers/avatar/liteavatar/pyproject.toml $WORK_DIR/src/handlers/avatar/liteavatar/pyproject.toml
COPY ./src/handlers/avatar/lam/pyproject.toml $WORK_DIR/src/handlers/avatar/lam/pyproject.toml
COPY ./src/handlers/vad/silerovad/pyproject.toml $WORK_DIR/src/handlers/vad/silerovad/pyproject.toml
COPY ./src/handlers/tts/bailian_tts/pyproject.toml $WORK_DIR/src/handlers/tts/bailian_tts/pyproject.toml
COPY ./src/handlers/asr/sensevoice/pyproject.toml $WORK_DIR/src/handlers/asr/sensevoice/pyproject.toml
COPY ./src/handlers/avatar/musetalk/pyproject.toml $WORK_DIR/src/handlers/avatar/musetalk/pyproject.toml
# Copy third_party directory
COPY ./src/third_party $WORK_DIR/src/third_party
# Install uv package manager and setup Python virtual environment using system Python 3.11.x
RUN pip install --no-cache-dir uv && \
uv venv --python /usr/bin/python3.11 --seed
# Activate virtual environment and install dependencies
# Install Python dependencies into the uv-managed virtual environment.
# `uv venv` (earlier layer) creates the environment at $WORK_DIR/.venv.
RUN uv pip install setuptools pip wheel && \
    uv sync --all-packages && \
    # mmcv is reinstalled via mim to pick up the prebuilt CUDA wheels
    uv pip uninstall mmcv && \
    uv run mim install mmcv==2.2.0 --force && \
    # Relax mmdet's mmcv upper bound so it accepts mmcv 2.2.0.
    # BUG FIX: this previously used an undefined $VENV_PATH, so the sed patch
    # silently never ran; the venv actually lives at $WORK_DIR/.venv.
    MMDET_INIT_FILE="$WORK_DIR/.venv/lib/python3.11/site-packages/mmdet/__init__.py" && \
    if [ -f "$MMDET_INIT_FILE" ]; then \
        sed -i "s/mmcv_maximum_version = '[^']*'/mmcv_maximum_version = '2.2.1'/g" "$MMDET_INIT_FILE"; \
    fi
# =============================================================================
# Application Source Code and Configuration
# =============================================================================
# Copy build scripts, source code, and configuration first
COPY ./scripts $WORK_DIR/scripts
COPY ./src $WORK_DIR/src
COPY ./config $WORK_DIR/config
# =============================================================================
# Runtime Assets
# =============================================================================
# Copy runtime resources and environment files
COPY ./resource $WORK_DIR/resource
# COPY ./.env* $WORK_DIR/
# Replace any CPU onnxruntime pulled in transitively with the GPU build so
# ONNX inference runs on CUDA. NOTE(review): assumes onnxruntime-gpu 1.20.2
# matches the CUDA 12.8 base image — confirm when bumping either version.
RUN uv pip uninstall onnxruntime onnxruntime-gpu && \
    uv pip install --no-cache-dir onnxruntime-gpu==1.20.2
# =============================================================================
# Final Cleanup and Optimization
# =============================================================================
# Clean up build artifacts and temporary files to reduce image size
# (note: rm in a later layer does not shrink earlier layers; this only keeps
# THIS layer from adding cache content)
RUN rm -rf /root/.cache/uv/* /tmp/* /var/tmp/*
ARG BUILD_VERSION=dev
ARG BUILD_DATE=""
ENV APP_VERSION=${BUILD_VERSION} \
BUILD_DATE=${BUILD_DATE}
# =============================================================================
# Container Entry Point
# =============================================================================
# Use uv to run the application with proper virtual environment
ENTRYPOINT ["uv", "run", "src/demo.py"]
......@@ -34,12 +34,11 @@ ChatEngine 编排引擎
- **实时音视频** — 基于 WebRTC (aiortc) 实现低延迟双向通信
- **数字人驱动** — 支持 LiteAvatar 轻量渲染、MuseTalk 唇形驱动、LAM 表情驱动
- **单机全流程** — 在单台 GPU 服务器上即可运行完整系统
- **Docker 部署** — 提供 Dockerfile 与 docker-compose,支持 TURN 服务器 (coturn)
## 环境要求
- **Python**: >=3.11.7, <3.12
- **GPU**: NVIDIA CUDA 兼容显卡(推荐 >=8GB 显存)
- **GPU**: NVIDIA CUDA 兼容显卡(推荐 >=24GB 显存)
- **OS**: Linux / Windows
- **Node.js**: >=18(前端开发需要)
......@@ -87,15 +86,8 @@ docker-compose up -d
系统使用 YAML 配置文件驱动,位于 `config/` 目录:
| 配置文件 | 说明 |
|---------|------|
| `chat_with_openai_compatible.yaml` | OpenAI 兼容 API(DeepSeek/GPT 等)+ 本地 ASR + 本地 TTS |
| `chat_with_openai_compatible_edge_tts.yaml` | OpenAI 兼容 API + Edge TTS |
| `chat_with_openai_compatible_bailian_cosyvoice.yaml` | OpenAI 兼容 API + 阿里百炼 CosyVoice |
| `chat_with_openai_compatible_bailian_cosyvoice_musetalk.yaml` | 上述配置 + MuseTalk 数字人 |
| `chat_with_lam.yaml` | LAM 数字人 + 本地模型 |
| `chat_with_minicpm.yaml` | 本地 MiniCPM 模型 |
| `chat_with_qwen_omni.yaml` | 通义千问多模态模型 |
配置结构示例:
......
This diff is collapsed.
#!/usr/bin/env bash
# Build the open-avatar-chat image for a given config file and run it.
# Usage: ./build_and_run.sh --config config/<name>.yaml
set -e

CONFIG_PATH=""
while [[ "$#" -gt 0 ]]; do
    case $1 in
        -config | --config )
            CONFIG_PATH="$2"
            shift 2
            ;;
        * )
            # BUG FIX: an unrecognized argument previously caused an infinite
            # loop (nothing was ever shifted); fail fast instead.
            echo "Unknown argument: $1" >&2
            exit 1
            ;;
    esac
done

# The config is required both at build time (dependency resolution via
# --build-arg) and at run time (passed to demo.py).
if [ -z "${CONFIG_PATH}" ]; then
    echo "Usage: $0 --config <path-to-config.yaml>" >&2
    exit 1
fi
echo "${CONFIG_PATH}"

docker build \
    --build-arg CONFIG_FILE="${CONFIG_PATH}" \
    -t open-avatar-chat:0.0.1 .

docker run --rm --gpus all -it --name open-avatar-chat \
    --network=host \
    -v "$(pwd)"/build:/root/open-avatar-chat/build \
    -v "$(pwd)"/models:/root/open-avatar-chat/models \
    -v "$(pwd)"/ssl_certs:/root/open-avatar-chat/ssl_certs \
    -v "$(pwd)"/config:/root/open-avatar-chat/config \
    -v "$(pwd)"/models/musetalk/s3fd-619a316812/:/root/.cache/torch/hub/checkpoints/ \
    -e NVIDIA_VISIBLE_DEVICES=all \
    -e NVIDIA_DRIVER_CAPABILITIES=compute,video,utility \
    -p 8282:8282 \
    open-avatar-chat:0.0.1 \
    --config "${CONFIG_PATH}"
#!/usr/bin/env bash
# Build script for CUDA 12.8 Dockerfile with dynamic versioning
# Usage: ./build_cuda128.sh [--tag TAG] [--no-cache] [--push REGISTRY]
set -e
# Default values
IMAGE_TAG=""
NO_CACHE=""
PUSH_REGISTRY=""
CONFIG_PATH="config/chat_with_openai_compatible_bailian_cosyvoice.yaml"
# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color
# Logging helpers: consistent colorized "[LEVEL] message" lines.
# _log COLOR LEVEL MESSAGE — shared formatter for the public helpers below.
_log() { echo -e "${1}[${2}]${NC} ${3}"; }
log_info() {
    _log "$BLUE" "INFO" "$1"
}
log_success() {
    _log "$GREEN" "SUCCESS" "$1"
}
log_warning() {
    _log "$YELLOW" "WARNING" "$1"
}
log_error() {
    _log "$RED" "ERROR" "$1"
}
# Derive a Docker-tag-safe version string from the working copy:
#   "<branch>-<short-commit>" with '/' replaced by '_' (slashes are invalid
#   in Docker tags), or a dated fallback when git metadata is unavailable.
# (Tag-based versioning was previously considered; see repository history.)
get_version() {
    if [ -d .git ]; then
        local branch_name commit_hash
        branch_name=$(git rev-parse --abbrev-ref HEAD 2>/dev/null)
        commit_hash=$(git rev-parse --short HEAD 2>/dev/null)
        if [ -n "$branch_name" ] && [ -n "$commit_hash" ]; then
            # Substitute '_' for '/' so e.g. "feature/x" yields a valid tag.
            echo "${branch_name//\//_}-${commit_hash}"
        else
            echo "unknown-$(date +%Y%m%d)"
        fi
    else
        echo "dev-$(date +%Y%m%d)"
    fi
}
# Parse arguments
while [[ "$#" -gt 0 ]]; do
case $1 in
--tag )
IMAGE_TAG="$2"
shift 2
;;
--no-cache )
NO_CACHE="--no-cache"
shift
;;
--push )
PUSH_REGISTRY="$2"
shift 2
;;
--config )
CONFIG_PATH="$2"
shift 2
;;
-h | --help )
echo "Usage: $0 [OPTIONS]"
echo "Options:"
echo " --tag TAG Docker镜像标签 (默认: open-avatar-chat:VERSION)"
echo " --no-cache 不使用Docker构建缓存"
echo " --push REGISTRY 构建后推送到指定注册表"
echo " --config PATH 运行时使用的配置文件路径"
echo " -h, --help 显示帮助信息"
echo ""
echo "Examples:"
echo " $0 # 基本构建"
echo " $0 --tag myapp:latest # 自定义标签"
echo " $0 --push ghcr.io/user/repo # 构建并推送"
echo " $0 --no-cache --push ghcr.io/user/repo # 强制重建并推送"
exit 0
;;
* )
log_error "未知参数: $1"
exit 1
;;
esac
done
# Validate Docker is available
if ! command -v docker &> /dev/null; then
log_error "Docker 未找到,请先安装Docker"
exit 1
fi
# Check if Dockerfile.cuda12.8 exists
if [ ! -f "Dockerfile.cuda12.8" ]; then
log_error "Dockerfile.cuda12.8 未找到"
exit 1
fi
# Set default image tag if not provided
if [ -z "$IMAGE_TAG" ]; then
IMAGE_TAG="open-avatar-chat:latest"
fi
# Get build metadata
BUILD_DATE=$(date -u +'%Y-%m-%dT%H:%M:%SZ')
BUILD_VERSION=$(get_version)
log_info "=== 构建信息 ==="
log_info "镜像标签: ${IMAGE_TAG}"
log_info "构建版本: ${BUILD_VERSION} (嵌入到镜像标签中)"
log_info "构建时间: ${BUILD_DATE} (嵌入到镜像标签中)"
log_info "Dockerfile: Dockerfile.cuda12.8"
#Build command
BUILD_CMD="docker build ${NO_CACHE} \
--build-arg BUILD_VERSION=\"${BUILD_VERSION}\" \
--build-arg BUILD_DATE=\"${BUILD_DATE}\" \
-f Dockerfile.cuda12.8 \
-t \"${IMAGE_TAG}\" ."
log_info "=== 开始构建 ==="
log_info "执行命令: ${BUILD_CMD}"
# Execute build
if eval $BUILD_CMD; then
log_success "镜像构建成功: ${IMAGE_TAG}"
# Show image info
log_info "=== 镜像信息 ==="
docker images "${IMAGE_TAG}" --format "table {{.Repository}}\t{{.Tag}}\t{{.Size}}\t{{.CreatedAt}}"
# Push if requested
if [ -n "$PUSH_REGISTRY" ]; then
PUSH_TAG="${PUSH_REGISTRY}:${BUILD_VERSION}"
log_info "=== 推送镜像 ==="
log_info "标记镜像: ${PUSH_TAG}"
if docker tag "${IMAGE_TAG}" "${PUSH_TAG}"; then
log_info "推送镜像: ${PUSH_TAG}"
if docker push "${PUSH_TAG}"; then
log_success "镜像推送成功: ${PUSH_TAG}"
else
log_error "镜像推送失败"
exit 1
fi
else
log_error "镜像标记失败"
exit 1
fi
fi
# Provide usage instructions
log_info "=== 使用说明 ==="
echo ""
echo "查看镜像中的commit信息:"
echo "docker inspect \"${IMAGE_TAG}\" | grep -A 10 'Labels'"
echo ""
echo "启动容器:"
echo "docker run --rm --gpus all -it \\"
echo " --network=host \\"
echo " -v \$(pwd)/build:/root/open-avatar-chat/build \\"
echo " -v \$(pwd)/models:/root/open-avatar-chat/models \\"
echo " -v \$(pwd)/ssl_certs:/root/open-avatar-chat/ssl_certs \\"
echo " -v \$(pwd)/config:/root/open-avatar-chat/config \\"
echo " -v \$(pwd)/models/musetalk/s3fd-619a316812/:/root/.cache/torch/hub/checkpoints/ \\"
echo " -p 8282:8282 \\"
echo " \"${IMAGE_TAG}\" \\"
echo " --config \"${CONFIG_PATH}\""
echo ""
echo "在容器内查看版本信息:"
echo "docker run --rm \"${IMAGE_TAG}\" printenv | grep -E '^(APP_VERSION|BUILD_DATE|BUILD_COMMIT)='"
echo ""
else
log_error "镜像构建失败"
exit 1
fi
listening-port=3478
listening-ip=0.0.0.0
tls-listening-port=5349
realm=turn.open-avatar-chat.turnserver
user=admin:admin
lt-cred-mech
min-port=49152
max-port=65535
cert=/etc/turn_cert.pem
key=/etc/turn_key.pem
allowed-peer-ip=0.0.0.0
services:
  coturn:
    image: coturn/coturn
    container_name: coturn-server
    command: >
      --log-file=stdout
    environment:
      - DETECT_EXTERNAL_IP=yes
      - DETECT_RELAY_IP=yes
    volumes:
      - ./coturn-data/turnserver.conf:/etc/coturn/turnserver.conf
      - ./ssl_certs/localhost.crt:/etc/turn_cert.pem
      # BUG FIX: the TLS private key must come from the .key file — the
      # certificate was previously mounted twice, breaking TLS on 5349.
      - ./ssl_certs/localhost.key:/etc/turn_key.pem
    network_mode: host
    restart: unless-stopped

  open-avatar-chat:
    image: open-avatar-chat:latest
    container_name: open-avatar-chat
    environment:
      - NVIDIA_VISIBLE_DEVICES=all
      - NVIDIA_DRIVER_CAPABILITIES=compute,utility,video
      # - UV_INDEX_URL=https://mirrors.aliyun.com/pypi/simple/
    command: >
      --config=/root/open-avatar-chat/config/chat_with_openai_compatible_bailian_cosyvoice.yaml
    # BUG FIX: removed the shell-style trailing backslashes from the volume
    # entries — in YAML they were parsed as part of the path strings.
    volumes:
      - ./build:/root/open-avatar-chat/build
      - ./models:/root/open-avatar-chat/models
      - ./ssl_certs:/root/open-avatar-chat/ssl_certs
      - ./config:/root/open-avatar-chat/config
      - ./.env:/root/open-avatar-chat/.env
      - ./models/musetalk/s3fd-619a316812/:/root/.cache/torch/hub/checkpoints/
      - ./exp:/root/open-avatar-chat/exp
      - ./resource:/root/open-avatar-chat/resource
    ports:
      - "8282:8282"
    networks:
      - nat_network
    shm_size: "2gb"
    stdin_open: true
    tty: true
    restart: unless-stopped
    depends_on:
      - coturn
    deploy:
      resources:
        reservations:
          devices:
            - driver: nvidia
              count: all
              capabilities: [ gpu ]

networks:
  nat_network:
    driver: bridge
#!/usr/bin/env bash
# Run the open-avatar-chat container with the given (or default) config.
CONFIG_PATH=""
while [[ "$#" -gt 0 ]]; do
    case $1 in
        -config | --config )
            CONFIG_PATH="$2"
            shift 2
            ;;
        * )
            # BUG FIX: an unknown argument previously spun this loop forever
            # because nothing was shifted; fail fast instead.
            echo "Unknown argument: $1" >&2
            exit 1
            ;;
    esac
done

# Determine config file path (default: bailian/cosyvoice preset)
CONFIG_FILE="${CONFIG_PATH:-config/chat_with_openai_compatible_bailian_cosyvoice.yaml}"

# If the config wires in the AvatarMusetalk handler, pass PYTORCH_JIT=0
# (MuseTalk is run with TorchScript JIT disabled).
ENV_VARS=""
if grep -q "AvatarMusetalk:" "$CONFIG_FILE" 2>/dev/null && grep -q "module: avatar/musetalk/avatar_handler_musetalk" "$CONFIG_FILE" 2>/dev/null; then
    echo "Detected AvatarMusetalk module, adding PYTORCH_JIT=0 environment variable"
    ENV_VARS="-e PYTORCH_JIT=0"
else
    echo "No AvatarMusetalk module detected in config, skipping PYTORCH_JIT environment variable"
fi

# $ENV_VARS is deliberately unquoted so an empty value expands to no args.
docker run --rm --gpus all -it --name open-avatar-chat \
    --network=host \
    $ENV_VARS \
    -v "$(pwd)"/build:/root/open-avatar-chat/build \
    -v "$(pwd)"/models:/root/open-avatar-chat/models \
    -v "$(pwd)"/ssl_certs:/root/open-avatar-chat/ssl_certs \
    -v "$(pwd)"/config:/root/open-avatar-chat/config \
    -v "$(pwd)"/.env:/root/open-avatar-chat/.env \
    -v "$(pwd)"/models/musetalk/s3fd-619a316812/:/root/.cache/torch/hub/checkpoints/ \
    -v "$(pwd)"/exp:/root/open-avatar-chat/exp \
    -v "$(pwd)"/resource:/root/open-avatar-chat/resource \
    -e NVIDIA_VISIBLE_DEVICES=all \
    -e NVIDIA_DRIVER_CAPABILITIES=compute,video,utility \
    -p 8282:8282 \
    open-avatar-chat:latest \
    --config "$CONFIG_FILE"
#!/bin/bash
# Define log directory
LOG_DIR="logs"
mkdir -p $LOG_DIR
echo "Starting all services in background..."
# Launch a named service in the background, redirecting its output to a
# per-service log file and recording its PID for later shutdown.
run_background() {
    local svc_name="$1"
    local svc_cmd="$2"
    local log_path="$LOG_DIR/${svc_name// /_}.log"

    echo "Starting $svc_name..."
    # nohup detaches the child from this terminal's HUP signal
    nohup bash -c "$svc_cmd" > "$log_path" 2>&1 &
    local child_pid=$!

    echo "$svc_name started with PID: $child_pid. Log: $log_path"
    echo "$child_pid" >> "$LOG_DIR/running_pids.txt"
}
# Clear previous PID file
> "$LOG_DIR/running_pids.txt"
# 1. FunASR Service
run_background "FunASR Service" "source $(conda info --base)/etc/profile.d/conda.sh && conda activate funasr && python /home/VoiceModel/asr/Fun-ASR/funasr_api.py"
# 2. Kokoro TTS Service
run_background "Kokoro TTS Service" "source $(conda info --base)/etc/profile.d/conda.sh && conda activate kokoro && python /home/VoiceModel/tts/kokoro-tts-zh/server.py"
# 3. MuseTalk Service
run_background "MuseTalk Service" "source $(conda info --base)/etc/profile.d/conda.sh && conda activate musetalk && python /home/VoiceModel/shuziren/MuseTalk-main/realtime_server.py"
# 4. RealTimeMedicalDialogue Demo
run_background "RealTimeMedicalDialogue Demo" "source $(conda info --base)/etc/profile.d/conda.sh && conda activate openavatar_new && python /home/VoiceModel/shuziren/RealTimeMedicalDialogue/src/demo.py"
# 5. Frontend Dev Server
run_background "Frontend Dev Server" "cd /home/VoiceModel/shuziren/RealTimeMedicalDialogue/src/handlers/client/rtc_client/frontend && pnpm dev"
echo "---------------------------------------------------"
echo "All services started!"
echo "You can check logs in '$LOG_DIR' directory."
echo "To stop all services, run: ./stop_all_services.sh"
echo "---------------------------------------------------"
#!/bin/bash
# Stop every service started by start_all_services.sh.
# PIDs are read from logs/running_pids.txt; named process patterns are
# killed afterwards as a safety net.

LOG_DIR="logs"
PID_FILE="$LOG_DIR/running_pids.txt"

if [ ! -f "$PID_FILE" ]; then
    echo "No running services found (PID file missing)."
    exit 0
fi

echo "Stopping all services..."

while read -r pid; do
    if [ -n "$pid" ]; then
        if kill -0 "$pid" 2>/dev/null; then
            echo "Killing PID $pid..."
            # BUG FIX: kill children BEFORE the parent — once the parent
            # dies its children are reparented to init, and `pkill -P`
            # can no longer find them by parent PID.
            pkill -P "$pid" 2>/dev/null
            kill "$pid" 2>/dev/null
        else
            echo "PID $pid not running."
        fi
    fi
done < "$PID_FILE"

# Clean up pnpm processes specifically as they spawn node
pkill -f "pnpm dev"
pkill -f "vite"

# Clean up python processes matching our paths (safety net)
pkill -f "funasr_api.py"
pkill -f "kokoro-tts-zh/server.py"
pkill -f "realtime_server.py"
pkill -f "RealTimeMedicalDialogue/src/demo.py"

rm -f "$PID_FILE"
echo "All services stopped."
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment