Files
xlxumu/docs/operations/部署文档.md

39 KiB
Raw Permalink Blame History

部署文档

版本历史

版本 日期 作者 变更说明
1.0 2024-01-20 运维团队 初始版本
1.1 2024-09-21 运维团队 更新部署架构,简化技术栈

1. 部署概述

1.1 部署架构

graph TB
    subgraph "负载均衡层"
        LB[Nginx负载均衡器]
    end
    
    subgraph "应用层"
        WEB1[Web服务器1]
        WEB2[Web服务器2]
        API1[API服务器1]
        API2[API服务器2]
    end
    
    subgraph "数据层"
        DB1[(MySQL主库)]
        DB2[(MySQL从库)]
        REDIS[(Redis集群)]
        MONGO[(MongoDB)]
    end
    
    subgraph "文件存储"
        OSS[对象存储]
    end
    
    LB --> WEB1
    LB --> WEB2
    LB --> API1
    LB --> API2
    
    WEB1 --> DB1
    WEB2 --> DB1
    API1 --> DB1
    API2 --> DB1
    
    DB1 --> DB2
    
    API1 --> REDIS
    API2 --> REDIS
    API1 --> MONGO
    API2 --> MONGO
    
    API1 --> OSS
    API2 --> OSS

1.2 部署环境

环境 用途 服务器配置 域名
开发环境 开发测试 2核4G dev.xlxumu.com
测试环境 功能测试 4核8G test.xlxumu.com
预生产环境 生产前验证 8核16G pre.xlxumu.com
生产环境 正式运行 16核32G www.xlxumu.com

1.3 技术栈版本

组件 版本 说明
Node.js 18.x 后端运行时
MySQL 8.0 主数据库
Redis 6.x 缓存数据库
MongoDB 5.x 文档数据库
Nginx 1.20+ Web服务器
Docker 20.x 容器化
Docker Compose 2.x 容器编排

2. 服务器环境准备

2.1 系统要求

# CentOS 7/8 system preparation
# 1. Update the system
sudo yum update -y

# 2. Install base tooling
sudo yum install -y wget curl git vim htop

# 3. Open firewall ports (HTTP, HTTPS, application port)
sudo firewall-cmd --permanent --add-port=80/tcp
sudo firewall-cmd --permanent --add-port=443/tcp
sudo firewall-cmd --permanent --add-port=3000/tcp
sudo firewall-cmd --reload

# 4. Set the timezone
sudo timedatectl set-timezone Asia/Shanghai

# 5. Raise open-file limits.
# FIX: plain `>> /etc/security/limits.conf` redirection runs in the
# caller's (non-root) shell and fails with "Permission denied";
# `sudo tee -a` performs the append with root privileges.
echo "* soft nofile 65536" | sudo tee -a /etc/security/limits.conf
echo "* hard nofile 65536" | sudo tee -a /etc/security/limits.conf

2.2 Docker环境安装

#!/bin/bash
# install-docker.sh — install Docker CE and Docker Compose on CentOS.

# Remove legacy Docker packages that conflict with docker-ce.
sudo yum remove docker docker-client docker-client-latest docker-common docker-latest docker-latest-logrotate docker-logrotate docker-engine

# Install repository-management prerequisites.
sudo yum install -y yum-utils device-mapper-persistent-data lvm2

# Register the official Docker CE yum repository.
sudo yum-config-manager --add-repo https://download.docker.com/linux/centos/docker-ce.repo

# Install Docker CE, the CLI, and containerd.
sudo yum install -y docker-ce docker-ce-cli containerd.io

# Start Docker now and enable it on boot.
sudo systemctl start docker
sudo systemctl enable docker

# Install Docker Compose v2 as a standalone binary.
# NOTE(review): version is pinned at 2.15.1 — bump deliberately when upgrading.
sudo curl -L "https://github.com/docker/compose/releases/download/v2.15.1/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose
sudo chmod +x /usr/local/bin/docker-compose

# Verify both tools respond.
docker --version
docker-compose --version

# Configure registry mirrors (faster pulls in CN) and bounded json-file logs.
sudo mkdir -p /etc/docker
sudo tee /etc/docker/daemon.json <<-'EOF'
{
  "registry-mirrors": [
    "https://mirror.ccs.tencentyun.com",
    "https://docker.mirrors.ustc.edu.cn"
  ],
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "100m",
    "max-file": "3"
  }
}
EOF

sudo systemctl daemon-reload
sudo systemctl restart docker

2.3 SSL证书配置

#!/bin/bash
# setup-ssl.sh — obtain Let's Encrypt certificates and schedule renewal.

# Install Certbot (EPEL provides the package on CentOS).
sudo yum install -y epel-release
sudo yum install -y certbot python3-certbot-nginx

# Request SSL certificates for the public domains.
sudo certbot --nginx -d www.xlxumu.com -d api.xlxumu.com

# Schedule automatic renewal.
# FIX: the original `echo "…" | sudo crontab -` REPLACED root's entire
# crontab, silently deleting any existing jobs. A dedicated /etc/cron.d
# file is additive (note the mandatory user field "root").
echo "0 12 * * * root /usr/bin/certbot renew --quiet" | sudo tee /etc/cron.d/certbot-renew

3. 数据库部署

3.1 MySQL部署配置

# docker-compose.mysql.yml
version: '3.8'

services:
  mysql-master:
    image: mysql:8.0
    container_name: mysql-master
    restart: always
    environment:
      MYSQL_ROOT_PASSWORD: ${MYSQL_ROOT_PASSWORD}
      MYSQL_DATABASE: xlxumu_db
      MYSQL_USER: xlxumu_user
      MYSQL_PASSWORD: ${MYSQL_PASSWORD}
    ports:
      - "3306:3306"
    volumes:
      - mysql_master_data:/var/lib/mysql
      - ./mysql/conf/master.cnf:/etc/mysql/conf.d/master.cnf
      - ./mysql/init:/docker-entrypoint-initdb.d
    command: --default-authentication-plugin=mysql_native_password
    networks:
      - xlxumu_network

  mysql-slave:
    image: mysql:8.0
    container_name: mysql-slave
    restart: always
    environment:
      MYSQL_ROOT_PASSWORD: ${MYSQL_ROOT_PASSWORD}
      MYSQL_DATABASE: xlxumu_db
      MYSQL_USER: xlxumu_user
      MYSQL_PASSWORD: ${MYSQL_PASSWORD}
    ports:
      - "3307:3306"
    volumes:
      - mysql_slave_data:/var/lib/mysql
      - ./mysql/conf/slave.cnf:/etc/mysql/conf.d/slave.cnf
    command: --default-authentication-plugin=mysql_native_password
    depends_on:
      - mysql-master
    networks:
      - xlxumu_network

volumes:
  mysql_master_data:
  mysql_slave_data:

networks:
  xlxumu_network:
    external: true

MySQL主从配置

# mysql/conf/master.cnf
[mysqld]
server-id = 1
log-bin = mysql-bin
binlog-format = ROW
binlog-do-db = xlxumu_db
expire_logs_days = 7
max_binlog_size = 100M

# 性能优化
innodb_buffer_pool_size = 1G
innodb_log_file_size = 256M
innodb_flush_log_at_trx_commit = 2
sync_binlog = 0
# mysql/conf/slave.cnf
[mysqld]
server-id = 2
relay-log = mysql-relay-bin
log-bin = mysql-bin
binlog-format = ROW
replicate-do-db = xlxumu_db
read_only = 1

数据库初始化脚本

-- mysql/init/01-init-database.sql
CREATE DATABASE IF NOT EXISTS xlxumu_db CHARACTER SET utf8mb4 COLLATE utf8mb4_unicode_ci;

USE xlxumu_db;

-- 创建用户表
CREATE TABLE users (
    id INT PRIMARY KEY AUTO_INCREMENT,
    username VARCHAR(50) UNIQUE NOT NULL,
    email VARCHAR(100) UNIQUE NOT NULL,
    password_hash VARCHAR(255) NOT NULL,
    role ENUM('admin', 'farmer', 'trader') NOT NULL,
    status ENUM('active', 'inactive', 'suspended') DEFAULT 'active',
    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
    updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
    INDEX idx_username (username),
    INDEX idx_email (email),
    INDEX idx_role (role)
);

-- 创建养殖场表
CREATE TABLE farms (
    id INT PRIMARY KEY AUTO_INCREMENT,
    name VARCHAR(100) NOT NULL,
    owner_id INT NOT NULL,
    location VARCHAR(200),
    area DECIMAL(10,2),
    description TEXT,
    status ENUM('active', 'inactive') DEFAULT 'active',
    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
    updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
    FOREIGN KEY (owner_id) REFERENCES users(id),
    INDEX idx_owner (owner_id),
    INDEX idx_status (status)
);

-- 创建动物表
CREATE TABLE animals (
    id INT PRIMARY KEY AUTO_INCREMENT,
    farm_id INT NOT NULL,
    tag_number VARCHAR(50) UNIQUE NOT NULL,
    breed VARCHAR(50),
    birth_date DATE,
    gender ENUM('male', 'female'),
    weight DECIMAL(8,2),
    health_status ENUM('healthy', 'sick', 'quarantine', 'deceased') DEFAULT 'healthy',
    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
    updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
    FOREIGN KEY (farm_id) REFERENCES farms(id),
    INDEX idx_farm (farm_id),
    INDEX idx_tag (tag_number),
    INDEX idx_breed (breed),
    INDEX idx_health_status (health_status)
);

-- 创建交易表
CREATE TABLE transactions (
    id INT PRIMARY KEY AUTO_INCREMENT,
    seller_id INT NOT NULL,
    buyer_id INT,
    animal_id INT NOT NULL,
    price DECIMAL(10,2) NOT NULL,
    status ENUM('pending', 'completed', 'cancelled') DEFAULT 'pending',
    transaction_date TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
    created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
    updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP,
    FOREIGN KEY (seller_id) REFERENCES users(id),
    FOREIGN KEY (buyer_id) REFERENCES users(id),
    FOREIGN KEY (animal_id) REFERENCES animals(id),
    INDEX idx_seller (seller_id),
    INDEX idx_buyer (buyer_id),
    INDEX idx_animal (animal_id),
    INDEX idx_status (status)
);

3.2 Redis集群部署

# docker-compose.redis.yml
version: '3.8'

services:
  redis-master:
    image: redis:6-alpine
    container_name: redis-master
    restart: always
    ports:
      - "6379:6379"
    volumes:
      - redis_master_data:/data
      - ./redis/redis-master.conf:/usr/local/etc/redis/redis.conf
    command: redis-server /usr/local/etc/redis/redis.conf
    networks:
      - xlxumu_network

  redis-slave:
    image: redis:6-alpine
    container_name: redis-slave
    restart: always
    ports:
      - "6380:6379"
    volumes:
      - redis_slave_data:/data
      - ./redis/redis-slave.conf:/usr/local/etc/redis/redis.conf
    command: redis-server /usr/local/etc/redis/redis.conf
    depends_on:
      - redis-master
    networks:
      - xlxumu_network

  redis-sentinel:
    image: redis:6-alpine
    container_name: redis-sentinel
    restart: always
    ports:
      - "26379:26379"
    volumes:
      - ./redis/sentinel.conf:/usr/local/etc/redis/sentinel.conf
    command: redis-sentinel /usr/local/etc/redis/sentinel.conf
    depends_on:
      - redis-master
      - redis-slave
    networks:
      - xlxumu_network

volumes:
  redis_master_data:
  redis_slave_data:

networks:
  xlxumu_network:
    external: true

Redis配置文件

# redis/redis-master.conf
bind 0.0.0.0
port 6379
requirepass your_redis_password
masterauth your_redis_password

# 持久化配置
save 900 1
save 300 10
save 60 10000

# 内存配置
maxmemory 1gb
maxmemory-policy allkeys-lru

# 日志配置
loglevel notice
logfile /var/log/redis/redis-server.log
# redis/redis-slave.conf
bind 0.0.0.0
port 6379
requirepass your_redis_password
masterauth your_redis_password

# 主从配置
replicaof redis-master 6379
replica-read-only yes

# 持久化配置
save 900 1
save 300 10
save 60 10000

# 内存配置
maxmemory 1gb
maxmemory-policy allkeys-lru

3.3 MongoDB部署

# docker-compose.mongodb.yml
version: '3.8'

services:
  mongodb:
    image: mongo:5
    container_name: mongodb
    restart: always
    environment:
      MONGO_INITDB_ROOT_USERNAME: ${MONGO_ROOT_USERNAME}
      MONGO_INITDB_ROOT_PASSWORD: ${MONGO_ROOT_PASSWORD}
      MONGO_INITDB_DATABASE: xlxumu_logs
    ports:
      - "27017:27017"
    volumes:
      - mongodb_data:/data/db
      - ./mongodb/init:/docker-entrypoint-initdb.d
    networks:
      - xlxumu_network

volumes:
  mongodb_data:

networks:
  xlxumu_network:
    external: true

4. 应用部署

4.1 后端API服务部署

# backend/Dockerfile
FROM node:18-alpine

# 设置工作目录
WORKDIR /app

# 复制package文件
COPY package*.json ./

# 安装依赖
RUN npm ci --only=production

# 复制源代码
COPY . .

# 创建非root用户
RUN addgroup -g 1001 -S nodejs
RUN adduser -S nodejs -u 1001

# 更改文件所有者
RUN chown -R nodejs:nodejs /app
USER nodejs

# 暴露端口
EXPOSE 3000

# 健康检查
HEALTHCHECK --interval=30s --timeout=3s --start-period=5s --retries=3 \
  CMD curl -f http://localhost:3000/health || exit 1

# 启动应用
CMD ["npm", "start"]
# docker-compose.backend.yml
version: '3.8'

services:
  backend-api-1:
    build:
      context: ./backend
      dockerfile: Dockerfile
    container_name: backend-api-1
    restart: always
    environment:
      NODE_ENV: production
      PORT: 3000
      DATABASE_URL: mysql://xlxumu_user:${MYSQL_PASSWORD}@mysql-master:3306/xlxumu_db
      REDIS_URL: redis://:${REDIS_PASSWORD}@redis-master:6379
      MONGODB_URL: mongodb://${MONGO_ROOT_USERNAME}:${MONGO_ROOT_PASSWORD}@mongodb:27017/xlxumu_logs
      JWT_SECRET: ${JWT_SECRET}
    ports:
      - "3001:3000"
    volumes:
      - ./logs:/app/logs
    depends_on:
      - mysql-master
      - redis-master
      - mongodb
    networks:
      - xlxumu_network
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:3000/health"]
      interval: 30s
      timeout: 10s
      retries: 3

  backend-api-2:
    build:
      context: ./backend
      dockerfile: Dockerfile
    container_name: backend-api-2
    restart: always
    environment:
      NODE_ENV: production
      PORT: 3000
      DATABASE_URL: mysql://xlxumu_user:${MYSQL_PASSWORD}@mysql-master:3306/xlxumu_db
      REDIS_URL: redis://:${REDIS_PASSWORD}@redis-master:6379
      MONGODB_URL: mongodb://${MONGO_ROOT_USERNAME}:${MONGO_ROOT_PASSWORD}@mongodb:27017/xlxumu_logs
      JWT_SECRET: ${JWT_SECRET}
    ports:
      - "3002:3000"
    volumes:
      - ./logs:/app/logs
    depends_on:
      - mysql-master
      - redis-master
      - mongodb
    networks:
      - xlxumu_network
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:3000/health"]
      interval: 30s
      timeout: 10s
      retries: 3

networks:
  xlxumu_network:
    external: true

4.2 前端应用部署

# admin-system/Dockerfile
FROM node:18-alpine AS builder

WORKDIR /app
COPY package*.json ./
RUN npm ci

COPY . .
RUN npm run build

FROM nginx:alpine

# 复制构建产物
COPY --from=builder /app/dist /usr/share/nginx/html

# 复制nginx配置
COPY nginx.conf /etc/nginx/nginx.conf

# 暴露端口
EXPOSE 80

CMD ["nginx", "-g", "daemon off;"]
# admin-system/nginx.conf
user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;

events {
    worker_connections 1024;
}

http {
    include /etc/nginx/mime.types;
    default_type application/octet-stream;
    
    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for"';
    
    access_log /var/log/nginx/access.log main;
    
    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
    keepalive_timeout 65;
    types_hash_max_size 2048;
    
    # Gzip压缩
    gzip on;
    gzip_vary on;
    gzip_min_length 1024;
    gzip_types text/plain text/css text/xml text/javascript application/javascript application/xml+rss application/json;
    
    server {
        listen 80;
        server_name _;
        root /usr/share/nginx/html;
        index index.html;
        
        # 处理SPA路由
        location / {
            try_files $uri $uri/ /index.html;
        }
        
        # 静态资源缓存
        location ~* \.(js|css|png|jpg|jpeg|gif|ico|svg)$ {
            expires 1y;
            add_header Cache-Control "public, immutable";
        }
        
        # 安全头
        add_header X-Frame-Options "SAMEORIGIN" always;
        add_header X-Content-Type-Options "nosniff" always;
        add_header X-XSS-Protection "1; mode=block" always;
    }
}

4.3 Nginx负载均衡配置

# nginx/nginx.conf
user nginx;
worker_processes auto;
error_log /var/log/nginx/error.log warn;
pid /var/run/nginx.pid;

events {
    worker_connections 1024;
    use epoll;
    multi_accept on;
}

http {
    include /etc/nginx/mime.types;
    default_type application/octet-stream;
    
    # 日志格式
    log_format main '$remote_addr - $remote_user [$time_local] "$request" '
                    '$status $body_bytes_sent "$http_referer" '
                    '"$http_user_agent" "$http_x_forwarded_for" '
                    '$request_time $upstream_response_time';
    
    access_log /var/log/nginx/access.log main;
    
    # 基础配置
    sendfile on;
    tcp_nopush on;
    tcp_nodelay on;
    keepalive_timeout 65;
    types_hash_max_size 2048;
    client_max_body_size 50M;
    
    # Gzip压缩
    gzip on;
    gzip_vary on;
    gzip_min_length 1024;
    gzip_comp_level 6;
    gzip_types
        text/plain
        text/css
        text/xml
        text/javascript
        application/javascript
        application/xml+rss
        application/json;
    
    # 上游服务器配置
    upstream backend_api {
        least_conn;
        server backend-api-1:3000 max_fails=3 fail_timeout=30s;
        server backend-api-2:3000 max_fails=3 fail_timeout=30s;
        keepalive 32;
    }
    
    upstream admin_web {
        server admin-web-1:80 max_fails=3 fail_timeout=30s;
        server admin-web-2:80 max_fails=3 fail_timeout=30s;
    }
    
    # 限流配置
    limit_req_zone $binary_remote_addr zone=api:10m rate=10r/s;
    limit_req_zone $binary_remote_addr zone=login:10m rate=1r/s;
    
    # API服务器配置
    server {
        listen 80;
        server_name api.xlxumu.com;
        
        # 重定向到HTTPS
        return 301 https://$server_name$request_uri;
    }
    
    server {
        listen 443 ssl http2;
        server_name api.xlxumu.com;
        
        # SSL配置
        ssl_certificate /etc/letsencrypt/live/api.xlxumu.com/fullchain.pem;
        ssl_certificate_key /etc/letsencrypt/live/api.xlxumu.com/privkey.pem;
        ssl_protocols TLSv1.2 TLSv1.3;
        ssl_ciphers ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384;
        ssl_prefer_server_ciphers off;
        
        # API代理
        location /api/ {
            limit_req zone=api burst=20 nodelay;
            
            proxy_pass http://backend_api;
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto $scheme;
            
            # 超时配置
            proxy_connect_timeout 5s;
            proxy_send_timeout 60s;
            proxy_read_timeout 60s;
            
            # 缓冲配置
            proxy_buffering on;
            proxy_buffer_size 4k;
            proxy_buffers 8 4k;
        }
        
        # 登录接口特殊限流
        location /api/auth/login {
            limit_req zone=login burst=5 nodelay;
            
            proxy_pass http://backend_api;
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto $scheme;
        }
        
        # 健康检查
        location /health {
            access_log off;
            return 200 "healthy\n";
            add_header Content-Type text/plain;
        }
    }
    
    # 管理后台配置
    server {
        listen 80;
        server_name admin.xlxumu.com;
        return 301 https://$server_name$request_uri;
    }
    
    server {
        listen 443 ssl http2;
        server_name admin.xlxumu.com;
        
        # SSL配置
        ssl_certificate /etc/letsencrypt/live/admin.xlxumu.com/fullchain.pem;
        ssl_certificate_key /etc/letsencrypt/live/admin.xlxumu.com/privkey.pem;
        ssl_protocols TLSv1.2 TLSv1.3;
        ssl_ciphers ECDHE-RSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384;
        ssl_prefer_server_ciphers off;
        
        # 静态文件代理
        location / {
            proxy_pass http://admin_web;
            proxy_set_header Host $host;
            proxy_set_header X-Real-IP $remote_addr;
            proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
            proxy_set_header X-Forwarded-Proto $scheme;
        }
        
        # 静态资源缓存
        location ~* \.(js|css|png|jpg|jpeg|gif|ico|svg|woff|woff2)$ {
            proxy_pass http://admin_web;
            expires 1y;
            add_header Cache-Control "public, immutable";
        }
    }
}

5. 部署脚本

5.1 一键部署脚本

#!/bin/bash
# deploy.sh — one-shot deployment script. Usage: ./deploy.sh [environment]

set -e

# Deployment configuration.
PROJECT_NAME="xlxumu"
DEPLOY_ENV=${1:-production}
BACKUP_DIR="/backup"
LOG_FILE="/var/log/deploy.log"

# ANSI color codes for console output.
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m'

# Logging helpers: timestamped, colorized, mirrored into $LOG_FILE.
# FIX: $LOG_FILE is now quoted — unquoted it would word-split if the
# path ever contained spaces, and `tee -a` with no operand reads stdin.
log() {
    echo -e "${GREEN}[$(date +'%Y-%m-%d %H:%M:%S')] $1${NC}" | tee -a "$LOG_FILE"
}

# Log an error and abort the deployment.
error() {
    echo -e "${RED}[$(date +'%Y-%m-%d %H:%M:%S')] ERROR: $1${NC}" | tee -a "$LOG_FILE"
    exit 1
}

# Non-fatal warning.
warn() {
    echo -e "${YELLOW}[$(date +'%Y-%m-%d %H:%M:%S')] WARNING: $1${NC}" | tee -a "$LOG_FILE"
}

# Verify the host has everything the deployment needs before starting.
# Aborts (via error) on the first missing prerequisite.
check_environment() {
    log "检查部署环境..."

    # Docker must be on PATH.
    command -v docker &> /dev/null || error "Docker未安装"

    # Docker Compose must be on PATH.
    command -v docker-compose &> /dev/null || error "Docker Compose未安装"

    # The per-environment variable file must exist.
    [ -f ".env.${DEPLOY_ENV}" ] || error "环境变量文件 .env.${DEPLOY_ENV} 不存在"

    log "环境检查完成"
}

# Back up databases and application data before touching anything.
# Requires MYSQL_ROOT_PASSWORD to be exported by the caller.
backup_data() {
    log "开始数据备份..."

    BACKUP_DATE=$(date +%Y%m%d_%H%M%S)
    BACKUP_PATH="${BACKUP_DIR}/${PROJECT_NAME}_${BACKUP_DATE}"

    mkdir -p "$BACKUP_PATH"

    # Back up MySQL.
    # FIX: ${MYSQL_ROOT_PASSWORD:?} fails loudly when unset — deploy.sh
    # previously only copied the env file later (in build_images), so this
    # step silently ran with an empty password.
    if docker ps | grep -q mysql-master; then
        log "备份MySQL数据库..."
        docker exec mysql-master mysqldump -u root -p"${MYSQL_ROOT_PASSWORD:?MYSQL_ROOT_PASSWORD not set}" --all-databases > "${BACKUP_PATH}/mysql_backup.sql"
    fi

    # Back up Redis.
    # FIX: `redis-cli --rdb` writes on the machine where redis-cli runs —
    # i.e. inside the container — so the original host path did not exist
    # there. Dump to a container path, then copy it out.
    if docker ps | grep -q redis-master; then
        log "备份Redis数据..."
        docker exec redis-master redis-cli --rdb /data/redis_backup.rdb
        docker cp redis-master:/data/redis_backup.rdb "${BACKUP_PATH}/redis_backup.rdb"
    fi

    # Back up application data files.
    if [ -d "./data" ]; then
        log "备份应用数据..."
        cp -r ./data "${BACKUP_PATH}/"
    fi

    log "数据备份完成: $BACKUP_PATH"
}

# Fetch and fast-forward the local main branch.
# NOTE(review): assumes a clean working tree — uncommitted local changes
# will make `git checkout main` fail and abort the deploy (set -e).
pull_code() {
    log "拉取最新代码..."

    git fetch origin
    git checkout main
    git pull origin main

    log "代码更新完成"
}

# Build the application images for the selected environment.
build_images() {
    log "构建Docker镜像..."

    # Activate the target environment's variables for compose/builds.
    cp .env.${DEPLOY_ENV} .env

    # Backend API image.
    log "构建后端API镜像..."
    docker build -t ${PROJECT_NAME}/backend:latest ./backend

    # Admin console image.
    log "构建管理后台镜像..."
    docker build -t ${PROJECT_NAME}/admin:latest ./admin-system

    log "镜像构建完成"
}

# Stop the currently running stack (no-op on a first deployment, when
# no docker-compose.yml is present yet).
stop_services() {
    log "停止旧服务..."

    if [ -f "docker-compose.yml" ]; then
        docker-compose down
    fi

    log "旧服务已停止"
}

# Start all services in dependency order: network, databases,
# application tier, then the load balancer.
start_services() {
    log "启动新服务..."

    # Create the shared network; suppress the error if it already exists.
    docker network create xlxumu_network 2>/dev/null || true

    # Databases first — the applications depend on them.
    log "启动数据库服务..."
    docker-compose -f docker-compose.mysql.yml up -d
    docker-compose -f docker-compose.redis.yml up -d
    docker-compose -f docker-compose.mongodb.yml up -d

    # NOTE(review): a fixed 30s wait is a guess; polling readiness
    # (e.g. a mysqladmin ping loop) would be more reliable.
    log "等待数据库启动..."
    sleep 30

    # Application tier.
    log "启动应用服务..."
    docker-compose -f docker-compose.backend.yml up -d
    docker-compose -f docker-compose.frontend.yml up -d

    # Load balancer last, once its upstreams exist.
    log "启动Nginx..."
    docker-compose -f docker-compose.nginx.yml up -d

    log "服务启动完成"
}

# Poll one HTTP endpoint until it responds (max 30 tries, 2s apart).
#   $1 - URL to probe
#   $2 - human-readable service name used in log messages
# Aborts the deployment (via error) if the endpoint never comes up.
wait_for_endpoint() {
    local url=$1
    local name=$2
    local attempt
    for attempt in {1..30}; do
        if curl -f "$url" &>/dev/null; then
            log "${name}健康检查通过"
            return 0
        fi
        sleep 2
    done
    error "${name}健康检查失败"
}

# Verify every service answers its health endpoint after the restart.
# (Deduplicates the three identical copy-pasted polling loops.)
health_check() {
    log "执行健康检查..."

    wait_for_endpoint http://localhost:3001/health "API服务1"
    wait_for_endpoint http://localhost:3002/health "API服务2"
    wait_for_endpoint http://localhost:80 "前端服务"

    log "健康检查完成"
}

# Reclaim disk: remove dangling images and stopped containers left
# behind by the rebuild. -f skips the interactive confirmation.
cleanup() {
    log "清理旧镜像..."

    # Dangling (untagged) images only.
    docker image prune -f

    # Stopped containers.
    docker container prune -f

    log "清理完成"
}

# Announce deployment completion. Currently a stub — uncomment and fill
# in a webhook token to integrate DingTalk / WeCom notifications.
send_notification() {
    log "发送部署通知..."

    # Example DingTalk webhook integration:
    # curl -X POST "https://oapi.dingtalk.com/robot/send?access_token=YOUR_TOKEN" \
    #      -H 'Content-Type: application/json' \
    #      -d '{"msgtype": "text","text": {"content": "部署完成"}}'

    log "部署通知已发送"
}

# Orchestrate the full deployment pipeline.
main() {
    # FIX: the original message interpolated "${PROJECT_NAME}${DEPLOY_ENV}"
    # with no separator, logging e.g. "xlxumuproduction".
    log "开始部署 ${PROJECT_NAME} ${DEPLOY_ENV} 环境"

    check_environment

    # Load the environment file up front (exported via set -a) so that
    # earlier steps — notably the database backup — see MYSQL_ROOT_PASSWORD
    # and friends; previously the file was only copied in build_images.
    set -a
    source ".env.${DEPLOY_ENV}"
    set +a

    backup_data
    pull_code
    build_images
    stop_services
    start_services
    health_check
    cleanup
    send_notification

    log "部署完成!"
}

# Abort with a logged error if any step fails.
trap 'error "部署过程中发生错误"' ERR

# Entry point.
main "$@"

5.2 回滚脚本

#!/bin/bash
# rollback.sh — restore a previous deployment from a backup directory
# created by deploy.sh. Usage: ./rollback.sh <backup_name>

set -e

# Where deploy.sh writes its backups.
BACKUP_DIR="/backup"
PROJECT_NAME="xlxumu"

# ANSI color codes for console output.
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
NC='\033[0m'

# Timestamped info message (stdout only — no log file here).
log() {
    echo -e "${GREEN}[$(date +'%Y-%m-%d %H:%M:%S')] $1${NC}"
}

# Timestamped error message; aborts the rollback.
error() {
    echo -e "${RED}[$(date +'%Y-%m-%d %H:%M:%S')] ERROR: $1${NC}"
    exit 1
}

# List available backup directories; abort if none match the
# <BACKUP_DIR>/<PROJECT_NAME>_* naming scheme used by deploy.sh.
list_backups() {
    log "可用备份列表:"
    ls -la ${BACKUP_DIR}/${PROJECT_NAME}_* 2>/dev/null || error "没有找到备份文件"
}

# Restore databases and application data from a named backup.
#   $1 - backup directory name under $BACKUP_DIR (e.g. xlxumu_20240120_143000)
# Requires MYSQL_ROOT_PASSWORD to be exported when a MySQL dump is present.
rollback_to_backup() {
    local backup_name=$1
    local backup_path="${BACKUP_DIR}/${backup_name}"

    if [ ! -d "$backup_path" ]; then
        error "备份目录不存在: $backup_path"
    fi

    log "开始回滚到备份: $backup_name"

    # Stop everything before restoring state.
    log "停止当前服务..."
    docker-compose down

    # Restore MySQL.
    # FIX: ${MYSQL_ROOT_PASSWORD:?} aborts loudly when unset — the original
    # expanded it unquoted and silently empty.
    if [ -f "${backup_path}/mysql_backup.sql" ]; then
        log "恢复MySQL数据库..."
        docker-compose -f docker-compose.mysql.yml up -d mysql-master
        sleep 30   # crude wait for mysqld to accept connections
        docker exec -i mysql-master mysql -u root -p"${MYSQL_ROOT_PASSWORD:?MYSQL_ROOT_PASSWORD not set}" < "${backup_path}/mysql_backup.sql"
    fi

    # Restore the Redis dump, then restart so it is loaded on boot.
    if [ -f "${backup_path}/redis_backup.rdb" ]; then
        log "恢复Redis数据..."
        docker cp "${backup_path}/redis_backup.rdb" redis-master:/data/dump.rdb
        docker restart redis-master
    fi

    # Restore application data files.
    if [ -d "${backup_path}/data" ]; then
        log "恢复应用数据..."
        rm -rf ./data
        cp -r "${backup_path}/data" ./
    fi

    # Bring everything back up.
    log "重启服务..."
    docker-compose up -d

    log "回滚完成"
}

# Entry point: with no arguments list available backups and exit;
# otherwise roll back to the named backup.
main() {
    if [ $# -eq 0 ]; then
        list_backups
        echo "使用方法: $0 <backup_name>"
        exit 1
    fi

    # FIX: quote "$1" so a backup name is passed as a single argument
    # even if it contains whitespace.
    rollback_to_backup "$1"
}

main "$@"

5.3 监控脚本

#!/bin/bash
# monitor.sh — one-shot service monitoring pass; run with "cron" as the
# first argument to append output to the log file instead of the terminal.

# Containers to watch, log destination, and alert recipient.
SERVICES=("mysql-master" "redis-master" "mongodb" "backend-api-1" "backend-api-2" "nginx")
LOG_FILE="/var/log/monitor.log"
ALERT_EMAIL="admin@xlxumu.com"

# Report whether a container with exactly this name is running.
#   $1 - container name
# Returns 0 when found in `docker ps`, 1 otherwise.
check_service() {
    local service=$1

    if docker ps --format "table {{.Names}}" | grep -q "^${service}$"; then
        echo "✅ $service 运行正常"
        return 0
    fi

    echo "❌ $service 服务异常"
    return 1
}

# Probe a service's HTTP health endpoint.
#   $1 - service name
# Services without an HTTP endpoint (databases) pass unconditionally;
# otherwise the function's status is curl's status.
check_health() {
    local service=$1
    local url=""

    case $service in
        "backend-api-1") url="http://localhost:3001/health" ;;
        "backend-api-2") url="http://localhost:3002/health" ;;
        "nginx")         url="http://localhost:80/health" ;;
        *)               return 0 ;;
    esac

    curl -f "$url" &>/dev/null
}

# Report CPU / memory / disk usage and warn when usage exceeds 80%.
check_resources() {
    echo "=== 系统资源检查 ==="

    # CPU usage from top's "Cpu(s)" summary line (field 2, '%' stripped).
    cpu_usage=$(top -bn1 | grep "Cpu(s)" | awk '{print $2}' | awk -F'%' '{print $1}')
    echo "CPU使用率: ${cpu_usage}%"

    # Memory usage = used / total as reported by `free`.
    mem_usage=$(free | grep Mem | awk '{printf("%.2f"), $3/$2 * 100.0}')
    echo "内存使用率: ${mem_usage}%"

    # Root filesystem usage.
    disk_usage=$(df -h / | awk 'NR==2 {print $5}')
    echo "磁盘使用率: $disk_usage"

    # Threshold checks.
    # FIX: the original piped into `bc -l`, which is not installed on
    # minimal CentOS images — the comparison then silently misbehaved.
    # awk is already a dependency of this script, so use it instead.
    if awk -v v="$cpu_usage" 'BEGIN { exit !(v > 80) }'; then
        echo "⚠️  CPU使用率过高: ${cpu_usage}%"
    fi

    if awk -v v="$mem_usage" 'BEGIN { exit !(v > 80) }'; then
        echo "⚠️  内存使用率过高: ${mem_usage}%"
    fi
}

# Email the alert message and append it to the monitor log.
# NOTE(review): `mail` requires a configured MTA (mailx + postfix/sendmail)
# on the host — confirm it is installed, otherwise alerts vanish silently.
send_alert() {
    local message=$1

    # Send the email alert.
    echo "$message" | mail -s "服务告警" $ALERT_EMAIL

    # Record the alert in the log file.
    echo "[$(date)] ALERT: $message" >> $LOG_FILE
}

# Run one monitoring pass: container status, health probes, resources.
# Sends a single aggregated alert if anything failed.
main() {
    echo "=== 服务监控开始 $(date) ==="

    failed_services=()

    # Check container liveness, then HTTP health, for every service.
    # FIX: "$service" and the array appends are quoted to avoid
    # word-splitting/globbing of service names.
    for service in "${SERVICES[@]}"; do
        if ! check_service "$service"; then
            failed_services+=("$service")
        elif ! check_health "$service"; then
            echo "⚠️  $service 健康检查失败"
            failed_services+=("$service")
        fi
    done

    # System resource report.
    check_resources

    # Alert once with the full list of failing services.
    if [ ${#failed_services[@]} -gt 0 ]; then
        alert_message="以下服务异常: ${failed_services[*]}"
        echo "$alert_message"
        send_alert "$alert_message"
    else
        echo "✅ 所有服务运行正常"
    fi

    echo "=== 监控完成 $(date) ==="
}

# When invoked as `monitor.sh cron`, append all output to the log file.
if [ "$1" = "cron" ]; then
    main >> "$LOG_FILE" 2>&1
else
    main
fi

6. 环境配置

6.1 环境变量配置

# .env.production
# 数据库配置
MYSQL_ROOT_PASSWORD=your_mysql_root_password
MYSQL_PASSWORD=your_mysql_password
REDIS_PASSWORD=your_redis_password
MONGO_ROOT_USERNAME=admin
MONGO_ROOT_PASSWORD=your_mongo_password

# 应用配置
NODE_ENV=production
JWT_SECRET=your_jwt_secret_key
API_BASE_URL=https://api.xlxumu.com

# 第三方服务
ALIYUN_ACCESS_KEY_ID=your_aliyun_access_key
ALIYUN_ACCESS_KEY_SECRET=your_aliyun_secret_key
WECHAT_APP_ID=your_wechat_app_id
WECHAT_APP_SECRET=your_wechat_app_secret

# 监控配置
SENTRY_DSN=your_sentry_dsn
LOG_LEVEL=info
# .env.staging
# 测试环境配置
MYSQL_ROOT_PASSWORD=staging_mysql_password
MYSQL_PASSWORD=staging_mysql_password
REDIS_PASSWORD=staging_redis_password
MONGO_ROOT_USERNAME=admin
MONGO_ROOT_PASSWORD=staging_mongo_password

NODE_ENV=staging
JWT_SECRET=staging_jwt_secret
API_BASE_URL=https://test-api.xlxumu.com

LOG_LEVEL=debug

6.2 Docker Compose主配置

# docker-compose.yml
version: '3.8'

services:
  # 数据库服务
  mysql-master:
    extends:
      file: docker-compose.mysql.yml
      service: mysql-master

  redis-master:
    extends:
      file: docker-compose.redis.yml
      service: redis-master

  mongodb:
    extends:
      file: docker-compose.mongodb.yml
      service: mongodb

  # 应用服务
  backend-api-1:
    extends:
      file: docker-compose.backend.yml
      service: backend-api-1

  backend-api-2:
    extends:
      file: docker-compose.backend.yml
      service: backend-api-2

  # 前端服务
  admin-web:
    build:
      context: ./admin-system
      dockerfile: Dockerfile
    container_name: admin-web
    restart: always
    ports:
      - "8080:80"
    networks:
      - xlxumu_network

  # 负载均衡
  nginx:
    image: nginx:alpine
    container_name: nginx-lb
    restart: always
    ports:
      - "80:80"
      - "443:443"
    volumes:
      - ./nginx/nginx.conf:/etc/nginx/nginx.conf
      - ./nginx/ssl:/etc/nginx/ssl
      - /etc/letsencrypt:/etc/letsencrypt
    depends_on:
      - backend-api-1
      - backend-api-2
      - admin-web
    networks:
      - xlxumu_network

networks:
  xlxumu_network:
    driver: bridge

7. 部署流程

7.1 首次部署流程

# 1. 准备服务器环境
./scripts/setup-server.sh

# 2. 克隆项目代码
git clone https://github.com/your-org/xlxumu.git
cd xlxumu

# 3. 配置环境变量
cp .env.example .env.production
vim .env.production

# 4. 执行部署
./scripts/deploy.sh production

# 5. 验证部署
./scripts/health-check.sh

7.2 更新部署流程

# 1. 备份当前数据
./scripts/backup.sh

# 2. 拉取最新代码
git pull origin main

# 3. 执行部署
./scripts/deploy.sh production

# 4. 验证部署
./scripts/health-check.sh

# 5. 如有问题,执行回滚
# ./scripts/rollback.sh backup_20240120_143000

7.3 蓝绿部署流程

#!/bin/bash
# blue-green-deploy.sh — zero-downtime switch between two backend stacks.

# Detect the currently serving color; default to blue on first run.
CURRENT_ENV=$(docker ps --format "table {{.Names}}" | grep backend | head -1 | grep -o "blue\|green" || echo "blue")
TARGET_ENV=$([ "$CURRENT_ENV" = "blue" ] && echo "green" || echo "blue")

# Map each color to its published health-check port.
# FIX: the original built the URL as "300${TARGET_ENV:0:1}", which expands
# to the LETTER "b" or "g" (localhost:300b) — an invalid port, so the
# health check could never succeed.
# NOTE(review): blue->3001 / green->3002 assumed — keep in sync with the
# docker-compose.blue.yml / docker-compose.green.yml port mappings.
if [ "$TARGET_ENV" = "blue" ]; then
    TARGET_PORT=3001
else
    TARGET_PORT=3002
fi

echo "当前环境: $CURRENT_ENV"
echo "目标环境: $TARGET_ENV"

# 1. Bring up the target stack.
echo "部署到 $TARGET_ENV 环境..."
docker-compose -f "docker-compose.${TARGET_ENV}.yml" up -d

# 2. Health-check the new stack; tear it down again on failure.
echo "执行健康检查..."
sleep 30
if ! curl -f "http://localhost:${TARGET_PORT}/health"; then
    echo "健康检查失败,回滚..."
    docker-compose -f "docker-compose.${TARGET_ENV}.yml" down
    exit 1
fi

# 3. Point the Nginx upstreams at the new color and hot-reload.
echo "切换流量到 $TARGET_ENV 环境..."
sed -i "s/backend-${CURRENT_ENV}/backend-${TARGET_ENV}/g" nginx/nginx.conf
docker exec nginx-lb nginx -s reload

# 4. Drain in-flight connections, then stop the old color.
echo "停止 $CURRENT_ENV 环境..."
sleep 60  # wait for connections to drain
docker-compose -f "docker-compose.${CURRENT_ENV}.yml" down

echo "蓝绿部署完成"

8. 监控和日志

8.1 日志收集配置

# docker-compose.logging.yml
version: '3.8'

services:
  elasticsearch:
    image: docker.elastic.co/elasticsearch/elasticsearch:7.15.0
    container_name: elasticsearch
    environment:
      - discovery.type=single-node
      - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
    ports:
      - "9200:9200"
    volumes:
      - elasticsearch_data:/usr/share/elasticsearch/data
    networks:
      - xlxumu_network

  logstash:
    image: docker.elastic.co/logstash/logstash:7.15.0
    container_name: logstash
    volumes:
      - ./logstash/pipeline:/usr/share/logstash/pipeline
      - ./logs:/logs
    ports:
      - "5044:5044"
    depends_on:
      - elasticsearch
    networks:
      - xlxumu_network

  kibana:
    image: docker.elastic.co/kibana/kibana:7.15.0
    container_name: kibana
    ports:
      - "5601:5601"
    environment:
      ELASTICSEARCH_HOSTS: http://elasticsearch:9200
    depends_on:
      - elasticsearch
    networks:
      - xlxumu_network

volumes:
  elasticsearch_data:

networks:
  xlxumu_network:
    external: true

8.2 Prometheus监控配置

# prometheus/prometheus.yml
global:
  scrape_interval: 15s
  evaluation_interval: 15s

rule_files:
  - "alert_rules.yml"

alerting:
  alertmanagers:
    - static_configs:
        - targets:
          - alertmanager:9093

scrape_configs:
  - job_name: 'prometheus'
    static_configs:
      - targets: ['localhost:9090']

  - job_name: 'node-exporter'
    static_configs:
      - targets: ['node-exporter:9100']

  - job_name: 'backend-api'
    static_configs:
      - targets: ['backend-api-1:3000', 'backend-api-2:3000']
    metrics_path: '/metrics'

  - job_name: 'nginx'
    static_configs:
      - targets: ['nginx:9113']

  - job_name: 'mysql'
    static_configs:
      - targets: ['mysql-exporter:9104']

  - job_name: 'redis'
    static_configs:
      - targets: ['redis-exporter:9121']

9. 安全配置

9.1 防火墙配置

#!/bin/bash
# setup-firewall.sh — baseline iptables policy for the application hosts.
# WARNING(review): run from a console, not over SSH — the rules are
# flushed and INPUT is set to DROP before the SSH ACCEPT rule is added,
# which can sever an active SSH session.

# Flush existing rules and user-defined chains.
iptables -F
iptables -X
iptables -t nat -F
iptables -t nat -X

# Default-deny inbound and forwarded traffic; allow all outbound.
iptables -P INPUT DROP
iptables -P FORWARD DROP
iptables -P OUTPUT ACCEPT

# Loopback traffic.
iptables -A INPUT -i lo -j ACCEPT
iptables -A OUTPUT -o lo -j ACCEPT

# Established/related connections.
iptables -A INPUT -m state --state ESTABLISHED,RELATED -j ACCEPT

# SSH.
iptables -A INPUT -p tcp --dport 22 -j ACCEPT

# HTTP / HTTPS.
iptables -A INPUT -p tcp --dport 80 -j ACCEPT
iptables -A INPUT -p tcp --dport 443 -j ACCEPT

# Database ports, restricted to the internal Docker subnet.
# NOTE(review): assumes the docker network uses 172.18.0.0/16 — verify
# with `docker network inspect xlxumu_network`.
iptables -A INPUT -s 172.18.0.0/16 -p tcp --dport 3306 -j ACCEPT
iptables -A INPUT -s 172.18.0.0/16 -p tcp --dport 6379 -j ACCEPT
iptables -A INPUT -s 172.18.0.0/16 -p tcp --dport 27017 -j ACCEPT

# Basic rate limiting on the web ports.
# NOTE(review): these rules are appended AFTER the unconditional 80/443
# ACCEPT rules above, so they never match — move them earlier (and add a
# trailing DROP) if rate limiting is actually intended.
iptables -A INPUT -p tcp --dport 80 -m limit --limit 25/minute --limit-burst 100 -j ACCEPT
iptables -A INPUT -p tcp --dport 443 -m limit --limit 25/minute --limit-burst 100 -j ACCEPT

# Persist rules (requires the iptables-services package on CentOS 7+).
service iptables save

9.2 SSL/TLS配置

#!/bin/bash
# setup-ssl.sh — TLS hardening: strong DH parameters + automatic renewal.

# Generate 2048-bit Diffie-Hellman parameters for nginx.
openssl dhparam -out /etc/nginx/ssl/dhparam.pem 2048

# Renew certificates daily at 12:00 and hot-reload nginx on success.
# FIX: /etc/cron.d entries require a user field between the schedule and
# the command (crontab(5)) — the original omitted "root", so cron
# rejected the line and renewal never ran.
cat > /etc/cron.d/certbot << EOF
0 12 * * * root /usr/bin/certbot renew --quiet --post-hook "docker exec nginx-lb nginx -s reload"
EOF

10. 故障排查

10.1 常见问题排查

#!/bin/bash
# troubleshoot.sh — collect diagnostics for common failure scenarios.
# NOTE(review): export MYSQL_ROOT_PASSWORD before running, otherwise
# step 8 connects with an empty password and fails.

echo "=== 系统故障排查 ==="

# 1. Docker daemon status.
echo "1. 检查Docker服务状态"
systemctl status docker

# 2. All containers, including exited ones.
echo "2. 检查容器状态"
docker ps -a

# 3. Tail the recent logs of every running container.
echo "3. 检查容器日志"
for container in $(docker ps --format "{{.Names}}"); do
    echo "--- $container 日志 ---"
    docker logs --tail 50 $container
done

# 4. Listening sockets on the service ports.
echo "4. 检查网络连接"
netstat -tlnp | grep -E "(80|443|3000|3306|6379|27017)"

# 5. Disk space.
echo "5. 检查磁盘空间"
df -h

# 6. Memory usage.
echo "6. 检查内存使用"
free -h

# 7. CPU / top-processes snapshot.
echo "7. 检查CPU使用"
top -bn1 | head -20

# 8. Active MySQL sessions.
echo "8. 检查数据库连接"
docker exec mysql-master mysql -u root -p${MYSQL_ROOT_PASSWORD} -e "SHOW PROCESSLIST;"

# 9. Redis liveness.
echo "9. 检查Redis连接"
docker exec redis-master redis-cli ping

10.2 性能优化建议

# 系统内核参数优化
cat >> /etc/sysctl.conf << EOF
# 网络优化
net.core.somaxconn = 65535
net.core.netdev_max_backlog = 5000
net.ipv4.tcp_max_syn_backlog = 65535
net.ipv4.tcp_fin_timeout = 10
net.ipv4.tcp_keepalive_time = 1200
net.ipv4.tcp_max_tw_buckets = 5000

# 文件描述符限制
fs.file-max = 65535
EOF

sysctl -p

11. 总结

11.1 部署检查清单

  • 服务器环境准备完成
  • Docker和Docker Compose安装完成
  • SSL证书配置完成
  • 环境变量配置完成
  • 数据库初始化完成
  • 应用服务部署完成
  • 负载均衡配置完成
  • 监控系统配置完成
  • 日志收集配置完成
  • 备份策略配置完成
  • 安全配置完成
  • 健康检查通过
  • 性能测试通过

11.2 运维要点

  1. 定期备份:每日自动备份数据库和重要文件
  2. 监控告警:配置完善的监控和告警机制
  3. 日志管理:集中收集和分析日志
  4. 安全更新:定期更新系统和应用安全补丁
  5. 性能优化:持续监控和优化系统性能
  6. 容灾准备:制定完善的容灾恢复方案

11.3 联系方式

运维团队告警邮箱:admin@xlxumu.com(与监控脚本 monitor.sh 中的 ALERT_EMAIL 配置保持一致)

文档版本: v1.1
最后更新: 2024年9月
维护团队: 运维团队