Docker Compose
If you're worried that using the `latest` tag will pull new image versions that could break your setup, you can pin a suitable version number instead.
# Docker Compose stack: two WordPress sites, each backed by its own MariaDB,
# fronted by an https-portal reverse proxy that terminates TLS.
# NOTE(review): "4.5" is not a valid Compose file version (the schema tops out
# at 3.x, and Compose v2 treats the key as obsolete) — pinned to "3.9".
version: "3.9"

services:
  tsumugi-db:
    image: mariadb:latest
    volumes:
      - tsumugi-mariadb_data:/var/lib/mysql
    restart: always
    environment:
      MARIADB_ROOT_PASSWORD: your-mariadb-root-pwd
      MARIADB_DATABASE: your-wordpress-db
      MARIADB_USER: yourDbUserForWp
      MARIADB_PASSWORD: yourMariaDbPassword

  tsumugi-wordpress:
    depends_on:
      - tsumugi-db
    # links:
    #   - mariadb:mysql
    image: wordpress:latest
    volumes:
      - tsumugi-wordpress_data:/var/www/html
      # PHP config is volume-mapped so uploads.ini tweaks survive restarts
      - tsumugi-wordpress_php:/usr/local/etc/php
    restart: always
    environment:
      # Host is the sibling service name; credentials must match tsumugi-db
      WORDPRESS_DB_HOST: tsumugi-db
      WORDPRESS_DB_USER: yourDbUserForWp
      WORDPRESS_DB_PASSWORD: yourMariaDbPassword
      WORDPRESS_DB_NAME: your-wordpress-db

  zunda-db:
    image: mariadb:latest
    volumes:
      - zundamon-mariadb_data:/var/lib/mysql
    restart: always
    environment:
      MARIADB_ROOT_PASSWORD: some-mariadb-root-pwd
      MARIADB_DATABASE: zundamon-wordpress
      MARIADB_USER: zundamochi114514
      MARIADB_PASSWORD: some-mariadb-password

  zundamon-wordpress:
    depends_on:
      - zunda-db
    image: wordpress:latest
    volumes:
      - zundamon-wordpress_data:/var/www/html
      - zundamon-wordpress_php:/usr/local/etc/php
    restart: always
    environment:
      WORDPRESS_DB_HOST: zunda-db
      WORDPRESS_DB_USER: zundamochi114514
      WORDPRESS_DB_PASSWORD: some-mariadb-password
      WORDPRESS_DB_NAME: zundamon-wordpress
      WORDPRESS_TABLE_PREFIX: wpzundamochi_

  https-portal:
    image: steveltn/https-portal:1
    ports:
      # Bound to a specific host interface; quoted to avoid YAML's
      # sexagesimal-number trap on colon-separated values
      - "192.168.19.19:80:80"
      - "192.168.19.19:443:443"
    restart: always
    environment:
      # Maps each public domain to the internal WordPress container
      DOMAINS: 'www.zundamon-kawaii.com -> http://tsumugi-wordpress:80, blog.zundamon.co.jp -> http://zundamon-wordpress:80, www.zundamon.co.jp -> http://zundamon-wordpress:80, zundamon.co.jp -> http://zundamon-wordpress:80'
      CLIENT_MAX_BODY_SIZE: 500M
      STAGE: 'production' # Don't use production until staging works
      # FORCE_RENEW: 'true'
    volumes:
      - https-portal-data:/var/lib/https-portal

volumes:
  tsumugi-mariadb_data: {}
  tsumugi-wordpress_data: {}
  tsumugi-wordpress_php: {}
  zundamon-mariadb_data: {}
  zundamon-wordpress_data: {}
  zundamon-wordpress_php: {}
  https-portal-data: {}
Troubleshooting
- Browser Developer Console shows
413 Request entity too large
- The
https-portal
needs an environment variable adjustment:
- Set `CLIENT_MAX_BODY_SIZE: 500M` in its environment.
If you accidentally add a semicolon after `500M` (e.g. `CLIENT_MAX_BODY_SIZE: 500M;`), the container will still run, but the website will not respond. Check the https-portal error.log, and you will see an error message similar to this (my volume configuration is located in the dd87****87b folder):
2024/07/19 13:52:01 [emerg] 59#59: unexpected ";" in /etc/nginx/nginx.conf:56
- Unable to upload files larger than 2MB to WordPress
- Since my Compose configuration directly maps PHP to a volume, you can create an `uploads.ini` file in `/var/lib/docker/volumes/yourstack-zundamon-wordpress_php/_data/conf.d` with the following content:
file_uploads = On
memory_limit = 500M
upload_max_filesize = 500M
post_max_size = 500M
max_execution_time = 600
Backup Script
#!/bin/bash
# Back up Docker volume data to an NFS share as a dated tarball, then prune
# old backups using a grandfather-father-son retention scheme:
#   - every backup kept for 7 days
#   - Monday backups kept for 30 days
#   - 1st-of-month backups kept for a year
#   - January 1st backups kept indefinitely

# Define variables
NFS_SERVER="192.168.x.x"           # Destination hostname
NFS_PATH="/volume1/Backup-NFS"     # Destination directory
LOCAL_PATHS=(
  "/var/lib/docker/volumes/yourblog_mariadb_data/_data"
  "/var/lib/docker/volumes/yourblog_wordpress_data/_data"
  # ...add and adjust as needed; entries are whitespace-separated, no commas
)
MOUNT_POINT="/mnt/backup_nfs"
DATE_NOW=$(date +'%Y%m%d%H%M%S')
BACKUP_FILE="$MOUNT_POINT/web/websiteBackup_$DATE_NOW.tar.gz"

# Create mount point
mkdir -p "$MOUNT_POINT"

# Mount the NFS share if it is not already mounted
if ! mountpoint -q "$MOUNT_POINT"; then
    echo "Mounting NFS shared directory..."
    if ! mount -t nfs "$NFS_SERVER:$NFS_PATH" "$MOUNT_POINT"; then
        echo "Failed to mount NFS shared directory"
        exit 1
    fi
fi

# Ensure the target subdirectory exists, then compress and back up the data
mkdir -p "$(dirname "$BACKUP_FILE")"
tar -czf "$BACKUP_FILE" -C / "${LOCAL_PATHS[@]}"

# Delete excess backups according to the retention rules
NOW_EPOCH=$(date +%s)
find "$MOUNT_POINT" -name "websiteBackup_*.tar.gz" -type f -print | while read -r FILE; do
    # Extract the YYYYMMDDHHMMSS timestamp from the filename.
    # Bug fix: the original replacement was empty (no \1), so FILE_DATE was
    # always "" and the retention logic never deleted anything.
    FILE_DATE=$(basename "$FILE" | sed 's/websiteBackup_\(.*\)\.tar\.gz/\1/')
    # Skip files whose name does not yield a parsable date
    FILE_EPOCH=$(date -d "${FILE_DATE:0:8}" +%s) || continue
    AGE=$(( (NOW_EPOCH - FILE_EPOCH) / 86400 ))
    if [ "$AGE" -le 7 ]; then
        # Keep one backup per day for the last 7 days
        continue
    elif [ "$AGE" -le 30 ]; then
        # Keep one backup per week (Monday) for the last month
        FILE_DAY=$(date -d "${FILE_DATE:0:8}" +%u)
        if [ "$FILE_DAY" -eq 1 ]; then
            continue
        fi
    elif [ "$AGE" -le 365 ]; then
        # Keep one backup per month (the 1st) for the last year
        FILE_DAY=$(date -d "${FILE_DATE:0:8}" +%d)
        if [ "$FILE_DAY" -eq 1 ]; then
            continue
        fi
    else
        # Keep one backup per year (January 1st).
        # Bug fix: use string equality, not -eq, for the zero-padded "%m%d".
        FILE_MONTH_DAY=$(date -d "${FILE_DATE:0:8}" +%m%d)
        if [ "$FILE_MONTH_DAY" = "0101" ]; then
            continue
        fi
    fi
    # Delete files that do not meet the retention rules
    rm -f "$FILE"
done
Schedule the script with `crontab -e` (edited here using the micro editor).
Facebook Comments