#!/bin/bash
set -u

#------------------------------------------------------------------------------
# Get EBS volumes with Name tag of <host>-root, <host>-hxdepots, and
# <host>-hxlogs, e.g. perforce-01-root, perforce-01-hxdepots, and
# perforce-01-hxlogs. Take snapshots, and tag them with the current journal
# counter. Snapshots older than $SnapshotAgeToExpire days are then deleted.
#
# Requires: the SDP shell environment ($P4DBIN, $P4ROOT) and the AWS CLI with
# credentials allowing ec2 describe-volumes/create-snapshot/create-tags/
# describe-snapshots/delete-snapshot. Under 'set -u', an unset $P4DBIN or
# $P4ROOT aborts the script.
#------------------------------------------------------------------------------

declare ThisScript="${0##*/}"
declare Version="1.3.0"
declare ThisHost=${HOSTNAME%%.*}
declare VolumeBaseName=
declare VolumeName=
declare SnapshotName=
declare VolumeId=
declare SnapshotId=
# Commands are built as arrays so they can be logged with "${Cmd[*]}" and
# executed with "${Cmd[@]}" with word boundaries preserved exactly.
declare -a Cmd=()
declare -i ExitCode=0
declare CurrentJournal=
declare SnapshotAgeToExpire=
declare CurrentDate=

# msg: Print all arguments, honoring backslash escapes.
function msg () { echo -e "$*"; }
# errmsg: Print an error message and mark the run as failed (ExitCode=1).
function errmsg () { msg "\\nError: ${1:-Unknown Error}\\n"; ExitCode=1; }
# bail: Print an error message and exit immediately (default exit code 1).
function bail () { errmsg "${1:-Unknown Error}"; exit "${2:-1}"; }

# Login to the server so the next command will work properly when testing the
# script in isolation.
/p4/common/bin/p4login

# Get the latest journal number from the db.counters database. The journal
# counter record in 'p4d -jd' output is '@'-delimited; field 8 is the value.
CurrentJournal=$("$P4DBIN" -r "$P4ROOT" -k db.counters -jd - 2>&1 |
   grep @journal@ | cut -d '@' -f 8)

# Set the 'aging off' duration, in days; snapshots older than this will be
# deleted.
SnapshotAgeToExpire=90

msg "Started ${0##*/} v$Version at $(date)."

for VolumeBaseName in root hxdepots hxlogs; do
   VolumeName="${ThisHost}-${VolumeBaseName}"
   VolumeId=$(aws ec2 describe-volumes \
      --filters Name=tag:Name,Values="$VolumeName" \
      --query 'Volumes[*].{ID:VolumeId}' --output text)

   if [[ -z "$VolumeId" ]]; then
      errmsg "Could not determine VolumeId for $VolumeName. Skipping it."
      continue
   fi

   msg "Snapshotting volume $VolumeName [$VolumeId]."

   # Array assignment (not command substitution): the command is only
   # executed below, after being logged.
   Cmd=(aws ec2 create-snapshot --description \
      "${VolumeName} snapshot created by ${ThisScript} on main p4d instance" \
      --volume-id "$VolumeId" --query SnapshotId --output text)

   msg "Running: ${Cmd[*]}"
   SnapshotId=$("${Cmd[@]}")

   # The tag "Contents" with value "Perforce Checkpoints and Archives" is used
   # in the following section to identify snapshots that are specific to
   # Perforce, so they can be deleted when they expire.
   if [[ -n "$SnapshotId" ]]; then
      msg "Snapshot created for $VolumeName on $ThisHost with Id: $SnapshotId."
      SnapshotName="${VolumeName}-${CurrentJournal}"
      Cmd=(aws ec2 create-tags --resources "$SnapshotId" \
         --tags Key=Host,Value="$ThisHost" \
         Key=Contents,Value="Perforce Checkpoints and Archives" \
         Key=Name,Value="$SnapshotName" \
         Key=Backup,Value="true" \
         Key=Lifecycle,Value="Managed by /p4/common/cloud/aws/bin/snapshot.sh on main p4d instance" \
         --output text)
      if "${Cmd[@]}"; then
         msg "Verified: Resource tags applied."
      else
         errmsg "Failed to apply tags to Snapshot $SnapshotId."
      fi
   else
      errmsg "Failed to create snapshot for $VolumeName."
   fi
done

#############################################################
# This section automatically deletes snapshots that are     #
# older than $SnapshotAgeToExpire days. This replaces       #
# the lifecycle management portion of AWS's Snapshot        #
# automation, since the snapshots are generated externally. #
#############################################################

# Fetching snapshot IDs and their creation dates where tag Contents is
# 'Perforce Checkpoints and Archives'.
snapshots_to_check=$(aws ec2 describe-snapshots \
   --query "Snapshots[?Tags[?Key=='Contents' && Value=='Perforce Checkpoints and Archives']].[SnapshotId,StartTime]" \
   --output text)

CurrentDate=$(date +%s)

while read -r snapshot_id creation_date; do
   # Skip blank lines, e.g. when no matching snapshots exist.
   [[ -n "$snapshot_id" ]] || continue

   # Converting snapshot creation date to seconds.
   snapshot_date=$(date -d "$creation_date" +%s)

   # Calculate age of snapshot (86400 seconds in a day).
   snapshot_age=$(( (CurrentDate - snapshot_date) / 86400 ))

   # Check if snapshot is older than specified days.
   if [ "$snapshot_age" -ge "$SnapshotAgeToExpire" ]; then
      echo "Deleting snapshot $snapshot_id which is $snapshot_age days old."
      aws ec2 delete-snapshot --snapshot-id "$snapshot_id"
   fi
done <<< "$snapshots_to_check"

if [[ "$ExitCode" -eq 0 ]]; then
   msg "Processing completed OK."
else
   msg "Processing completed WITH ERRORS. Review the output above."
fi

exit "$ExitCode"
# | Change | User | Description | Committed | |
---|---|---|---|---|---|
#4 | 29954 | C. Thomas Tyler |
Released SDP 2023.1.29949 (2023/12/01). Copy Up using 'p4 copy -r -b perforce_software-sdp-dev'. |
||
#3 | 27761 | C. Thomas Tyler |
Released SDP 2020.1.27759 (2021/05/07). Copy Up using 'p4 copy -r -b perforce_software-sdp-dev'. |
||
#2 | 27331 | C. Thomas Tyler |
Released SDP 2020.1.27325 (2021/01/29). Copy Up using 'p4 copy -r -b perforce_software-sdp-dev'. |
||
#1 | 25245 | C. Thomas Tyler |
Released SDP 2019.1.25238 (2019/03/02). Copy Up using 'p4 copy -r -b perforce_software-sdp-dev'. |
||
//guest/perforce_software/sdp/dev/Server/Unix/p4/common/cloud/aws/bin/snapshot.sh | |||||
#2 | 25108 | C. Thomas Tyler | Corrected comments; no functional change. | ||
#1 | 25104 | C. Thomas Tyler |
Added sample script to create EBS snapshot of volumes with a Name tag of <host>-root and <host>-hxdepots, e.g. perforce-01-root and perforce-01-hxdepots. This is intended to be called at the optimal time to reduce risk exposure. The optimal time is immediately after a journal rotation completes near the start of the overall daily checkpoint process, or optionally immediately after the offline checkpoint is created. This script creates 2 EBS snapshots with appropriate resource tagging each time it is run. Note that a full recovery would entail mounting these 2 volumes, creating new hxdepots and hxmetadata volumes, finalizing the SDP structure, etc. This is fairly straightforward, but not trivial, and is needed only as a Plan B for recovery. Plan A is to use Perforce replication to a secondary instance for fast and easier recovery. Basic data retention policies can be implemented with EBS Data Lifecycle Policies. Custom automation can copy recovery assets to S3 Glacier for long term storage. In addition to this script, a new high-level SDP structure is created, /p4/common/cloud. Under the new cloud directory is a directory for the cloud provider, e.g. one of aws, azure, gcp, rackspace, etc. Cloud-provider specific files can go in there. |