function p4checkpoint
{
    param($depot, [switch]$doCheckpoint, [switch]$doVerify, [switch]$doCleanUp)
    ## break the discrete operations of the checkpoint process (checkpoint, compress, archive scan, log compress, cleaner) into individual functions and call them from a small state tree.

    #private functions here.
    function RobustLogin
    {
        param($depot, [switch]$silent)
        $log = @()
        for ($tries = 0; ; ++$tries)
        {
            if ($tries -eq $static.MaxLoginTries)
            {
                throw "Login failed after $($static.MaxLoginTries) tries, check environment and all supporting files required"
            }
            try
            {
                ("Connecting to Perforce server " + $depot)
                $loginResults = p4sudo $depot
                if ($loginResults.loginsucceed -eq $true)
                {
                    if (!$silent)
                    {
                        ColorConsole -FGColor green -message ("Login Succeeded: " + $loginResults.user + "@" + $loginResults.server + ":" + $loginResults.port)
                    }
                    break;
                }
            }
            catch
            {
                $exception = $error[0].exception.message;
                if ($exception -notmatch "Login failed, check server connection or login account ")
                {
                    throw;
                }
            }
            ($exception + " Waiting " + $static.LoginWaitRetrySeconds + " seconds...")
            sleep $static.LoginWaitRetrySeconds
        }
    }

    function GetDepotStats
    {
        $files = gci ($backupdir + "\" + $static.FilenamePrefix + "*") -exclude *.md5 |
            ?{(($_.name -like "*.ckp*") -or ($_.name -like "*.jnl*"))} |
            sort lastwritetime -desc
        if ($files)
        {
            "Last checkpoint size:"
            "{0} = {1:0.00}GB" -f $files[1].name, ($files[1].length / 1GB)
            "{0} = {1:0.00}GB" -f $files[0].name, ($files[0].length / 1GB)
            "----------"
        }
        else
        {
            "Last checkpoint:"
            "No files found."
            "----------"
        }
        if ($files)
        {
            "Checkpoint store:"
            $files | % `
                { $totalFiles = 0; $totalSize = 0; } `
                { ++$totalFiles; $totalSize += $_.length } `
                {
                    "All checkpoints: {0} files, {1:0.00}GB total size" -f $totalFiles, ($totalSize / 1GB)
                    "Oldest checkpoint is {0:0.0} days old" -f ($start - $files[-1].lastwritetime).totaldays
                    "----------"
                }
        }
        else
        {
            "No checkpoints to perform statistics on."
            "----------"
        }
    }

    function CompressFile
    {
        param($fileFullPath)
        if (Test-Path $fileFullPath) #compress the file, delete the uncompressed file.
        {
            if (Test-Path ($fileFullPath + ".gz")) #if the file we're going to compress already exists, rename the existing one.
            {
                $oldFileFullPath = Uniquify-Path($fileFullPath + ".gz") #pre-emptively create a new file name.
                "A previous compressed file was found. Renaming to:`r`n" + $oldFileFullPath
                try
                {
                    ren ($fileFullPath + ".gz") $oldFileFullPath
                }
                catch
                {
                    write-warning "Could not modify files in $backupdir. No files were saved, moved, removed, or compressed."
                }
            }
            try
            {
                #"Compressing " + (gci $fileFullPath).name
                iex ($gzipPath + " " + $fileFullPath) | fix-transcript
                if ((test-path $fileFullPath) -and (test-path ($fileFullPath + ".gz"))) #if the old file exists, and a new gzip file is there, remove the old uncompressed file.
                {
                    remove-item $fileFullPath
                }
            }
            catch
            {
                write-warning "Could not modify files in $backupdir. No files were saved, moved, or compressed."
            }
        }
        else
        {
            ColorConsole -FGColor yellow -message ("No file to compress. Couldn't locate:`r`n" + $fileFullPath)
        }
    }
    # end private function declarations

    p4start-scriptinglog #start the event recorder.
    $start = get-date
    trap
    {
        p4fail-scriptinglog #what to do if we break or fail.
        return
    }
    if (!$depot)
    {
        if (!$env:p4server)
        {
            write-error "No p4 server specified" #if a depot isn't already authenticated, and we didn't specify one in the command parameters, error and break
            break
        }
        else
        {
            $depot = $env:p4server #set depot from logged in session.
        }
    }
    $env:CYGWIN = "nodosfilewarning"
    $replTopology = $null #clear if set.
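    <#
    Approximate shape of the depots configuration referenced via $env:depotsxml, inferred from the
    properties this function reads below. Element and attribute names match the references in this
    script; the values and attribute-vs-element layout are illustrative only and may differ:

        <perforce>
          <checkpointService MaxLoginTries="5" LoginWaitRetrySeconds="30" FilenamePrefix="p4_backup" />
          <depot name="example-depot" hostname="p4host01" clusterName="" svcRootDrive="D" svcRootPath="p4root">
            <checkpointPrefs RootDrive="E" BackupPath="p4backups" logFilename="log.txt"
                             RemoteCheckpoint="true" checkpointCompress="true"
                             checkpointDeleteDays="30" logDeleteDays="30"
                             checkpointArchiveDays="2" checkpointArchiveCompressDays="2"
                             logArchiveDays="2" logCompress="true" logMaxGrabTries="10"
                             logErrorReport="true" diskPercentReserve="10" />
          </depot>
        </perforce>
    #>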
    $static = ([xml](gc $env:depotsxml)).perforce.checkpointService #load static definitions for the checkpoint service
    $p4static = ([xml](gc $env:depotsxml)).perforce.depot | ?{$_.name -eq $depot} #load the perforce server specific static definitions for the checkpoint service
    if ($p4static.clusterName) #if this is a replica node, we need to switch context to the checkpoint server defined in the replication nodes.
    {
        ColorConsole -FGColor red -message "This server is a replica member. Switching context to checkpoint role server."
        try
        {
            $replTopology = p4get-replicationtopology -clustername $p4static.clusterName
        }
        catch
        {
            write-error $error[0]
            throw "Cluster definition exists but is incorrectly defined."
        }
        $depot = $replTopology.parent.children | ?{$_.checkpointTarget -eq "True"} | %{$_.hostname} #select the first replication target out of the list. Perhaps in the future this can be a round robin replication selection.
        $replTargetHostname = $depot #store this since the depot context switches throughout this function; we need to reference it later.
        $p4staticMaster = ([xml](gc $env:depotsxml)).perforce.depot | ?{$_.name -eq $replTopology.parent.hostname} #copy the original static config xml for the master into its own variable. Referenced in a few places regarding log rotations and error scans.
        $p4static = ([xml](gc $env:depotsxml)).perforce.depot | ?{$_.name -eq $depot} #load the checkpoint target role into p4static.
        ("Checkpoint will execute on server " + $depot)
        "----------"
    }
    if ($doCheckpoint -or $doCleanup) #this affects a number of options within the maintenance script.
    {
        $safetyState = $false #$false means "not safe": the run will actually perform possibly irrevocable changes such as deleting or moving checkpoint files and logs.
    }
    else
    {
        $safetyState = $true #$true means "safe": operations that move, sweep up, or delete files will not actually run; the script only displays what would have happened. This should be a separate action from actually performing a checkpoint. Checkpoint operations are assumed to be "unsafe" because they require moving logs and checkpoint files.
    }
    if ($doCheckpoint -and $doCleanup)
    {
        $doCleanup = $null #if someone decides that if one flag set is good, then setting both must be better - clear the cleanup flag, since cleanup happens during a normal checkpoint anyway.
    }
    #assemble a few variables for ease of use
    if ($p4static.clustername)
    {
        #clustered checkpoint config
        $logfile = "\\" + $p4staticMaster.hostname + "\" + $p4staticMaster.svcRootDrive + "$\" + $p4staticMaster.svcRootPath + "\" + $p4static.checkpointPrefs.logFilename #set active log file path to administrative share/path for the master
        $logfileTarget = "\\" + $p4static.hostname + "\" + $p4static.checkpointPrefs.RootDrive + "$\" + $p4static.checkpointPrefs.BackupPath + "\" + $p4static.checkpointPrefs.logFilename #the named logfile location that logfiles will get moved to.
        $backupdir = "\\" + $p4static.hostname + "\" + $p4static.checkpointPrefs.RootDrive + "$\" + $p4static.checkpointPrefs.BackupPath #create backupdir as an administrative share/path
        $servicedir = "\\" + $p4static.hostname + "\" + $p4static.checkpointPrefs.RootDrive + "$\" + $p4static.svcRootPath #this does not need to be modified for an alternate checkpoint target replica scenario.
    }
    else
    {
        if ($p4static.checkpointPrefs.RemoteCheckpoint -eq $true)
        {
            #non-cluster remote checkpoint config
            $logfile = "\\" + $p4static.hostname + "\" + $p4static.svcRootDrive + "$\" + $p4static.svcRootPath + "\" + $p4static.checkpointPrefs.logFilename #set active log file path to administrative share/path
            $logfileTarget = "\\" + $p4static.hostname + "\" + $p4static.checkpointPrefs.RootDrive + "$\" + $p4static.checkpointPrefs.BackupPath + "\" + $p4static.checkpointPrefs.logFilename #prefix for log file rotation target
            $backupdir = "\\" + $p4static.hostname + "\" + $p4static.checkpointPrefs.RootDrive + "$\" + $p4static.checkpointPrefs.BackupPath #create backupdir as an administrative share/path
            $servicedir = "\\" + $p4static.hostname + "\" + $p4static.checkpointPrefs.RootDrive + "$\" + $p4static.svcRootPath #this does not need to be modified for an alternate checkpoint target replica scenario.
        }
        else
        {
            #non-cluster local checkpoint config
            if ($p4static.clusterName) #too many assumptions can be made about executing a local checkpoint that is actually a replica target. Refuse to execute a locally configured checkpoint in a replica node environment.
            {
                throw "Cannot perform clustered checkpoint action on a local node. Check your topology and process."
            }
            $backupdir = $p4static.checkpointPrefs.RootDrive + ":\" + $p4static.checkpointPrefs.BackupPath #create backupdir as a local system path
            $logfileTarget = $p4static.checkpointPrefs.RootDrive + ":\" + $p4static.checkpointPrefs.BackupPath + "\" + $p4static.checkpointPrefs.logFilename #prefix for log file rotation target
            $logfile = $p4static.svcRootDrive + ":\" + $p4static.svcRootPath + "\" + $p4static.checkpointPrefs.logFilename #set active log file path to local system path
            $servicedir = $p4static.checkpointPrefs.RootDrive + ":\" + $p4static.svcRootPath
            if ((test-path $backupdir) -eq $false)
            {
                throw ("Path to BackupPath is not accessible. Tried:`r`n" + $backupdir) #if the path cannot be found, we can't do a backup. write error and exit.
            }
            if ((test-path $logfile) -eq $false)
            {
                write-warning ("No current log file found at:`r`n" + $logfile) #there are legitimate reasons to not have a log file, so we just log a warning.
            }
        }
    }
    if ((test-path $backupdir) -eq $false) #verify backupdir is accessible
    {
        throw ("Path to BackupPath is not accessible. Tried:`r`n" + $backupdir) #if the path cannot be found, we can't do a backup. write error and exit.
    }
    if ((test-path $logfile) -eq $false) #verify logfile path is accessible
    {
        write-warning ("No current log file found at:`r`n" + $logfile) #there are legitimate reasons to not have a log file, so we just log a warning.
    }
    try
    {
        $gzipPath = (get-command gzip).path #verify gzip exists for compressing logfiles.
    }
    catch
    {
        "This tool requires gzip. Copy the correct executable to a directory included in the system path and try again."
        break
    }
    if ($p4static.checkpointPrefs.checkpointDeleteDays) #first thing, clear out old stuff in case space is slim
    {
        $deleteDays = $p4static.checkpointPrefs.checkpointDeleteDays
        $Now = get-date
        "Looking for archived checkpoints older than $deleteDays days."
        $files = gci $backupdir | ?{(($_.name -like "*.md5*") -or ($_.name -like "*.ckp*") -or ($_.name -like "*.jnl*"))} #list files in the backup dir that are perforce checkpoint related
        $files = $files | ?{$_.lastwritetime -le ($Now.AddDays(-[int]$p4static.checkpointPrefs.checkpointDeleteDays))} #just locate files that match the checkpointDeleteDays parameter.
        #that operation could be put into one long horrible pipe but I didn't want to make this harder to understand than it already is.
        if ($files)
        {
            if ($safetyState -eq $false)
            {
                "The following checkpoint files have been purged from the system:`r`n"
            }
            else
            {
                ColorConsole -FGColor yellow -message "The following checkpoint files would have been purged, if doCleanup or doCheckpoint was invoked:`r`n"
            }
            foreach ($file in $files) #remove files limited by checkpointDeleteDays
            {
                if ($safetyState -eq $false)
                {
                    clear-itemproperty $file.fullname -name attributes -force
                    remove-item $file.fullname
                    $file
                }
                else
                {
                    ColorConsole -FGColor yellow -message $file
                }
            }
        }
        else
        {
            "No qualifying checkpoint files found."
        }
        "----------"
    }
    if ($p4static.checkpointPrefs.logDeleteDays) #first thing, clear out old stuff in case space is slim
    {
        $deleteDays = $p4static.checkpointPrefs.logDeleteDays
        $Now = get-date
        ("Looking for uncompressed archived logs older than " + $deleteDays + " days.")
        $files = gci $backupdir | ?{$_.name -like "*.log*"} #list log files that haven't been compressed.
        $files = $files | ?{$_.lastwritetime -le ($Now.AddDays(-[int]$p4static.checkpointPrefs.logDeleteDays))} #filter the listing to match logDeleteDays preferences.
        #also split into two sections for clarity.
        if ($files)
        {
            if ($safetyState -eq $false)
            {
                "The following log files have been purged from the system:`r`n"
            }
            else
            {
                ColorConsole -FGColor yellow -message "The following log files would have been purged, if doCleanup or doCheckpoint was invoked:"
            }
            foreach ($file in $files)
            {
                if ($safetyState -eq $false)
                {
                    clear-itemproperty $file.fullname -name attributes -force
                    remove-item $file.fullname
                    $file.fullname
                }
                else
                {
                    $file.fullname
                }
            }
        }
        else
        {
            "No qualifying archived logs found."
        }
        "----------"
    }
    #start the login procedure
    RobustLogin $depot
    "----------"
    ColorConsole -FGColor yellow -message "Collecting counter statistics"
    $counters = p4get-counters #get counters that we care about.
    ColorConsole -FGColor yellow -message $counters #print out current counters.
    "----------"
    #actual checkpoint action
    if ($doCheckpoint)
    {
        if ($replTopology) #if the selected checkpoint service has a replication prefs configuration, perform a replica specific checkpoint operation
        {
            if ($replTargetHostname -eq $p4static.name)
            {
                ("Scheduling uncompressed journal checkpoint on replica role partner: " + $replTargetHostname)
                $exec = "p4 admin checkpoint -Z " + $BackupDir + "\" + $static.FilenamePrefix #don't compress the journal (capital -Z)
                ColorConsole -FGColor green -message $exec
                iex $exec
                "----------"
                ("Truncating journal on master server " + $replTopology.parent.hostname)
                RobustLogin $replTopology.parent.hostname
                $exec = "p4 admin journal"
                ColorConsole -FGColor green -message $exec
                iex $exec
                "----------"
                ("Forcing replica pull and waiting for checkpoint to conclude on replica role partner " + $replTargetHostname)
                ColorConsole -FGColor yellow -message "This operation may take some time to complete."
                $ckpStart = get-date
                "Waiting 10 seconds for replication member to settle."
                sleep 10
                "Waiting for login context to previous server..."
                RobustLogin $replTargetHostname
                "Waiting 10 seconds for replication member to settle."
                sleep 10 #wait for things to settle on the repl member. If we don't, it sometimes gets into a state where a pull command is ineffective.
$exec = ("p4get-replicationstate -clusterMember " + $replTargetHostname) ColorConsole -FGColor green -message $exec iex $exec $ckpStop = get-date $checkpointTimeSpan = new-timespan $ckpStart $ckpStop ("Checkpoint took approximately " + ("{0:N3}" -f $checkpointTimeSpan.TotalSeconds) + " seconds to complete.") } else { throw "Selected replication checkpoint target and configured replication target do not match. Resolve errors in replication configuration before continuing." } } else #if there are no replication preferences in the loaded config, execute a non-replication specific checkpoint operation. { "Started checkpoint at $start" if ($p4static.checkpointprefs.checkpointCompress -eq $true) #compress the checkpoint and journal (lowercase -z) { "Executing compressed journal checkpoint:" $exec = "p4 admin checkpoint -z " + $BackupDir + "\" + $static.filenamePrefix ColorConsole -FGColor green -message $exec iex $exec "----------" } else #don't compress the journal (capital -Z) { "Executing uncompressed journal checkpoint:" $exec = "p4 admin checkpoint -Z " + $BackupDir + "\" + $static.FilenamePrefix ColorConsole -FGColor green -message $exec iex $exec "----------" } } "Post-checkpoint store statistics" GetDepotStats $end = get-date $elapsed = $end - $start "Ended checkpoint at $end, elapsed time $($elapsed)" $exec = "p4 counter checkpoint " + $counters.change if ($p4static.clusterName) #if the selected checkpoint service has a replication prefs configuration, update counters on master (can't do that on replica) { RobustLogin $p4staticMaster.name -silent #log into master iex $exec #update counters ColorConsole -FGColor green -message $exec RobustLogin $p4static.name -silent #log back into replica } else { iex $exec #update counters, since we're on a master. ColorConsole -FGColor green -message $exec } $counters = p4get-counters #get new counters "Counters after checkpoint:" #print to screen for log. $counters "----------" } else { "Pre-checkpoint store statistics" GetDepotStats ColorConsole -FGColor yellow -message "Specify -doCheckpoint to invoke checkpoint action.`r`nRefusing to perform checkpoint." "----------" } #clear old journals on replica if ($replTopology) { if (p4get-journallist) { if ($safetyState -eq $false) #if our safety state is set to perform irrevocable actions do it. { "Purging unneeded journal files:" p4get-journallist -purge -noConfirm } else { ColorConsole -FGColor yellow -message "Use -doCleanup or -doCheckpoint to purge unused journal files.`r`nNot purging:" p4get-journallist } "----------" } } if ($DoVerify) { $start = get-date "Started verify at $start" $failures = p4get "verify -q //..." if ($failures.count) { "Verification failures detected!" $failures write-warning "$failureCount verification failures" } else { "Verification completed with no errors" } $end = get-date $elapsed = $end - $start "Ended verify at $end, elapsed time $($elapsed)" $exec = "p4 counter verify " + $counters.change if ($p4static.clusterName) #if the selected checkpoint service has a replication prefs configuration, update counters on master (can't do that on replica) { RobustLogin $p4staticMaster.name -silent #log into master iex $exec #update counters ColorConsole -FGColor green -message $exec RobustLogin $p4static.name -silent #log back into replica } else { iex $exec #update counters, since we're on a master. ColorConsole -FGColor green -message $exec } "----------" } else { "No verify operation will be performed on this server." 
"----------" } if ($p4static.checkpointPrefs.logErrorReport -eq $true) { if (test-path $logfile) { "Scanning log for errors..." $logErrors = type $logfile | ` select-string -context 8 -pattern 'server error' | ` ?{ $_.context.postcontext[1] -notmatch 'connection from .* broken' } # filter out 'errors' from people cancelling if ($logErrors) { if ($safetyState -eq $false) { $badPath = Uniquify-Path("$backupdir\" + $static.FilenamePrefix + "." + ($counters.checkpoint) + ".ERRORS.log") #try to match up checkpoint id to the logs we just found to be helpful. Date groupings are easier to match though. try { $logErrors > $badPath #export found log errors. } catch { write-warning "Could not modify files in $backupdir. No log files were saved, moved, or compressed." } if ($p4static.checkpointPrefs.CompressLogs -eq $true) #path is different depending on if logs get compressed or not. { write-warning "Errors found in server log, error extract stored in $badPath.gz" } else { write-warning "Errors found in server log, error extract stored in $badPath" } } else { write-warning "Errors found in server log. Specify -doCleanup or -doCheckpoint to rotate logs and capture error messages for review." } } "----------" } else { "No log file found to scan for errors." "----------" } } if ($p4static.checkpointPrefs.checkpointArchiveDays) { #zzz Read the comment below. <# Expand this block to handle journal files for replica services. clean up uncompressed, complete, truncated journal files on master server. basically: check the replication state on a replica, ensure that the journal number for the current journal is higher than the journal.nnn file sitting in the root of p4. If there is a) no note of the old journal number in the report (p4 pull -l -j) and the current journal number is higher, it's safe to remove from the master. #> $Now = get-date #get the date $files = gci $servicedir | ?{(($_.name -like "*.md5*") -or ($_.name -like "*.ckp*") -or ($_.name -like "*.jnl*"))} #look for files with .md5, .ckp, or .jnl in their names, and move to the backup dir folder. $files = $files | ?{$_.lastwritetime -le ($Now.AddDays(-[int]$p4static.checkpointPrefs.checkpointArchiveDays))} if ($files) { if ($safetyState -eq $false) { "Archiving checkpoint files to " + $backupdir foreach ($file in $files) { move $file.FullName $backupdir $file.Name } } else { ColorConsole -FGColor yellow -message "Use -doCleanup or -doCheckpoint to archive checkpoint files.`r`nNot moving:" foreach ($file in $files) { $file.Name } } "----------" } } if ($p4static.checkpointPrefs.logArchiveDays) { if (Test-Path $logfile) #if a live log file exists, do the job. { if ($safetyState -eq $false) { $start = get-date $archivedLog = $logfiletarget + "-" + $counters.checkpoint + ".ARCHIVED.log" "Rotating active log" if (Test-Path $archivedLog) #if the log file we're going to archive to already exists, rename the existing one before proceeding. { $oldPath = Uniquify-Path("$logfileTarget\" + $static.FilenamePrefix + ($counters.checkpoint) + ".ARCHIVED.log") #pre-emptively create a new log file name - there are a number of cases where log file names overlap, this creates a unique name for the log file. ColorConsole -FGColor yellow -message ("A previous archived log file was found with a duplicate name. Renaming existing log file to " + $oldPath) try { ren $archivedLog $oldPath } catch { write-warning "Could not modify files in $backupdir. No log files were saved, moved, or compressed." 
} } ("Renaming active log file " + $p4static.checkpointPrefs.logFilename + " to " + $archivedLog) for ($tries = 0;($tries -le $p4static.checkpointPrefs.logMaxGrabTries) -or ($tries -gt 20); ++$tries) { if ($tries -eq $p4static.checkpointPrefs.logMaxGrabTries) { write-error ("Giving up log file grab after " + $p4static.checkpointPrefs.logMaxGrabTries + " tries.") } try { copy $logfile $archivedLog if (test-path $archivedLog) { remove-item $logfile } break; } catch { $exception = $error[0].exception.message; if ($exception -notmatch "used by another process") { throw; } $waitSeconds = 5 ($exception + "Waiting " + $waitSeconds + "seconds...") sleep $waitSeconds } } $end = get-date $elapsed = $end - $start "Ended log archive at $end, elapsed time $($elapsed)" $exec = "p4 counter logarchive " + $counters.change if ($p4static.clusterName) #if the selected checkpoint service has a replication prefs configuration, update counters on master (can't do that on replica) { RobustLogin $p4staticMaster.name -silent #log into master iex $exec #update counters ColorConsole -FGColor green -message $exec RobustLogin $p4static.name -silent #log back into replica } else { iex $exec #update counters, since we're on a master. ColorConsole -FGColor green -message $exec } "----------" } else { ColorConsole -FGColor yellow -message ("Active log exists at " + $logfile + ".`r`nUse -doCleanup or -doCheckpoint to roll active log files.") "----------" } } else { "No log file found to compress and archive." "----------" } } ## disk resource control #check percentage free before performing a checkpoint operation #if it's less that user defined percentage, in the future, figure out how many old checkpoints to remove # #estimate the size of a new checkpoint, based on last checkpoint # if ($p4static.checkpointPrefs.diskPercentReserve) { $servicevolume = (get-wmiobject win32_logicaldisk -ComputerName $p4static.hostname) | ?{$_.deviceID -match $p4static.svcRootDrive} #get service disk data $freespace =([math]::round(($servicevolume.Size - $servicevolume.FreeSpace) / $servicevolume.size,2) + ($p4static.checkpointPrefs.diskPercentReserve / 100)) #calculate free space in percent to two decimals. 
        if ($freespace -lt [double]$p4static.checkpointPrefs.diskPercentReserve)
        {
            write-warning ("Perforce service volume " + $servicevolume.DeviceID + " is less than " + $p4static.checkpointPrefs.diskPercentReserve + " percent free.`r`n")
            ("{0:N0}" -f ($servicevolume.size / 1GB)) + "`tgigabytes total size"
            ("{0:N0}" -f ($servicevolume.freespace / 1GB)) + "`tgigabytes free"
            ([math]::round((($servicevolume.Size - $servicevolume.FreeSpace) / $servicevolume.size) * 100, 2)).toString() + "`t`tpercent used"
            "----------"
        }
        else
        {
            ("Perforce service volume " + $servicevolume.DeviceID + " is greater than " + $p4static.checkpointPrefs.diskPercentReserve + " percent free.`r`n")
            ("{0:N0}" -f ($servicevolume.size / 1GB)) + "`tgigabytes total size"
            ("{0:N0}" -f ($servicevolume.freespace / 1GB)) + "`tgigabytes free"
            ([math]::round((($servicevolume.Size - $servicevolume.FreeSpace) / $servicevolume.size) * 100, 2)).toString() + "`t`tpercent used"
            "----------"
        }
    }
    if ($p4static.checkpointPrefs.checkpointArchiveCompressDays) #compress checkpoints moved to the backup directory
    {
        $files = gci $backupdir -exclude *.gz |
            ?{(($_.name -like "*.md5*") -or ($_.name -like "*.ckp*") -or ($_.name -like "*.jnl*"))} |
            Group {$_.LastWriteTime.ToString("yyyy-MM-dd")} |
            Sort-Object -Property Name #list all the matching files in the backupdir, excluding already compressed files, then group them by day and sort. The date string format makes grouping easy, though we lose the datetime object.
        $now = get-date #get the date now
        if ($files)
        {
            "There are checkpoint files that may need compression."
            foreach ($set in $files) #iterate through the grouped file sets
            {
                if ((new-timespan -start (get-date $set.name.tostring()) -end $now).TotalDays -ge $p4static.checkpointPrefs.checkpointArchiveCompressDays) #if the file set, measured from 12:01am on the day it was created, is older than the configured compress days value, compress it.
                {
                    if ($safetyState -eq $false) #if safety state is unsafe, perform the operation. (all operations are considered unsafe)
                    {
                        foreach ($file in $set.group) #iterate through each file in the group to be compressed
                        {
                            CompressFile $file.fullname
                        }
                    }
                    else
                    {
                        ColorConsole -FGColor yellow -message "Use -doCleanup or -doCheckpoint to compress outstanding checkpoint files.`r`nNot compressing:"
                        foreach ($file in $set.group)
                        {
                            $file.fullname
                        }
                    }
                }
            }
            "----------"
        }
        else
        {
            "No outstanding uncompressed checkpoint files found."
            "----------"
        }
    }
    if ($p4static.checkpointPrefs.logCompress -eq $true) #compress log files in the backup directory
    {
        if ($safetyState -eq $false)
        {
            "Beginning log compression"
            if ($badpath)
            {
                ("Compressing error log results to " + (gci $badpath).name + ".gz")
                if (Test-Path $badpath) #compress the found log error messages, delete the uncompressed file.
                {
                    CompressFile $badpath
                }
                else
                {
                    write-warning "Expected error log extract file to compress.`r`nTried to compress: $badpath"
                }
            }
            else
            {
                "No error log results found to compress."
            }
            if ($archivedLog)
            {
                ("Compressing archived log:`r`n" + $archivedLog)
                if (test-path $archivedLog)
                {
                    CompressFile $archivedLog
                }
                else
                {
                    ColorConsole -FGColor yellow -message ("Ignoring archive command, no log file exists (yet) at:`r`n" + $archivedLog)
                }
            }
            else
            {
                ColorConsole -FGColor yellow -message "No archived log file to compress."
            }
        }
        else
        {
            ColorConsole -FGColor yellow -message "Use -doCleanup or -doCheckpoint to compress remaining error and archive logs.`r`nlogCompress option is set to true."
} "----------" } if ($p4static.checkpointPrefs.logErrorReport -eq $true) #write this at the end of the transcript so we don't lose important log data about the checkpoint operation. Logs can be reviewed on the file share. { if ($logErrors) { $logErrors | %{ $_; "" } } } p4stop-scriptinglog }