#********************************************************************
#
# Copyright (C) 2005-2006 Hari Krishna Dara
#
# This file is part of p4admin.
#
# p4admin is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# p4admin is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
#*******************************************************************

import os
import os.path
import sys
import re
import logging
import threading

import config
import notify
import utils
import rsynclib

log = logging.getLogger(__name__)


def syncData(quickMode):
    # Sync the depot.
    if quickMode and config.quickRsyncExcludePath != None:
        excludeFileOpt = '--exclude-from='+config.quickRsyncExcludePath
    else:
        excludeFileOpt = ''
    for syncDir in config.syncScmFiles:
        rsynclib.rsyncFile(config.p4HostRemote, config.rsyncRootModuleName,
                syncDir, config.p4RootLocalPsx, syncDir, excludeFileOpt)
    return 0


def createJnlExcludesFile(excludeFileName, closeFile=True):
    excludeFile = open(excludeFileName, 'w')
    excludeFile.write("# Created by p4admin\n")
    excludeFile.write("# This file is used to track the journal files that have been successfully\n")
    excludeFile.write("# downloaded, and so should be excluded from future syncs\n\n")
    if closeFile:
        excludeFile.close()
        return None
    else:
        return excludeFile


def syncDb(quickMode):
    # Truncate the journal on the primary server.
    if config.truncateJournal:
        journalPrefix = config.p4JournalDirRemote+"/"+config.journalTruncPrefix
        result = utils.execute('p4 '+config.p4OptionsRemote+' admin journal -z '
                +journalPrefix)
        if utils.shell_error != 0:
            notify.sendError('journal truncation failed: ' + result)
            return 1
    else:
        log.info('Not truncating journal as it is disabled.')

    if not utils.makeDir(os.path.join(config.p4RootLocal,
            config.incomingSubDir)) == 0:
        return 1
    incomingDir = os.path.join(config.p4RootLocal, config.incomingSubDir)
    excludeFileName = os.path.join(incomingDir, config.jnlExcludeTmpFileName)
    if not os.path.exists(excludeFileName):
        createJnlExcludesFile(excludeFileName)

    # Get the new journal files.
    # This will bring in journal files created by both the truncation process
    # and the checkpoint process. The actual name doesn't matter, just the
    # sequence number.
    excludeFileOpt = '--exclude-from='+excludeFileName
    status = 0
    status = status | rsynclib.rsyncFile(config.p4HostRemote,
            config.rsyncJnlModuleName, '*.jnl.*', config.p4RootLocalPsx,
            config.incomingSubDir+'/', excludeFileOpt, delete=0,
            # Python doesn't have a ternary operator
            ignoreMissingSrc=(False, True)[not config.truncateJournal])
    if status != 0:
        log.error("There were errors detected in transferring checkpoint and"
                " journal files... files are not being deleted from source")
    elif config.removeJnlsAfterSync:
        # Successfully copied the journal files, let us now
        # remove them from server.
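        # Note: the quotes around the glob are meant to keep the local shell
        # (assuming utils.execute goes through one) from expanding it, so the
        # pattern gets expanded by the shell on the remote host instead.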
        result = utils.execute('ssh '+config.p4HostRemote+' rm -f '+
                config.p4JournalDirRemotePsx+'/"*.jnl.*"')
        if utils.shell_error != 0:
            notify.sendError('Failed to remove checkpoint and journal files '
                    'on the server: ' + result)
    return status


def recoverFromCheckpt(checkptsDir, journalsDir):
    log.info('Recovering the perforce database from the latest checkpoint')
    # Let us find the most recent checkpoint (by the sequence number) and
    # recover from that. Apply all the journals that are newer than the
    # checkpoint itself (again, by the sequence number).
    checkpts = utils.listCheckPts(checkptsDir)
    if len(checkpts) == 0:
        notify.sendError("Perforce database is not initialized, and no "
                "checkpoint files were found to initialize/recover it under: "+
                checkptsDir)
        return 1
    latestCheckpt = checkpts[0]
    log.info('Recovering perforce database from checkpoint: %s', latestCheckpt)
    if replayJournalSet([(checkptsDir, latestCheckpt)]) != 0:
        return 1
    latestCheckptSeqNum = utils.checkpointSeqNum(latestCheckpt)
    journals = filter(lambda j: utils.journalSeqNum(j) >= latestCheckptSeqNum,
            utils.listJournals(journalsDir))
    if len(journals) > 0:
        journals.sort(key=utils.journalSeqNum)
        log.info('Replaying journals that are newer: %s', journals)
        if replayJournalSet([(journalsDir, journal)
                for journal in journals]) != 0:
            return 1
    return 0


def replayJournals(quickMode):
    """Applies any journals found in the "incoming" folder to the local
    server. Should be done after the data is successfully transferred. Each
    journal, after being successfully applied, is moved from the "incoming"
    folder into the "journals" folder."""
    incomingDir = os.path.join(config.p4RootLocal, config.incomingSubDir)
    checkptsDir = os.path.dirname(config.p4JournalLocal)
    journalsDir = os.path.dirname(config.p4JournalLocal)
    # TODO: Move the check to config.py
    if not os.path.isdir(journalsDir):
        log.error("The directory specified for config.p4JournalLocal doesn't "
                "exist or is not a directory")
        return 1

    if config.checkptRecoveryRequired:
        # In case of errors, a notification must have already been sent.
        if recoverFromCheckpt(checkptsDir, journalsDir) == 0:
            # We don't want to continue doing this in future runs.
            config.checkptRecoveryRequired = False
            if config.notifyCheckpointRecovery:
                notify.sendInfo("Checkpoint recovery completed successfully",
                        "")

    # In addition to the above step, we need to apply all journals in the
    # incoming directory.
    # If the incoming directory has no journals, then we have nothing to do
    # (this is unlikely).
    journals = utils.listJournals(incomingDir)
    if len(journals) == 0:
        log.info('There are no journals to replay in %s', incomingDir)
        return 0

    # Sort the journal names in the order of their sequence number.
    journals.sort(key=utils.journalSeqNum)
    log.info("Replaying journals in the order: %s", journals)
    excludeFileName = os.path.join(incomingDir, config.jnlExcludeTmpFileName)
    if not os.path.exists(excludeFileName):
        excludeFile = createJnlExcludesFile(excludeFileName, False)
    else:
        excludeFile = open(excludeFileName, "a")
    try:
        # List comprehension.
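        # Pass the open excludes file so that each successfully replayed
        # journal gets recorded in it and is skipped by future rsync runs.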
        if replayJournalSet([(incomingDir, journal) for journal in journals],
                journalsDir, excludeFile) != 0:
            return 1
    finally:
        excludeFile.close()

    if not quickMode:
        log.info('Verifying the databases...')
        status = utils.execute('p4d -r '+config.p4RootLocal+' -xv')
        if utils.shell_error != 0:
            notify.sendError('p4d -xv returned error: ' + status)
            return 1
        log.info("Removing old checkpoints and journals")
        utils.removeOldCheckpoints(checkptsDir, journalsDir)
    return 0


def replayJournalSet(journalTpl, relocationDir=None, excludesFile=None):
    for (dir, journal) in journalTpl:
        journal = os.path.join(dir, journal)
        log.info("Replaying checkpoint/journal: %s", journal)
        isGzipped = re.search(r'\.gz$', journal)
        result = utils.execute('p4d -r '+config.p4RootLocal+' -jr'+
                (isGzipped and ' -z ' or ' ')+journal)
        if utils.shell_error != 0:
            notify.sendError("Error while replaying checkpoint/journal: "+
                    journal+"\nError: "+result)
            return 1
        if not relocationDir == None:
            # We successfully replayed the journal, let us move it into the
            # journals directory.
            log.info("Relocating checkpoint/journal: %s", journal)
            status = utils.execute('mv -f '+journal+' '+relocationDir)
            if utils.shell_error != 0:
                notify.sendError("Failed to move checkpoint/journal: "+journal+
                        " to: "+relocationDir+
                        "\nMessage: " + status+
                        "\nTo avoid duplicate replay of this file, please "
                        "relocate or remove this file manually.")
                return 1
        if not excludesFile == None:
            excludesFile.write(os.path.basename(journal)+"\n")
    return 0


def runSync(quickMode):
    utils.updateScriptFiles()
    log.info("Stopping backup perforce server")
    # Stop the local perforce server so that we can apply the journals.
    # This will work even if the checkpoint restore is pending.
    result = utils.execute('p4 '+config.p4OptionsLocal+' admin stop',
            verbosity=0)
    # It is ok if the server is already stopped.
    if utils.shell_error != 0 and not re.search('Connect to server failed',
            result):
        notify.sendError("Couldn't stop local perforce server, unknown error: "
                + result)
        return 1

    log.info("-------Started synchronization...")
    status = 0
    status = status | syncDb(quickMode)
    status = status | syncData(quickMode)
    if status == 0:
        replayJournals(quickMode)
        #verifyLocalP4Server()
    log.info("Starting backup perforce server")
    utils.startPerforceLocal()
    log.info("-------Finished synchronization.")
    return status


if __name__ == '__main__':
    runSync(False)
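    # Running the module directly always performs a full (non-quick) sync;
    # quick mode is only used when a caller invokes runSync(True) itself.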