#!/usr/bin/python
#
# Processes the Perforce journal entries for use in hot backup of the
# primary server.
#
# TODO: add a test for connection ability to the remote server at startup time.
# TODO: add a queue for the RemoteUpdateDepot job.

import Journal, listener, cutil, os, Setup, RemoteUtil, Serializer


class RemoteUpdateJournal(listener.Listener):
    NODATA_THRESHOLD = 30
    DATA_THRESHOLD = 100
    READ_INTERVAL = 5

    def __init__(self):
        listener.Listener.__init__(self)
        self.ssh = None
        self.noDataCount = RemoteUpdateJournal.NODATA_THRESHOLD
        self.dataCount = 0
        self.noReadCount = 0
        # Build the backup server's p4d command line from the configured
        # executable and its journal options.
        cmd = []
        for x in Setup.BACKUP.P4D:
            cmd.append(x)
        for x in Setup.BACKUP.P4D_JOURNAL_OPTS:
            cmd.append(x)
        cutil.log(cutil.INFO, ("Using backup server p4d command [", cmd, "]"))
        self.p4d_cmd = cmd
        # Allow the configuration to override the data threshold.
        if Setup.SERVER.maxSSHLineTransfer:
            RemoteUpdateJournal.DATA_THRESHOLD = Setup.SERVER.maxSSHLineTransfer

    def isHandled(self, obj):
        # We can do checks here to limit the amount of traffic, such
        # as "don't write to the db.have file".
        return not obj.incomplete

    def listen(self, obj):
        self.noDataCount = 0
        self.dataCount += 1
        if self.ssh is None:
            self.ssh = RemoteUtil.SSH_CMD(self.p4d_cmd)
        if not self.ssh.isAlive():
            self.ssh.connect()
        self.ssh.sendToServer(obj.lines)
        self.noReadCount += 1
        if self.noReadCount > RemoteUpdateJournal.READ_INTERVAL:
            self.noReadCount = 0
            outLines = self.ssh.readFromServer()
            # TODO: check outLines for errors from the server.
        if self.dataCount > RemoteUpdateJournal.DATA_THRESHOLD:
            self.dataCount = 0
            # Too much data without a break.  Go and update the
            # server to ensure we aren't too far behind.
            self.ssh.disconnect()

    def noData(self, hadData):
        '''
        Called before a pause in polling for data.  hadData will be True
        if the previous poll attempt returned with data.
        '''
        self.noDataCount += 1
        self.dataCount = 0
        if self.noDataCount == RemoteUpdateJournal.NODATA_THRESHOLD:
            self.disconnect()

    def stoppedLoop(self):
        '''
        Called after the poll loop completes.  Gives the listeners a
        chance to clean themselves up.
        '''
        self.disconnect()

    def disconnect(self):
        if self.ssh is not None:
            self.ssh.disconnect()


class RemoteUpdateDepot(listener.BatchListener):
    NODATA_THRESHOLD = 3
    MAXIMUM_PENDING_SIZE = 5

    def __init__(self, localDepotMap=Setup.SERVER.depotDirs,
                 remoteDepotMap=Setup.BACKUP.depotDirs, filename=None):
        if filename is None:
            filename = os.path.join(Setup.SERVER.datadir,
                                    "RemoteUpdateDepot.ser")
        # Allow the configuration to override the pending-file limit.
        if Setup.SERVER.maxRSyncFileTransfer:
            RemoteUpdateDepot.MAXIMUM_PENDING_SIZE = \
                Setup.SERVER.maxRSyncFileTransfer
        listener.BatchListener.__init__(self, filename,
                RemoteUpdateDepot.NODATA_THRESHOLD,
                RemoteUpdateDepot.MAXIMUM_PENDING_SIZE)
        self.localDepots = localDepotMap
        self.remoteDepots = remoteDepotMap
        # depotFiles is a map from the depot to a list of files relative
        # to that depot.  Since we're using rsync, duplicates should be
        # handled correctly.
        self.depotFiles = self.add_serializable('depotFiles',
                                                Serializer.DictList())

    def isHandled(self, obj):
        # Handle any changes that alter the depot.
        return len(obj.fields) > 5 \
            and obj.fields[0] == 'pv' \
            and obj.fields[2] == 'db.revcx'
        # As far as I can tell, for db.revcx the fields are:
        #   0: pv (always insert)
        #   1: 0 (version of the type of operation)
        #   2: db.revcx (database to update)
        #   3: XXX (changelist number)
        #   4: XXX (depot file to update)
        #   5: R (revision of the new file)
        #   6: M (type of change: 0 = add, 1 = edit, 2 = delete,
        #      3 = integrate for add; what about integrate deletes,
        #      integrate edits?)
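
        # Illustrative example only (values invented for this comment):
        # a db.revcx record of the shape described above would arrive
        # here with, e.g.,
        #   obj.fields == ['pv', '0', 'db.revcx', '1234',
        #                  '//depot/main/foo.c', '7', '1']
        # meaning changelist 1234 edited //depot/main/foo.c, creating
        # revision 7 of it.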
    def enqueue(self, obj):
        p4name = obj.fields[4]
        # Split '//depot/path' into the depot name and the path relative
        # to that depot (see the illustrative splitDepotPath() sketch at
        # the bottom of this file).
        endOfDepotPos = p4name.index('/', 2)
        depot = p4name[2:endOfDepotPos]
        # Don't include any initial "/" mark in the name, or the joined
        # path will ignore the depot path.
        path = p4name[endOfDepotPos + 1:]
        localFile = os.path.join(self.localDepots[depot], path)
        # The archive stores revisions under names with a ',' suffix
        # (such as 'name,v' files), so match on that prefix.
        fname = os.path.basename(localFile) + ','
        lDir = os.path.dirname(localFile)
        cutil.log(cutil.DEBUG, ("Checking [", lDir,
                "] for files starting with [", fname, "]"))
        if not os.path.exists(lDir):
            cutil.log(cutil.DEBUG, ("Directory doesn't exist: it must be ",
                    "an integration that didn't create files."))
        else:
            dH = []
            for x in os.listdir(lDir):
                if x.startswith(fname):
                    # Record the file relative to the depot directory.
                    pathname = os.path.join(lDir, x)[
                            len(self.localDepots[depot]) + 1:]
                    cutil.log(cutil.DEBUG, ("Adding [", pathname, "]"))
                    dH.append(pathname)
            self.depotFiles.extend(depot, dH)

    def flush(self):
        # This should really happen in a different thread.
        if self.depotFiles.has_keys() > 0:
            for (depotName, files) in self.depotFiles.items():
                RemoteUtil.rsync(
                        self.localDepots[depotName],
                        self.remoteDepots[depotName],
                        files)
                self.depotFiles.clear(depotName)
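

# ---------------------------------------------------------------------
# Illustrative sketch only: the depot-path split that enqueue() performs
# inline, pulled out as a self-contained function.  splitDepotPath() is
# a hypothetical helper written for this sketch; it is not used by the
# tool above.
def splitDepotPath(p4name):
    '''Split a '//depot/dir/file' name into ('depot', 'dir/file').'''
    # Skip the leading '//' and find the slash that ends the depot name.
    endOfDepotPos = p4name.index('/', 2)
    # Drop the slash after the depot name too; otherwise os.path.join()
    # against the local depot directory would treat the remainder as an
    # absolute path and discard the depot directory.
    return (p4name[2:endOfDepotPos], p4name[endOfDepotPos + 1:])

# For example, splitDepotPath('//depot/main/foo.c') returns
# ('depot', 'main/foo.c').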
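
# ---------------------------------------------------------------------
# Illustrative sketch only: how these listeners are presumably driven.
# The poll loop lives in the Journal/listener modules, which aren't
# visible in this file, so pollJournalEntries() below is a hypothetical
# stand-in for whatever feeds journal entries to the listeners:
#
#   listeners = [RemoteUpdateJournal(), RemoteUpdateDepot()]
#   for entry in pollJournalEntries():       # hypothetical driver
#       for lsnr in listeners:
#           if lsnr.isHandled(entry):
#               lsnr.listen(entry)
#
# RemoteUpdateDepot, as a BatchListener, presumably has its enqueue()
# and flush() calls driven by the base class.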