#*******************************************************************************
#
# Copyright (c) 1997-2017, Perforce Software, Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL PERFORCE SOFTWARE, INC. BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#*******************************************************************************
#*******************************************************************************
#* Name : P4::Journal.pm
#* Author : John Halbig <support@perforce.com>
#* Description : Perl module for manipulating Perforce Journal files.
#*
#* Provides methods for parsing, and editing
#* Perforce journal files.
#*
#* Based on the module for P4D 2008.2 by Tony Smith
#* (Who made it easy enough to do; THANKS TONY! :-)
#*
#* $Id: //depot/intranet/consulting/sw/MAIN/journal_tool/Journal.pm#4 $
#*******************************************************************************
#*******************************************************************************
#* Journal Record class. Used to hold ready parsed records.
#*******************************************************************************
package P4::JournalRec;
use AutoLoader;
use Carp;
use strict;
use vars qw( %FIELDMAP %SPEEDMAP $AUTOLOAD );
#
# The fieldmap stores details of the Perforce schema over the years. The
# fields within each version of each table are listed in the order they
# appear in the tables. All fieldnames must be listed in lowercase.
#
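# Records are matched against this map using the operation, schema
# version and table name that prefix every raw journal line. For
# example (illustrative), a line beginning '@pv@ 1 @db.counters@ ...'
# is decoded with entry 1 of the 'db.counters' list below, so its
# remaining fields are 'name' followed by 'value'.
#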
%FIELDMAP = (
# obsolete since 2006.2, included for completeness. Removed in 2019.1
'db.archive' => [
[ 'lbrfile','lbrrev','depotfile','depotrev','lbrtype' ],
# filetype bit-shift in version 1
[ 'lbrfile','lbrrev','depotfile','depotrev','lbrtype' ],
],
# added 2006.2
'db.archmap' => [
[ 'lbrfile','depotfile' ],
],
# obsolete since 2009.2, included for completeness.
'db.boddate' => [
[ 'key','attr','date' ],
],
# added in 2016.1
'db.bodresolve' => [
[ 'type','shelf','tokey','attr','fromkey','fromchange',
'basekey','basechange' ],
# added in 2020.1
[ 'type','shelf','tokey','attr','fromkey','fromchange',
'basekey','basechange','how','state','client' ],
],
# added in 2016.1
'db.bodtextcx' => [
[ 'type','key','change','attr','text' ],
],
# added in 2016.1
'db.bodtexthx' => [
[ 'type','key','attr','bulk','text' ],
],
# added in 2016.1
'db.bodtextsx' => [
[ 'type','shelf','key','attr','text','workchange' ],
[ 'type','shelf','key','attr','text','workchange','user','action' ],
],
# added in 2016.1
'db.bodtextwx' => [
[ 'type','client','key','attr','text','workchange' ],
[ 'type','client','key','attr','text','workchange','user','action' ],
],
'db.bodtext' => [
[ 'key','attr','text' ],
[ 'key','attr','isBulk','text' ],
],
'db.change' => [
[ 'change','desckey','client','user','date','status','description' ],
# 2008.2 add root mapping:
[ 'change','desckey','client','user','date','status',
'description','root' ],
# 2009.2 changed status from type int to ChangeStatus:
[ 'change','desckey','client','user','date','status',
'description','root' ],
# 2015.2 add importer, identity:
[ 'change','desckey','client','user','date','status',
'description','root','importer','identity' ],
# 2016.1 add access:
[ 'change','desckey','client','user','date','status',
'description','root','importer','identity','access' ],
# 2018.2 add update:
[ 'change','desckey','client','user','date','status',
'description','root','importer','identity','access','update' ],
# 2019.1
[ 'change','desckey','client','user','date','status',
'description','root','importer','identity','access','update','stream' ],
],
'db.changeidx' => [
[ 'identity','change' ],
],
# added 2010.2
'db.config' => [
[], # 2010.2 Initial Ver 1 so adding dummy entry 0
# 2018.2 - renamed to servername
[ 'servername','name','value' ],
],
# added 2019.2
'db.configh' => [
[ 'sname','name','version','date','server','user','ovalue','nvalue' ],
],
'db.counters'=> [
[ 'name','value' ],
# value is now text in 2008.1
[ 'name','value' ],
],
'db.depot' => [
[ 'name','type','address','map' ],
[ 'name','type','extra','map' ],
],
'db.desc' => [
[ 'desckey','description' ],
],
# host field renamed 'extra' in 2005.2 - for
# all schema versions.
'db.domain' => [
[ 'name','type','extra','mount','owner','updatedate','options',
'description' ],
[ 'name','type','extra','mount','owner','updatedate','accessdate',
'options','description' ],
[ 'name','type','extra','mount','owner','updatedate','accessdate',
'options', 'mapstate', 'description' ],
[ 'name','type','extra','mount','mount2','mount3','owner',
'updatedate','accessdate','options', 'mapstate', 'description' ],
[ 'name','type','extra','mount', 'mount2', 'mount3','owner',
'updatedate','accessdate','options', 'description' ],
# 2011.1 streams support added, no schema change
[ 'name','type','extra','mount','mount2','mount3','owner',
'updatedate','accessdate','options','description' ],
# 2012.2 add stream, serverid, partition
[ 'name','type','extra','mount','mount2','mount3','owner',
'updatedate','accessdate','options','description','stream',
'serverid','contents' ],
# 2017.1 Change partition to contents so rename backwards
[ 'name','type','extra','mount','mount2','mount3','owner',
'updatedate','accessdate','options','description','stream',
'serverid','contents' ],
],
# added 2013.1
'db.excl' => [
[], # 2013.1 Initial Ver 1 so adding dummy entry 0
[ 'depotfile','client','user' ],
],
# added 2019.1
'db.exclg' => [
[ 'repo','ref','file','lockid','user','created' ]
],
# added 2019.1
'db.exclgx' => [
[ 'lockid','repo','ref','file','user','created' ]
],
'db.fix' => [
[ 'job','change','date','xstatus','client','user' ],
[ 'job','change','date','status','client','user' ],
],
# added 2017.2
'db.graphindex' => [
[ 'id','filename','commitdate','blobsha','commitsha','flags' ],
# 2018.1 added, names changed in 2019.1 without new version
[ 'name','date','blobsha','commitsha','size','type','lfsoid' ],
],
# added 2016.2
'db.graphperm' => [
[ 'name','repo','ref','type','user','perm' ],
],
'db.group' => [
[ 'user','group' ],
[ 'user','group','maxresults' ],
[ 'user','group','issubgroup','maxresults' ],
[ 'user','group','issubgroup','maxresults','maxscanrows' ],
[ 'user','group','issubgroup','maxresults','maxscanrows','timeout' ],
[ 'user','group','issubgroup','maxresults','maxscanrows','maxlocktime','timeout' ],
# 2007.2 remove issubgroup:
[ 'user','group','type','maxresults','maxscanrows','maxlocktime','timeout' ],
# 2009.2 no schema change:
[ 'user','group','type','maxresults','maxscanrows','maxlocktime','timeout' ],
# 2010.2 add passwordtimeout:
[ 'user','group','type','maxresults','maxscanrows','maxlocktime','timeout','passwordtimeout' ],
# 2016.1 add maxopenfiles:
[ 'user','group','type','maxresults','maxscanrows','maxlocktime','maxopenfiles','timeout','passwordtimeout' ],
],
# added 2015.1
'db.groupx' => [
[ 'group','ldapconf','ldapsearchquery','ldapuserattribute', ],
# 2016.2 add ldapdnaattribute:
[ 'group','ldapconf','ldapsearchquery','ldapuserattribute',
'ldapdnaattribute' ],
],
'db.have' => [
[ 'clientfile','depotfile','haverev' ],
[ 'clientfile','depotfile','haverev','type' ],
# type bit-shift in version 2
[ 'clientfile','depotfile','haverev','type' ],
# 2015.1 add time:
[ 'clientfile','depotfile','haverev','type','time' ],
],
# added 2018.1, names changed in 2019.1:
'db.haveg' => [
[ 'repo','clientfile','depotfile','client','type','action','blobsha','commitsha','flags' ],
],
# added 2017.1 with version 1:
'db.haveview' => [
[],
[ 'name','seq','mapflag','viewfile','depotfile' ],
],
# Obsolete since 2001. Removed in 2013.3
'db.integ' => [
[ 'tofile','fromfile','startfromrev','endfromrev','torev','how',
'committed','resolved','change' ],
],
'db.integed' => [
[ 'tofile','fromfile','startfromrev','endfromrev','starttorev',
'endtorev','how','change' ],
],
# added 2020.1
'db.integedss' => [
[ 'tokey','attr','fromkey','fromchange','basekey','basechange','how','change' ],
],
# Obsolete since 2009.2. Removed in 2013.3
'db.ixdate' => [
[ 'date','attr','value' ]
],
'db.ixtext' => [
[ 'word','attr','value' ]
],
'db.ixtexthx' => [
[ 'type','word','attr','value' ]
],
# added 2014.2:
'db.jnlack' => [
[ 'serverid','lastupdate','servertype','persistedjnl','persistedpos',
'isalive','appliedjnl','appliedpos' ],
# 2018.2
[ 'serverid','lastupdate','servertype','persistedjnl','appliedjnl',
'persistedpos','appliedpos','jcflags','isalive','serveroptions',
'failoverseen' ],
],
'db.job' => [
[ 'job','xuser','xdate','xstatus','description' ],
],
# Obsolete since 98.2. Removed in 2013.3
'db.jobdesc' => [
[ 'xjob','xdescription' ],
],
'db.label' => [
[ 'name','depotfile','haverev' ],
],
# added 2016.1, names changed in 2019.1:
'db.ldap' => [
[ 'name','host','port','ssl','type','pattern','basedn','filter',
'scope','binddn','bindpass','realm','groupbasedn','groupfilter',
'groupscope','options','attruid','attremail','attrname' ],
],
'db.locks' => [
[ 'depotfile','client','user','islocked' ],
[ 'depotfile','client','user','action','islocked' ],
[ 'depotfile','client','user','action','islocked','change' ],
],
'db.logger' => [
[ 'seq','key','attr' ],
],
'db.message' => [
[ 'language','id','message' ],
],
'db.monitor' => [
[ 'id','user','func','args','start','runstate' ],
[ 'id','user','func','args','start','runstate',
'client','host','app' ],
# 2014.2 add lockinfo, change func -> function
# change start -> startdate,change app -> prog:
[ 'id','user','function','args','startdate','runstate',
'client','host','prog','lockinfo' ],
],
# added 2012.2:
'db.nameval' => [
[], # 2012.2 Initial Ver 1 so adding dummy entry 0:
[ 'key', 'value' ],
],
# added 2016.2:
'db.object' => [
[ 'sha','type','data' ],
],
# added 2013.1, 2019.1 changed name to seq:
'db.property' => [
[ 'name','seq','type','scope','value','date','user' ],
],
'db.protect' => [
[ 'seq','user','host','perm','mapflag','depotfile' ],
[ 'seq','isgroup','user','host','perm','mapflag','depotfile' ],
[ 'seq','isgroup','user','host','perm','mapflag','depotfile' ],
# Ver 3 same as ver 2 but supports admin level access:
[ 'seq','isgroup','user','host','perm','mapflag','depotfile' ],
# Ver 4 same as ver 3 but %d wildcards now escaped to %%d:
[ 'seq','isgroup','user','host','perm','mapflag','depotfile' ],
# 2016.2 add subpath, update:
[ 'seq','isgroup','user','host','perm','mapflag','depotfile',
'subpath','update' ],
],
# added 2016.2:
'db.pubkey' => [
# 2017.1 added update but didn't bump version!
['user','scope','key','digest','update' ],
],
# added 2016.2:
'db.ref' => [
[ 'repo','name','type','ref','symref' ],
],
# added 2016.2:
'db.refhist' => [
[ 'repo','name','type','action','date','user','ref','symref' ],
],
# added 2015.1:
'db.remote' => [
[ 'id','owner','options','address','desc','update','access','fetch',
'push' ],
# 2015.2 add rmtuser
[ 'id','owner','options','address','desc','update','access','fetch',
'push','rmtuser' ],
],
# added 2016.2:
'db.repo' => [
[ 'repo','owner','created','pushed','forked','desc','branch' ],
# 2017.1 add mirror
[ 'repo','owner','created','pushed','forked','desc','branch','mirror'],
# 2017.2 add options, id
[ 'repo','owner','created','pushed','forked','desc','branch','mirror','options','id' ],
# 2019.2
[ 'repo','owner','created','pushed','forked','desc','branch','mirror','options','id','gcmrrserver' ],
],
# added 2016.2, reserved for future use, removed in 2017.1:
'db.repoview' => [
[ 'name','seq','mapflag','viewfile','depotfile' ],
],
'db.resolve' => [
[ 'tofile','fromfile','startfromrev','endfromrev','starttorev',
'endtorev','how','resolved','basefile','baserev' ],
# 2011.1 change resolved -> state:
[ 'tofile','fromfile','startfromrev','endfromrev','starttorev',
'endtorev','how','state','basefile','baserev' ],
],
# added 2016.2, fields renamed in 2017.1:
'db.resolveg' => [
['tofile','fromfile','basesha','wantssha','how','state']
],
'db.rev' => [
[ 'depotfile','depotrev','type','ishead','action','change','date',
'lbrfile','lbrrev','lbrtype' ],
[ 'depotfile','depotrev','type','ishead','action','change','date',
'digest','lbrfile','lbrrev','lbrtype' ],
[ 'depotfile','depotrev','type','action','change','date',
'digest','lbrfile','lbrrev','lbrtype' ],
[ 'depotfile','depotrev','type','action','change','date','modtime',
'digest','lbrfile','lbrrev','lbrtype' ],
[ 'depotfile','depotrev','type','action','change','date','modtime',
'digest','traitlot','lbrfile','lbrrev','lbrtype' ],
[ 'depotfile','depotrev','type','action','change','date','modtime',
'digest','size','traitlot','lbrfile','lbrrev','lbrtype' ],
# type bit-shift in Ver 6
[ 'depotfile','depotrev','type','action','change','date','modtime',
'digest','size','traitlot','lbrfile','lbrrev','lbrtype' ],
# Ver 7
[ 'depotfile','depotrev','type','action','change','date','modtime',
'digest','size','traitlot','lbrfile','lbrrev','lbrtype' ],
# Ver 8:
[ 'depotfile','depotrev','type','action','change','date','modtime',
'digest','size','traitlot','lbrislazy','lbrfile','lbrrev','lbrtype' ],
# 2010.2 ver 9 no schema change, sub-indices support?
[ 'depotfile','depotrev','type','action','change','date','modtime',
'digest','size','traitlot','lbrislazy','lbrfile','lbrrev','lbrtype' ],
],
'db.revcx' => [
[ 'change','depotfile','depotrev','action' ],
],
'db.review' => [
[ 'user','seq','mapflag','depotfile','type' ],
# Rev 1 same as rev 0, but %d wildcards now escaped to %%d:
[ 'user','seq','mapflag','depotfile','type' ],
],
# added 2015.2:
'db.rmtview' => [
[ 'id','seq','mapflag','localfile','remotefile' ],
# 2016.1 add retain:
[ 'id','seq','mapflag','localfile','remotefile','retain' ],
],
# added 2019.2:
'db.scanctl' => [
[ 'depotpath','state','seq','dirs','files','zeros','dirserr','pri','reqpause','err' ],
],
# added 2019.2:
'db.scandir' => [
[ 'lskey','seq','file' ],
],
# added 2014.1
'db.sendq' => [
[ 'taskid','seq','handle','depotfile','clientfile','haverev',
'type','modtime','digest','size','lbrfile','lbrrev','lbrtype',
'flags' ],
# 2016.2 add clienttype:
[ 'taskid','seq','handle','depotfile','clientfile','haverev',
'type','modtime','digest','size','lbrfile','lbrrev','lbrtype',
'flags','clienttype' ],
# 2017.1 add bsha:
[ 'taskid','seq','handle','depotfile','clientfile','haverev',
'type','modtime','digest','size','lbrfile','lbrrev','lbrtype',
'flags','clienttype','bsha' ],
# 2017.2 add reposlot, sdigest:
[ 'taskid','seq','handle','depotfile','clientfile','haverev',
'type','modtime','digest','size','lbrfile','lbrrev','lbrtype',
'flags','clienttype','bsha','reposlot','sdigest' ],
# 2018.2 add depotrev,change,date, 2019.1 changed some names:
[ 'taskid','seq','handle','depotfile','clientfile','haverev',
'type','modtime','digest','size','lbrfile','lbrrev','lbrtype',
'flags','clienttype','depotrev','change','date','blobsha','reposlot','sdigest' ],
],
# added 2012.2
'db.server' => [
[ 'id','type','name','address','services','desc' ],
# 2014.2 add user :
[ 'id','type','name','address','services','desc','user' ],
# 2015.1 add externaladdress:
[ 'id','type','name','address','externaladdress','services',
'desc','user' ],
# 2018.2
[ 'id','type','name','address','externaladdress','services',
'desc','user','options','rplfrom','failoverseen']
],
# added 2015.1
'db.stash' => [
[ 'client','stream','type','seq','change' ],
],
# added 2019.1
'db.storage' => [
[ 'file','rev','type','refcount','digest','size','serversize','compcksum','date' ],
],
# added 2019.1
'db.storageg' => [
[ 'repo','sha','type','refcount','date' ],
],
# added 2012.2
'db.stream' => [
[], # 2012.2 Initial Ver 1 so adding dummy entry 0, 2019.1 changes some names:
[ 'stream','parent','title','type','preview','change','copychange',
'mergechange','highchange','hash','status' ],
],
# added 2017.1:
'db.submodule' => [
[ 'repo','path','subrepo' ],
],
# added 2014.2
'db.svrview' => [
[ 'id','type','seq','mapflag','viewfile' ],
],
# added 2012.1
'db.template' => [
[ 'name','change','seq','parent','type','path','viewfile',
'depotfile','changemap' ],
# 2014.2 No schema change; ChangeView spec field support:
[ 'name','change','seq','parent','type','path','viewfile',
'depotfile','changemap' ],
# Strangely, the 2015.2 recs are ver 3, but the 2017.1
# pre-release has ver 2. Probably an old glitch that will
# need fixing eventually, but for now:
[ 'name','change','seq','parent','type','path','viewfile',
'depotfile','changemap' ],
# 2015.2 changemap changed to type Value from type Change:
[ 'name','change','seq','parent','type','path','viewfile',
'depotfile','changemap' ],
],
# added 2016.1
'db.templatesx' => [
[ 'shelf','name','seq','change','parent','type','path','viewfile',
'depotfile','changemap' ],
],
# added 2016.1
'db.templatewx' => [
[ 'client','name','seq','change','parent','type','path','viewfile',
'depotfile','changemap' ],
],
# added 2017.2
'db.ticket' => [
[ 'user','host','ticket','state','token','updatedate' ],
],
# added 2017.2
'db.ticket.rp' => [
[ 'user','host','ticket','state','token','updatedate' ],
],
'db.traits' => [
[ 'traitlot','name','type', 'value' ],
],
'db.trigger' => [
[ 'seq','trigger','mapflag','depotfile','action' ],
# "trigger" field now contains the trigger type rather than the
# name - which is in the "name" field:
[ 'seq','name','mapflag','depotfile','trigger','action' ],
# As previous, but %d wildcards now %%d:
[ 'seq','name','mapflag','depotfile','trigger','action' ],
# 2015.1 add triggerdepotfile:
[ 'seq','name','mapflag','depotfile','triggerdepotfile','trigger','action' ],
],
# added 2019.2
'db.upgrades' => [
[ 'seq','name','state','startdate','enddate','info' ],
],
'db.user' => [
[ 'user','email','jobview','updatedate','accessdate','fullname' ],
[], # there was no ver 1!
[ 'user','email','jobview','updatedate','accessdate',
'fullname','password' ],
[ 'user','email','jobview','updatedate','accessdate',
'fullname','password','strength', 'ticket','enddate' ],
# 2010.2 add type, passwordchangedate:
[ 'user','email','jobview','updatedate','accessdate',
'fullname','password','strength', 'ticket','enddate',
'type','passwordchangedate' ],
# 2013.1 add passexpire, change passwordchangedate -> passdate:
[ 'user','email','jobview','updatedate','accessdate',
'fullname','password','strength', 'ticket','enddate',
'type','passdate','passexpire' ],
# 2013.2 add attempts:
[ 'user','email','jobview','updatedate','accessdate',
'fullname','password','strength', 'ticket','enddate',
'type','passdate','passexpire','attempts' ],
# 2014.2 add auth:
[ 'user','email','jobview','updatedate','accessdate',
'fullname','password','strength', 'ticket','enddate',
'type','passdate','passexpire','attempts','auth' ],
],
# added 2014.2:
'db.uxtext' => [
[ 'word','attr','value' ],
],
'db.view' => [
[ 'name','seq','mapflag','viewfile','depotfile' ],
# As previous, but %d wildcards now %%d.
[ 'name','seq','mapflag','viewfile','depotfile' ],
],
'db.working' => [
[ 'clientfile','depotfile','client','user','haverev','workrev',
'type','action','change','modtime','islocked' ],
# modtime was actually called date
[ 'clientfile','depotfile','client','user','haverev','workrev',
'type','action','change','modtime','islocked' ],
[ 'clientfile','depotfile','client','user','haverev','workrev',
'isvirtual','type','action','change','modtime','islocked' ],
[ 'clientfile','depotfile','client','user','haverev','workrev',
'isvirtual','type','action','change','modtime','islocked','digest' ],
[ 'clientfile','depotfile','client','user','haverev','workrev',
'isvirtual','type','action','change','modtime','islocked','digest',
'traitlot' ],
[ 'clientfile','depotfile','client','user','haverev','workrev',
'isvirtual','type','action','change','modtime','islocked','digest',
'size','traitlot' ],
# type bit-shift in version 6
[ 'clientfile','depotfile','client','user','haverev','workrev',
'isvirtual','type','action','change','modtime','islocked','digest',
'size','traitlot' ],
[ 'clientfile','depotfile','client','user','haverev','workrev',
'isvirtual','type','action','change','modtime','islocked','digest',
'size','traitlot','tampered' ],
# 2007.2 add clnttype:
[ 'clientfile','depotfile','client','user','haverev','workrev',
'isvirtual','type','action','change','modtime','islocked','digest',
'size','traitlot','tampered','clnttype' ],
#2009.2 add mfile:
[ 'clientfile','depotfile','client','user','haverev','workrev',
'isvirtual','type','action','change','modtime','islocked','digest',
'size','traitlot','tampered','clnttype','mfile' ],
#2013.2 add status, 2019.1 names changed:
[ 'clientfile','depotfile','client','user','haverev','workrev',
'isvirtual','type','action','change','modtime','islocked','digest',
'size','traitlot','tampered','clienttype','movedfile','status' ],
],
# added 2016.2, names changed in 2019.1
'db.workingg' => [
[ 'cfile','dfile','client','user','hrev','wrev','virtual','type',
'action','change','modtime','islocked','digest','size','traitlot',
'tampered','clienttype','movedfile','status','sha','repo' ],
],
# added 2019.1 - as documented table at version 3
'rdb.lbr' => [
[],
[],
[],
[ 'file','rev','type','status','who','when','action','digest','size',
'retries','errmsg','change','date','modtime','origin','target' ],
],
);
# Sub-index tables with schemas that track with the main table can be set here:
$FIELDMAP{ 'db.changex' } = $FIELDMAP{ 'db.change' };
$FIELDMAP{ 'db.fixrev' } = $FIELDMAP{ 'db.fix' };
$FIELDMAP{ 'db.have.rp' } = $FIELDMAP{ 'db.have' };
$FIELDMAP{ 'db.have.pt' } = $FIELDMAP{ 'db.have' };
$FIELDMAP{ 'db.integtx' } = $FIELDMAP{ 'db.integed' };
$FIELDMAP{ 'db.jobpend' } = $FIELDMAP{ 'db.job' };
$FIELDMAP{ 'db.locksg' } = $FIELDMAP{ 'db.locks' };
$FIELDMAP{ 'db.resolvex' } = $FIELDMAP{ 'db.resolve' };
$FIELDMAP{ 'db.revdx' } = $FIELDMAP{ 'db.rev' };
$FIELDMAP{ 'db.revhx' } = $FIELDMAP{ 'db.rev' };
$FIELDMAP{ 'db.revpx' } = $FIELDMAP{ 'db.rev' };
# added 2019.1
$FIELDMAP{ 'db.revstg' } = $FIELDMAP{ 'db.rev' };
$FIELDMAP{ 'db.revsx' } = $FIELDMAP{ 'db.rev' };
$FIELDMAP{ 'db.revsh' } = $FIELDMAP{ 'db.rev' };
$FIELDMAP{ 'db.revtx' } = $FIELDMAP{ 'db.rev' };
$FIELDMAP{ 'db.revux' } = $FIELDMAP{ 'db.rev' };
# added 2018.2
$FIELDMAP{ 'db.sendq.pt' } = $FIELDMAP{ 'db.sendq' };
# added 2019.1
$FIELDMAP{ 'db.storagesh' } = $FIELDMAP{ 'db.storage' };
# added 2019.2
$FIELDMAP{ 'db.upgrades.rp' } = $FIELDMAP{ 'db.upgrades' };
$FIELDMAP{ 'db.user.rp' } = $FIELDMAP{ 'db.user' };
$FIELDMAP{ 'db.view.rp' } = $FIELDMAP{ 'db.view' };
$FIELDMAP{ 'db.workingx' } = $FIELDMAP{ 'db.working' };
# added 2019.1 as documented
$FIELDMAP{ 'pdb.lbr' } = $FIELDMAP{ 'rdb.lbr' };
#*******************************************************************************
#* Construct a faster access version of the fieldmap. The current format
#* of the fieldmap is easier to read than the speedmap, so it's easier to
#* maintain it in that form, but for performance, we need it in a more
#* direct access structure.
#*******************************************************************************
foreach my $table ( keys %FIELDMAP ) {
my $v_ary = [];
foreach my $version ( @{$FIELDMAP{ $table }} ) {
my $idx = 0;
my $h = {};
foreach my $field ( @$version ) {
$h->{ $field } = $idx++;
}
push( @$v_ary, $h );
}
$SPEEDMAP{ $table } = $v_ary;
}
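# The result is, per table, an array of hashes mapping each field name
# to its position in the schema. For example (illustrative):
#
#   $SPEEDMAP{ 'db.counters' }[ 0 ]{ 'value' } == 1
#
# which lets _FieldIndex() below resolve a field name with two hash
# lookups rather than a scan of the field list.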
#*******************************************************************************
#* Method definitions
#*******************************************************************************
sub new
{
my $class = shift;
my $rawrec = shift;
my $self = { 'record' => $rawrec, 'fields' => undef };
bless( $self, $class );
}
sub DESTROY
{
}
sub Raw()
{
my $self = shift;
return $self->{'record'};
}
sub SetRaw( $ )
{
my $self = shift;
my $rec = shift;
$self->{'record'} = $rec;
$self->{'fields'} = undef;
}
# Returns the name of the table for this record. Faster than
# calling Fields
sub Table()
{
my $self = shift;
my $rec = $self->{'record'};
my @fields = split(/ /, $rec );
my $table = $fields[ 2 ];
$table =~ s/^\@(.*)\@/$1/;
return $table;
}
# Return the index for the named field in the specified table/version or
# undef if that table/version does not contain a field with that name. Field
# name must be in lowercase.
sub _FieldIndex( $$$ ) {
my $table = shift;
my $version = shift;
my $field = shift;
my $idx = 0;
if ( ! defined( $SPEEDMAP{ $table } ) ) {
croak("No fieldmap for table $table");
} elsif ( ! defined( $SPEEDMAP{ $table }[ $version ]{ $field } ) ) {
return undef;
} else {
$idx = $SPEEDMAP{ $table }[ $version ]{ $field };
$idx += 3; # Offset for operation, version, tablename
}
return $idx;
}
# Fetch the named field from a record using the field map
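# For example (illustrative), on a version-0 db.change record,
# FetchField( 'user' ) resolves to schema column 3 and hence to raw
# field index 6 once the operation/version/tablename prefix is skipped.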
sub FetchField( $ ) {
my $self = shift;
my $field = shift;
my @fields = $self->Fields();
my $op = $fields[0];
my $version = $fields[ 1 ];
my $tablename = $fields[ 2 ];
$field = lc $field;
return $op if ( $field eq "operation" );
return $version if ( $field eq "version" );
return $tablename if ( $field eq "table" );
my $idx = _FieldIndex( $tablename, $version, $field );
return undef unless $idx;
return $fields[ $idx ];
}
# Get the named field from a record using the field map - alias for FetchField
sub GetField( $ ) {
my $self = shift;
my $field = shift;
return $self->FetchField( $field );
}
# Set the named field in the record using the field map
sub SetField( $$ )
{
my $self = shift;
my $field = shift;
my $value = shift;
my @fields = $self->Fields();
my $lc_field = lc( $field );
my $op = $fields[0];
my $version = $fields[ 1 ];
my $tablename = $fields[ 2 ];
if ( $lc_field eq "version" || $lc_field eq "table" )
{
warn( "Setting $field not permitted" );
return undef;
}
my $idx = 0; # Default to operation field.
if ( $lc_field ne "operation" )
{
$idx = _FieldIndex( $tablename, $version, $lc_field );
}
return undef unless defined $idx;
$fields[ $idx ] = $value;
$self->Set( @fields );
return $self;
}
# Fetch fields by name. Expressions such as $rec->LbrFile() will be resolved
# using the field map.
sub AUTOLOAD
{
my $self = shift;
my $field = $AUTOLOAD;
$field =~ s/.*:://;
if ( @_ )
{
return $self->SetField( $field, shift );
}
else
{
return $self->FetchField( $field );
}
}
#*******************************************************************************
#* Main Journal package
#*******************************************************************************
package P4::Journal;
use POSIX qw( :fcntl_h ); # SEEK_SET, SEEK_END, etc.
use Carp;
use English;
use strict;
use vars qw($VERSION @ISA @EXPORT @EXPORT_OK @MEMBERS );
require DynaLoader;
@ISA = qw( DynaLoader );
$VERSION = '2020.1.1953492';
# Bootstrapping. A little tricky since we may or may not have compiled
# versions of some methods. We try to load the shared object first, and
# if that fails, we'll load in the pure Perl implementations of the
# methods.
eval {
no warnings 'all';
bootstrap P4::Journal $VERSION ;
use warnings 'all';
};
if ( $@ )
{
require P4::Journal::Subs;
}
@MEMBERS = qw(
File
EndParse
UpdateFreq
);
#*******************************************************************************
#* Public Methods
#*******************************************************************************
sub new
{
my $class = shift;
my $self = {};
my $arg;
@$self{@MEMBERS} = (undef) x @MEMBERS;
bless($self, $class);
if ( @_ ) {
$self->{File} = shift
}
return $self;
}
sub File( $ )
{
my $self = shift;
$self->{'File'} = shift;
}
sub Parse()
{
my $self = shift;
my $file;
my $filesize = 0;
my $offset = 0;
my $rec = "";
my @fields;
my $line = 0;
my $close = 1;
$self->{'EndParse'} = 0;
if ( @_ ) {
$file = shift;
open( FH, $file ) or croak( "Failed to open file $file" );
seek( FH, 0, SEEK_END );
$filesize = tell( FH );
seek( FH, 0, SEEK_SET );
} elsif ( defined ($self->{File} )) {
$file = $self->{File};
open(FH, $file) or croak("Failed to open file $file");
seek( FH, 0, SEEK_END );
$filesize = tell( FH );
seek( FH, 0, SEEK_SET );
} else { # Read from stdin instead.
$close = 0;
*FH = *STDIN;
}
# Work out the update frequency based on a rough guess of the
# number of lines in the file. Average width of lines will vary
# greatly, but 100 chars is a good default. We aim for 100 updates
# over the run, but we never want to update so often that it slows
# it down too much. For small files, an update every 1000 lines is
# enough.
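# For example, a 50 MB journal gives an UpdateFreq of 5000, i.e. a
# progress update roughly every 5000 lines - about 100 updates in all
# if lines average ~100 characters.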
if ( $filesize )
{
$self->{ 'UpdateFreq' } = $filesize/10000;
if ( $self->{ 'UpdateFreq' } < 1000 )
{
$self->{ 'UpdateFreq' } = 1000;
}
}
local $_;
my $recno = 0;
while ( <FH> ) {
last if ( $self->{'EndParse'} );
if ( $filesize && ($line % $self->{'UpdateFreq'} == 0) )
{
$offset = tell( FH );
$self->UpdateProgress( $offset/$filesize * 100 );
}
$line++;
$rec .= $_;
if ( $self->CompleteRecord( $rec ) )
{
$recno++;
$rec =~ s/\r?\n$//;
$self->ParseRecord( new P4::JournalRec( $rec ) );
$rec = "";
}
}
close(FH) if ( $close );
$self->UpdateProgress( 100 ) if ( $filesize );
1;
}
# Hook to allow aborting the parse halfway through
sub EndParse( )
{
my $self = shift;
$self->{'EndParse'} = 1;
}
# Default implementation just warns that it was called; override in a subclass.
sub ParseRecord( $ )
{
my $self = shift;
my $rec = shift;
warn( "Called the default implementation of ParseRecord. You probably" .
" didn't want to do that." );
}
# Default implementation just writes to stderr
sub UpdateProgress( $ )
{
my $self = shift;
my $pct = shift;
$| = 1;
printf( STDERR "Progress: [ %.0f%% ]\r", $pct );
print( STDERR "\n" ) if ( $pct == 100 );
}
# Autoload methods go after =cut, and are processed by the autosplit program.
1;
__END__
=head1 NAME
P4::Journal - Perl extension for parsing Perforce Journals
=head1 SYNOPSIS
    use P4::Journal;
    my $journal = new P4::Journal("jnl.1");
    $journal->Parse();
=head1 DESCRIPTION
P4::Journal provides a simple way to parse a Perforce Journal file.
It provides a basic parser which can be used to build applications
that parse and edit Perforce Journal files. It leaves the policy of
how it is used to the client, and merely implements the minimum
functionality required to write journal parsing and editing scripts.
Just parsing the checkpoint/journal is of little use in itself. To build
useful functionality, derive your own class from P4::Journal and override
the ParseRecord() method, as in the sketch below.
Documentation is still quite brief - sorry.
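A minimal sketch of such a subclass (the package name, the counting
policy and the checkpoint filename are illustrative, not part of the
module):

    package TableCounter;
    use base qw( P4::Journal );

    # Count the records seen for each table.
    sub ParseRecord( $ )
    {
        my $self = shift;
        my $rec = shift;
        $self->{'counts'}{ $rec->Table() }++;
    }

    package main;
    my $jnl = new TableCounter( "checkpoint.1" );
    $jnl->Parse();
    foreach my $table ( sort keys %{ $jnl->{'counts'} } )
    {
        printf( "%-20s %d\n", $table, $jnl->{'counts'}{ $table } );
    }
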
=head1 METHODS - P4::Journal
=over 4
=item new()
The constructor for the P4::Journal class can be invoked either
with or without a filename argument. If invoked without an
argument, then the filename must be passed as an argument to the
Parse() method when it is called.
    my $jnl = new P4::Journal();
    my $jnl2 = new P4::Journal("jnl.12");
=item Parse()
Parses the specified file, and builds a data structure containing
the parsed data.
    $jnl->Parse("jnl.12");
    $jnl2->Parse();
=item ParseRecord()
For each record that is parsed, a P4::JournalRec object is
created and passed to this method. The default implementation
merely warns that it has been called, so you will almost
certainly want to override this method to achieve your aim.
See below for documentation on the P4::JournalRec class, which
is your main tool for manipulating journal records.
=item EndParse()
Can be called to abort a parse halfway through if you've found
what you're looking for. Useful on large checkpoint files.
=item UpdateProgress()
Called to update the user on progress through the journal file.
The default implementation writes to stderr so the output doesn't
get mixed with edited journal records. If you want to suppress or
enhance the default output, then override this method in your
subclass.
=back
=head1 DESCRIPTION - P4::JournalRec
P4::JournalRec provides an object-oriented interface to the
journal records extracted from the input file. You have access
to the raw record; you may access the record as an array of
fields, or you may access fields by name.
=head1 METHODS - P4::JournalRec
=over 4
=item Raw()
Returns the raw journal record as a scalar
=item SetRaw()
Allows you to supply an updated raw record - use at your
own risk!
=item Set()
Supply a new record as an array of fields. Set() will handle
all the formatting of the record for you.
=item Fields()
Returns all the fields in the record as an array. All formatting
is removed so you get the decoded data to do with as you will.
Note that $rec->Set( $rec->Fields() ) is essentially a no-op.
=item FetchField()
Returns the value of the named field in the record. Note that:
C<$rec-E<gt>FetchField( "name" );>
is equivalent to:
C<$rec-E<gt>Name();>
courtesy of the AUTOLOADER.
=item GetField()
Returns the value of the named field in the record. Note that:
C<$rec-E<gt>GetField( "name" );>
is equivalent to:
C<$rec-E<gt>Name();>
courtesy of the AUTOLOADER.
Alias for C<$rec-E<gt>FetchField();>
=item SetField()
Updates the value of the named field in the record. Note that:
C<$rec-E<gt>SetField( "name", "fish" );>
is equivalent to:
C<$rec-E<gt>Name( "fish" );>
courtesy of the AUTOLOADER.
=item Operation()
Returns the type of operation for this journal record. A "pv" is
an insert (put), a "dv" is a delete, an "rv" is a replace and a
"vv" is a verify record. This field may be updated.
=item Version()
Returns the schema version number for this record. If you don't
know what that is, then don't mess with it.
=item Table()
Returns the name of the table for the record (e.g. db.rev).
=item AUTOLOAD()
The AUTOLOAD method allows you to get and set individual fields
by name, e.g. the change number in a db.change record might be
fetched like this:
C<$change = $rec-E<gt>Change();>
and set like this:
C<$rec-E<gt>Change( $change );>
You should be sure that such a field exists in the record you
are accessing before you call the method.
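For example (an illustrative sketch; the db.domain table and its owner
field come from the field map, while the user names are hypothetical),
a C<ParseRecord()> override could retarget the owner of matching domain
records and emit the edited journal line:

    sub ParseRecord( $ )
    {
        my $self = shift;
        my $rec = shift;
        if ( $rec->Table() eq "db.domain" && $rec->Owner() eq "olduser" )
        {
            $rec->Owner( "newuser" );
        }
        print( $rec->Raw(), "\n" );
    }
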
=back
=head1 AUTHOR
Tony Smith <tony@perforce.com>
John Halbig <support@perforce.com>
=head1 LICENSE
Copyright (c) 1997-2017, Perforce Software, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL PERFORCE SOFTWARE, INC. BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
=head1 SEE ALSO
perl(1).
=cut