eval '(exit $?0)' && eval 'exec perl -S $0 ${1+"$@"}'
& eval 'exec perl -S $0 $argv:q'
    if 0;
# THE PRECEDING STUFF EXECS perl via $PATH
# -*-Fundamental-*-

# This is work in progress, but might serve as a good example of how a
# process can "tail" the Perforce journal to learn of interesting
# events.  In the hardwired example here, we look for submits to
# //iweb/..., and sync a particular client workspace.  Better would be
# to add configurability for "plugins" to register interest and be
# sent some sort of signal when the event is seen in the journal.
#
use POSIX 'setsid';
use POSIX ':sys_wait_h';

# This dispatch table tells us what to run when we see particular
# entries in the journal.  Fields per line:
#   name  op  table  path-regex  chdir-dir  action-sub
#
# NOTE(review): the original heredoc body was lost in extraction;
# restore the real table entries (e.g. an //iweb/... sync_module line).
#
my @Actions = split(/\n/, <<LIT);
# name	op	table	path		chdir		action
LIT

# Standard daemonization (perlipc recipe): detach from the terminal,
# become a session leader, and point the std handles at /dev/null.
# NOTE(review): only the STDOUT line survived extraction; the rest is
# reconstructed from the canonical perlipc recipe -- confirm.
#
sub daemonize {
    chdir '/'                  or die "Can't chdir to /: $!";
    open STDIN,  '/dev/null'   or die "Can't read /dev/null: $!";
    open STDOUT, '>/dev/null'  or die "Can't write to /dev/null: $!";
    defined(my $pid = fork)    or die "Can't fork: $!";
    exit if $pid;              # parent exits; child carries on
    setsid                     or die "Can't start a new session: $!";
    open STDERR, '>&STDOUT'    or die "Can't dup stdout: $!";
}

# TBD: configurability & async signalling

use Carp;
use strict;
use Fcntl ':flock';	# import LOCK_* constants

$| = 1;

my $Myname;
($Myname = $0) =~ s%^.*/%%;

my $Usage = <<LIT;
usage: $Myname [-a <actions-file>] [-J <journal>] [-L <logfile>]
LIT

# Print the short usage message and exit non-zero.
sub usage {
    print STDERR $Usage;
    exit 1;
}

# Print the long help message and exit non-zero.
sub help {
    print STDERR <<LIT;
$Usage
	-a <actions-file>	read the action dispatch table from this file
	-J <journal>		the Perforce journal file to tail
	-L <logfile>		append our own log entries to this file
	-help			print this message
LIT
    exit 1;
}

# Defaults.
# NOTE(review): the original default values for $Journal and $LOGFILE
# were lost in extraction -- restore the real paths.
my $Journal = "journal";		# TODO(review): confirm default
my $LOGFILE = "journal_tail.log";	# TODO(review): confirm default
my $Actions_file;
my $Args = "";
my @Args;

# Parse the command line.  Each option that takes a value checks that
# the value is actually present (BUGFIX: was a numeric compare of the
# value itself, "$ARGV[0] < 0", which never caught a missing argument).
#
while ($#ARGV >= 0) {
    if ($ARGV[0] eq "-J") {
	shift;
	if ($#ARGV < 0) {
	    &usage;
	}
	$Journal = $ARGV[0];
	shift;
	next;
    } elsif ($ARGV[0] eq "-L") {
	shift;
	if ($#ARGV < 0) {
	    &usage;
	}
	$LOGFILE = $ARGV[0];
	shift;
	next;
    } elsif ($ARGV[0] eq "-a") {
	shift;
	if ($#ARGV < 0) {
	    &usage;
	}
	$Actions_file = $ARGV[0];
	shift;
	next;
    } elsif ($ARGV[0] eq "-help") {
	&help;
    } elsif ($ARGV[0] =~ /^-/) {
	&usage;
    }
    if ($Args ne "") {
	$Args .= " ";
    }
    push(@Args, $ARGV[0]);
    shift;
}

# An explicit actions file overrides the built-in table.
#
if (defined($Actions_file)) {
    if (! open(A, "<$Actions_file")) {
	print STDERR "$Myname: can't open \"$Actions_file\": $!\n";
	exit 1;
    }
    my $slashsave = $/;
    undef $/;			# slurp the whole file in one read
    @Actions = split(/\n/, <A>);
    $/ = $slashsave;
    close A;
}

# First, open the journal file
#
my $J_size;
#$J_size = 999999999;	# DEBUG: fake a truncation

# Journal-entry parser state.  Journal fields are either bare words or
# @-quoted strings ("@@" inside a quoted string is a literal "@"); a
# quoted value may span multiple physical lines, so the in-quote state
# and the value accumulator persist across calls.
#
my $split_journal_inquot = 0;
my $split_journal_value;
my @j;

# Consume one physical journal line.  When a complete entry (possibly
# spanning several lines) has been accumulated in @j, invoke $func
# (a sub name) with the entry's fields.
#
sub split_journal {
    my ($line, $func) = @_;
    if (! $split_journal_inquot) {
	# We've got a new line, and we're not in quote, so reset the
	# journal field values array
	#
	@j = ();
    }
    while ($line) {
	if ($split_journal_inquot) {
	    # Inside an @-quoted value: keep eating until the closing @.
	    while ($line) {
		if ($line =~ /^@@(.*\n)/) {
		    $line = $1;
		    $split_journal_value .= "@";	# escaped literal @
		    next;
		}
		if ($line =~ /^@(.*\n)/) {
		    $line = $1;
		    push(@j, $split_journal_value);	# closing quote
		    $split_journal_inquot = 0;
		    last;
		}
		if ($line =~ /^([^@]*\n)/) {
		    $split_journal_value .= $1;		# value continues...
		    $line = "";
		    last;
		}
		if ($line =~ /^([^@]+)(@.*\n)/) {
		    $split_journal_value .= $1;
		    $line = $2;
		    next;
		}
	    }
	} else {
	    if ($line eq "\n") {
		last;
	    }
	    if ($line =~ /^(\s+)(.*\n)/) {
		$line = $2;		# skip inter-field whitespace
		next;
	    }
	    if ($line =~ /^@(.*\n)/) {
		$line = $1;		# opening quote
		$split_journal_inquot = 1;
		$split_journal_value = "";
	    } else {
		$line =~ /([^\s]+)(.*\n)/;
		push(@j, $1);		# bare (unquoted) field
		$line = $2;
	    }
	}
    }
    # We've emptied the line, and we're not in a quoted field, so
    # we've got the entire journal entry; process it.
    #
    if (! $split_journal_inquot) {
	no strict 'refs';
	&$func(@j);
	use strict 'refs';
    }
}

# Append a (possibly multi-line) message to our log file, each line
# stamped with the time and our pid.  Continuation lines get a blank
# stamp of the same width so entries line up.
#
sub log {
    my ($m) = @_;
    my ($sec,$min,$hour,$mday,$mon,$year,$wday,$yday,$isdst) =
	localtime(time);
    # BUGFIX: was 'open ... || die', whose || bound to the filename
    # string, so a failed open was never detected; use low-precedence
    # 'or' instead.
    open LOG, ">>$LOGFILE" or die "Can't open \"$LOGFILE\" for append: $!";
    my $ts = sprintf("%02d-%02d-%04d %02d:%02d:%02d",
		     $mon+1, $mday, $year+1900, $hour, $min, $sec);
    my @m = split(/\n/, $m);
    foreach my $l (@m) {
	printf LOG "$ts: [$$] $l\n";
	$ts =~ s/./ /g;		# blank stamp for continuation lines
    }
    close LOG;
}

# One poll of the journal: compare its current size against what we
# last saw; on truncation rewind, then read (under a shared-state
# flock) any new data and feed it through split_journal().
#
sub tail_check {
    my ($func) = @_;
    my @s = stat J;
    if ($#s < 0) {
	&log("fstat \"$Journal\" failed: $!\n");
	exit 1;
    }
    my $C_size = $s[7];
    if ($C_size == $J_size) {
	return;			# nothing new
    }
    if ($C_size < $J_size) {
	# We were truncated; rewind:
	if (! seek(J, 0, 0)) {
	    &log("Could not rewind \"$Journal\": $!\n");
	    exit 1;
	}
	if ($C_size == 0) {
	    $J_size = 0;
	    return 0;
	}
    }
    # If here, either size grew, or we truncated and there's
    # stuff to read.
    #
    if (! flock(J, LOCK_EX)) {
	&log("couldn't flock LOCK_EX \"$Journal\": $!\n");
	exit 1;
    }
    # Just stash stuff in here, so we can promptly relinquish the lock.
    #
    my @J;
    while (<J>) {
	push(@J, $_);
    }
    if (! flock(J, LOCK_UN)) {
	&log("couldn't flock LOCK_UN \"$Journal\": $!\n");
	exit 1;
    }
    # Now process what we read...
    #
    foreach $_ (@J) {
	split_journal($_, $func);
    }
    @s = stat J;
    if ($#s < 0) {
	&log("fstat \"$Journal\" failed: $!\n");
	exit 1;
    }
    $J_size = $s[7];
    return 1;
}

# Open the journal (retrying while it doesn't yet exist is NOT what the
# surviving code does -- a missing file is fatal and only other open
# errors retry; NOTE(review): confirm that is the intended polarity),
# seek to its end, then poll it forever.
#
sub tail {
    my ($Journal, $func) = @_;
    while (1) {
	if (open(J, "<$Journal")) {
	    last;
	}
	if ($! =~ /^No such file or directory/i) {
	    &log("Could not open journal \"$Journal\": $!\n");
	    exit 1;
	}
	sleep 1;
    }
    &log("opened $Journal\n");
    if (! seek(J, 0, 2)) {
	&log("Could not seek to end of \"$Journal\": $!\n");
	exit 1;
    }
    my @s = stat J;
    if ($#s < 0) {
	&log("fstat \"$Journal\" failed: $!\n");
	exit 1;
    }
    $J_size = $s[7];
    while (1) {
	&tail_check($func);
	sleep 1;
	# Receive the souls of lost zombies...
	#
	waitpid(-1, &WNOHANG);
    }
}

my $have_change = 0;
my $p4 = "/a/tools/bin/p4";
my $P4 = "$p4 -p perforce:1666 -u p4";
$ENV{"P4CONFIG"} = "P4ENV";

# Run a shell command, logging the command, its output and whether it
# failed ("!") or succeeded ("=").  Returns the command's exit status.
#
sub action {
    my ($name, $cmd) = @_;
    &log("$name> $cmd\n");
    my $cmdout = `$cmd`;
    my $sts = $?;
    my $schr = ($sts ? "!" : "=");
    &log("$name$schr $cmdout\n");
    return $sts;
}

# To be safe, wait until "p4 counter change" reflects this change.
# We won't wait forever, however; if we don't see the expected
# change within a reasonable time, we just give up.
#
sub waitfor {
    my ($name, $change) = @_;
    my $i;
    for ($i = 20; $i; $i--) {
	my $cur_change = `$P4 counter change 2>&1`;
	chop $cur_change;
	if ($cur_change >= $change) {
	    return 1;
	}
	# Poll quickly at first, then back off.
	if ($i > 10) {
	    sleep 1;
	} else {
	    sleep 3;
	}
    }
    &log("$name: *** timed out waiting for the change counter!\n");
    return 0;
}

my %Sync_last;

# Action handler: sync the workspace at $dir to @$change, then record
# the change in the $name counter.  Forks; the child does the work.
#
sub sync_module {
    my ($name, $dir, $change) = @_;
    # Only one per customer at a given change level
    #
    if (! ($change > $Sync_last{$name})) {
	return;
    }
    # Having this up here means that we'll only try triggering for this
    # $mod/$change one time.  But relying on subsequent records (if the
    # change contained multiple matching files, for instance) for
    # retries after failures isn't right.  Our contract is to bump the
    # handler action once for applicable change set.
    #
    $Sync_last{$name} = $change;
    my $have = `$P4 counter $name 2>&1`;
    if ($change > $have) {
	&log("$name: p4 sync \@$change\n");
	my $pid;
	if (! ($pid = fork())) {
	    # In the child
	    if (! chdir $dir) {
		# BUGFIX: message said literal "name:"; interpolate $name.
		&log("$name: *** could not cd to $dir: $!\n");
		exit 1;
	    }
	    if (&waitfor($name, $change)) {
		my $sts;
		# Note: we rely on the $P4CONFIG stuff here!
		#
		delete $ENV{"PWD"};
		delete $ENV{"USER"};
		delete $ENV{"USERNAME"};
		if (! ($sts = &action($name, "$p4 sync \@$change 2>&1"))) {
		    &action($name, "$P4 counter $name $change 2>&1");
		}
		exit $sts;
	    }
	    &log("$name: *** timed out waiting for the change counter update\n");
	    exit 1;
	}
    }
}

my %Ddfab_last;

# NOTE(review): $Ddfab_dir was referenced but never declared in the
# recovered source (would not compile under 'use strict'); restore the
# real build-daemon script directory here.
my $Ddfab_dir = "";	# TODO(review): confirm path

# Action handler: kick the build daemon's ddfab_run_<mod> script on the
# build host for this change.  Forks; the child does the work.
#
sub ddfab_module {
    my ($name, $dir, $change) = @_;
    # Only one per customer at a given change level
    #
    if (! ($change > $Ddfab_last{$name})) {
	return;
    }
    # Having this up here means that we'll only try triggering for this
    # $mod/$change one time.  But relying on subsequent records (if the
    # change contained multiple matching files, for instance) for
    # retries after failures isn't right.  Our contract is to bump the
    # handler action once for applicable change set.
    #
    $Ddfab_last{$name} = $change;
    my $mod = $name;
    $mod =~ s/^.*_//;
    my $have = `$P4 counter $name 2>&1`;
    if ($change > $have) {
	&log("$name: ddfab_run_$mod $change\n");
	my $pid;
	if (! ($pid = fork())) {
	    # In the child
	    if (&waitfor($name, $change)) {
		# For the build daemon stuff, we just bump the
		# ddfab_run_$mod script; _it_ will decide when to update
		# the counter.
		#
		my $sts = &action($name,
		    "/usr/bin/rsh build $Ddfab_dir/ddfab_run_$mod $change 2>&1");
		exit $sts;
	    }
	    # BUGFIX: message said literal "name:"; interpolate $name.
	    &log("$name: *** timed out waiting for the change counter update\n");
	    exit 1;
	}
    }
}

# Called (via split_journal) with the fields of each complete journal
# entry; match it against @Actions and dispatch the configured handler.
# Field layout relied on here: $j[0]=op, $j[2]=table, $j[3]=path,
# $j[7]=change number.
#
sub handle_entry {
    my (@j) = @_;
    foreach my $a (@Actions) {
	if ($a =~ /^\s*\#/ || $a =~ /^\s*$/) {
	    next;		# skip comments and blank lines
	}
	my ($name, $op, $table, $path, $chdir, $action) = split(/\s+/, $a);
	if ($j[0] eq $op && $j[2] eq $table && $j[3] =~ /$path/) {
	    no strict 'refs';
	    &$action($name, $chdir, $j[7]);
	    use strict 'refs';
	}
    }
}

daemonize;
&tail($Journal, "handle_entry");