eval '(exit $?0)' && eval 'exec perl -S $0 ${1+"$@"}' & eval 'exec perl -S $0 $argv:q' if 0; # THE PRECEDING STUFF EXECS perl via $PATH
# -*-Fundamental-*-
#
# p4jd - "tail" the Perforce journal to learn of interesting events.
#
# This is work in progress, but might serve as a good example of how a
# process can "tail" the Perforce journal.
#
# This script allows for multiple entries in an Actions table (see
# below) which allows you to configure it to trigger different actions
# when particular journal entries occur.
#
# NOTE(review): this file was recovered from a copy in which every
# "<FH>" read and "<<HEREDOC" had been destroyed by a text extractor.
# Spans marked "TODO(review)" are plausible reconstructions of lost
# text and must be confirmed against an intact copy.

use strict;
use Carp;
use POSIX 'setsid';
use POSIX ':sys_wait_h';
use Fcntl ':flock';             # import LOCK_* constants

$| = 1;

my $p4 = "/a/tools/bin/p4";
my $P4 = "$p4 -p perforce:1666 -u p4";
$ENV{"P4CONFIG"} = "P4ENV";

# Basename of this script, used in messages.
my $Myname;
($Myname = $0) =~ s%^.*/%%;

# This dispatch table tells us what to run when we see particular
# entries in the journal.  The default one below has been left in
# mainly as an example; it can be overridden with the "-a <file>"
# option.  The special name "ddfab_*" causes p4jd to add entries to
# the actions table based on information in the Branch specs.
#
my $Actions_file;               # -a <file>: external actions table
my @Actions;                    # parsed static action entries
my @Branch_Actions;             # action entries derived from Branch specs
my @rawActions;                 # unparsed action-table lines
my $Use_branch_actions = 0;     # set when a "ddfab_*" entry is seen

# Defaults.  TODO(review): the original default values were lost in the
# corrupted span; confirm these paths against the deployment.
my $Journal   = "/a/perforce/journal";      # -J overrides
my $LOGFILE   = "/a/perforce/logs/p4jd.log";# -L overrides
my $Ddfab_dir = "/a/tools/bin";             # location of ddfab_run

my $Usage = <<LIT;
usage: $Myname [-a <actions_file>] [-L <logfile>] [-J <journal>]
LIT

sub usage
{
    print STDERR $Usage;
    exit 1;
}

sub help
{
    # TODO(review): original help text lost; reconstructed from the
    # option-parsing code below.
    print STDERR <<LIT;
$Usage
  -a <actions_file>  load the actions table from <actions_file>
  -L <logfile>       append log output to <logfile>
  -J <journal>       tail the journal file <journal>
  -help              print this message
LIT
    exit 1;
}

# Install the built-in example actions table.
# Fields: name  op  table  path  chdir  action
#
sub def_actions
{
    # TODO(review): the original built-in table was lost in the
    # corrupted span; this is a placeholder example only.
    @rawActions = split(/\n/, <<'EOA');
# name	op	table	path	chdir	action
ddfab_*
EOA
}

# Load the actions table, either the built-in default or the file
# given with -a.  "ddfab_*" entries switch on branch-derived actions.
#
sub loadactions
{
    @Actions = ();
    if (! $Actions_file) {
	&def_actions();
    } else {
	if (! open(A, "<$Actions_file")) {
	    print "$Myname: can't open \"$Actions_file\": $!\n";
	    exit 1;
	}
	@rawActions = ();
	while (<A>) {
	    chomp;
	    push(@rawActions, $_);
	}
	close A;
    }

    foreach my $action (@rawActions) {
	if ($action =~ /^ddfab_\*/) {
	    $Use_branch_actions = 1;
	    &load_branch_actions();
	} else {
	    push (@Actions, $action);
	}
    }

    foreach my $a (@Actions) {
	&log("loadactions(): $a");
    }
}

# Build @Branch_Actions from "p4 branches" output.  Branch specs whose
# description carries "build:<type>" attributes (other than "daily" or
# "inactive") get a ddfab action entry.
#
sub load_branch_actions
{
    if (! open(BR, "$P4 branches |")) {
	print "$Myname: can't open \"$P4 branches\": $!\n";
	exit 1;
    }
    @Branch_Actions = ();
    while (<BR>) {
	if (/^Branch ([^\s]+) [0-9\/]+ '\*([^;]+);/) {
	    my $branch = $1;
	    my $attrs = $2;
	    if ($attrs =~ /\Winactive\W/) {
		next;
	    }
	    foreach my $attr (split(/\s+/, $attrs)) {
		if ($attr =~ /^build:(.*)$/) {
		    my $bld_type = $1;
		    if ($bld_type eq "daily") {
			next;
		    }
		    my $ent = "ddfab_$bld_type\tpv\tdb.rev\t//prod/$branch";
		    if ($bld_type eq "app" || $bld_type eq "os") {
			$ent .= "/$bld_type";
		    }
		    $ent .= "\t-\tddfab_module";
		    push(@Branch_Actions, $ent);
		}
	    }
	}
    }
    close (BR);
}

sub daemonize			# courtesy of "man perlipc":
{
    chdir '/'               or die "Can't chdir to /: $!";
    open STDIN, '/dev/null' or die "Can't read /dev/null: $!";
    open STDOUT, '>/dev/null'
                            or die "Can't write to /dev/null: $!";
    defined(my $pid = fork) or die "Can't fork: $!";
    exit if $pid;
    setsid                  or die "Can't start a new session: $!";
    open STDERR, '>&STDOUT' or die "Can't dup stdout: $!";
}

# TBD: configurability & async signalling

# Option parsing.  TODO(review): the original missing-argument checks
# were garbled ("$ARGV[0] < 0"); reconstructed as "$#ARGV < 0".
#
my @Args;
my $Args = "";
while ($#ARGV >= 0) {
    if ($ARGV[0] eq "-J") {
	shift;
	if ($#ARGV < 0) { &usage; }
	$Journal = $ARGV[0];
	shift;
	next;
    } elsif ($ARGV[0] eq "-L") {
	shift;
	if ($#ARGV < 0) { &usage; }
	$LOGFILE = $ARGV[0];
	shift;
	next;
    } elsif ($ARGV[0] eq "-a") {
	shift;
	if ($#ARGV < 0) { &usage; }
	$Actions_file = $ARGV[0];
	shift;
	next;
    } elsif ($ARGV[0] eq "-help") {
	&help;
    } elsif ($ARGV[0] =~ /^-/) {
	&usage;
    }
    if ($Args ne "") { $Args .= " "; }
    $Args .= $ARGV[0];
    push(@Args, $ARGV[0]);
    shift;
}

&log("$Myname: starting...\n");
&loadactions();

# First, open the journal file
#
my $J_size;
#$J_size = 999999999;	# DEBUG: fake a truncation

# Incremental journal-line parser state.  A journal field may be an
# "@"-quoted string that spans multiple lines ("@@" is a literal "@"),
# so parsing state must persist across calls.
#
my $split_journal_inquot = 0;	# true while inside an @-quoted field
my $split_journal_value;	# accumulates the quoted field's value
my @j;				# fields of the current journal entry

# Feed one journal line $l into the parser; when a complete entry has
# been assembled, call $func (by name) with the field list.
#
sub split_journal
{
    my ($l, $func) = @_;

    if (! $split_journal_inquot) {
	# We've got a new line, and we're not in quote, so reset the
	# journal field values array
	#
	@j = ();
    }

    while ($l) {
	if ($split_journal_inquot) {
	    while ($l) {
		if ($l =~ /^@@(.*\n)/) {
		    # "@@" is an escaped literal "@"
		    $l = $1;
		    $split_journal_value .= "@";
		    next;
		}
		if ($l =~ /^@(.*\n)/) {
		    # closing quote: field complete
		    $l = $1;
		    push(@j, $split_journal_value);
		    $split_journal_inquot = 0;
		    last;
		}
		if ($l =~ /^([^@]*\n)/) {
		    # rest of line is inside the quote
		    $split_journal_value .= $1;
		    $l = "";
		    last;
		}
		if ($l =~ /^([^@]+)(@.*\n)/) {
		    # consume up to the next "@"
		    $split_journal_value .= $1;
		    $l = $2;
		    next;
		}
	    }
	} else {
	    if ($l eq "\n") {
		last;
	    }
	    if ($l =~ /^(\s+)(.*\n)/) {
		# skip field separator
		$l = $2;
		next;
	    }
	    if ($l =~ /^@(.*\n)/) {
		# opening quote
		$l = $1;
		$split_journal_inquot = 1;
		$split_journal_value = "";
	    } else {
		# unquoted field
		$l =~ /([^\s]+)(.*\n)/;
		push(@j, $1);
		$l = $2;
	    }
	}
    }

    # We've emptied the line, and we're not in a quoted field, so
    # we've got the entire journal entry; process it.
    #
    if (! $split_journal_inquot) {
	no strict 'refs';
	&$func(@j);
	use strict 'refs';
    }
}

# Append a (possibly multi-line) message to $LOGFILE with a timestamp
# and our pid; continuation lines get a blank-padded timestamp column.
# Must always be called as &log(...) so the builtin log() isn't taken.
#
sub log
{
    my ($m) = @_;
    my ($sec,$min,$hour,$mday,$mon,$year,$wday,$yday,$isdst)
	= localtime(time);
    # N.B. "or", not "||": with "||" the die could never fire.
    open LOG, ">>$LOGFILE" or die "Can't open \"$LOGFILE\" for append";
    my $ts = sprintf("%02d-%02d-%04d %02d:%02d:%02d",
		     $mon+1, $mday, $year+1900, $hour, $min, $sec);
    my @m = split(/\n/, $m);
    foreach my $l (@m) {
	# print, not printf: $l must not be used as a format string
	print LOG "$ts: [$$] $l\n";
	$ts =~ s/./ /g;
    }
    close LOG;
}

# Check the journal for growth (or truncation), read any new data
# under an exclusive flock, and feed it through split_journal().
# Returns 1 if data was processed, 0/undef otherwise.
#
sub tail_check
{
    my ($func) = @_;

    my @s = stat J;
    if ($#s < 0) {
	&log("fstat \"$Journal\" failed: $!\n");
	exit 1;
    }
    my $C_size = $s[7];
    if ($C_size == $J_size) {
	return;
    }
    if ($C_size < $J_size) {
	# We were truncated; rewind:
	if (! seek(J, 0, 0)) {
	    &log("Could not rewind \"$Journal\": $!\n");
	    exit 1;
	}
	if ($C_size == 0) {
	    $J_size = 0;
	    return 0;
	}
    }

    # If here, either size grew, or we truncated and there's
    # stuff to read.
    #
    if (! flock(J, LOCK_EX)) {
	&log("couldn't flock LOCK_EX \"$Journal\": $!\n");
	exit 1;
    }

    # Just stash stuff in here, so we can promptly relinquish the lock.
    #
    my @J;
    while (<J>) {
	push(@J, $_);
    }

    if (! flock(J, LOCK_UN)) {
	&log("couldn't flock LOCK_UN \"$Journal\": $!\n");
	exit 1;
    }

    # Now process what we read...
    #
    foreach $_ (@J) {
	split_journal($_, $func);
    }

    @s = stat J;
    if ($#s < 0) {
	&log("fstat \"$Journal\" failed: $!\n");
	exit 1;
    }
    $J_size = $s[7];
    return 1;
}

# Open $Journal (waiting for it to become lockable; exiting if it does
# not exist), seek to its end, then poll it forever, reaping any child
# processes forked by the action handlers as we go.
#
sub tail
{
    my($Journal, $func) = @_;

    while (1) {
	if (open(J, "<$Journal")) {
	    last;
	}
	# ENOENT is fatal; any other open failure is retried.
	if ($! =~ /^No such file or directory/i) {
	    &log("Could not open journal \"$Journal\": $!\n");
	    exit 1;
	}
	sleep 1;
    }
    &log("opened $Journal\n");

    if (! seek(J, 0, 2)) {
	&log("Could not seek to end of \"$Journal\": $!\n");
	exit 1;
    }

    my @s = stat J;
    if ($#s < 0) {
	&log("fstat \"$Journal\" failed: $!\n");
	exit 1;
    }
    $J_size = $s[7];

    while (1) {
	&tail_check($func);
	sleep 1;

	# Receive the souls of lost zombies...
	#
	waitpid(-1, &WNOHANG);
    }
}

# Run $cmd via the shell, logging the command, its output and a "!"
# marker on failure ("=" on success); returns the exit status.
#
sub action
{
    my ($name, $cmd) = @_;

    &log("$name> $cmd\n");
    my $cmdout = `$cmd`;
    my $sts = $?;
    my $schr = ($sts ? "!" : "=");
    &log("$name$schr $cmdout\n");
    return $sts;
}

# Wait (bounded) until "p4 counter change" reaches $change.
# Returns 1 on success, 0 on timeout.
#
sub waitfor
{
    my ($name, $change) = @_;

    # To be safe, wait until "p4 counter change" reflects this change.
    # We won't wait forever, however; if we don't see the expected
    # change within a reasonable time, we just give up.
    #
    my $i;
    for ($i = 20; $i; $i--) {
	my $cur_change = `$P4 counter change 2>&1`;
	chop $cur_change;
	if ($cur_change >= $change) {
	    return 1;
	}
	if ($i > 10) {
	    sleep 1;
	} else {
	    sleep 3;
	}
    }
    &log("$name: *** timed out waiting for the change counter!\n");
    return 0;
}

my %Sync_last;

# Action handler: fork a child that waits for $change to be visible,
# then force-syncs the client (or runs the sync_support hook) and
# updates the per-action counter on success.
#
sub sync_module
{
    my($name, $dir, $change) = @_;

    # Only one per customer at a given change level
    #
    if (! ($change > ($Sync_last{$name} || 0))) {
	return;
    }

    # Having this up here means that we'll only try triggering for this
    # $mod/$change one time.  But relying on subsequent records (if the
    # change contained multiple matching files, for instance) for
    # retries after failures isn't right.  Our contract is to bump the
    # handler action once for each applicable change set.
    #
    $Sync_last{$name} = $change;

    my $have = `$P4 counter $name 2>&1`;
    if ($change > $have) {
	&log("$name: $P4 sync -f \@$change,$change\n");
	my $pid;
	if (! ($pid = fork())) {
	    # In the child
	    if ($dir ne "-" && (! chdir $dir)) {
		&log("$name: *** could not cd to $dir: $!\n");
		exit 1;
	    }
	    if (&waitfor($name, $change)) {
		my $sts;
		if ($name eq "sync_support") {
		    if (! ($sts = &action($name,
			"ssh support /var/www/html/syncit $change"))) {
			&action($name, "$P4 counter $name $change 2>&1");
		    }
		    exit $sts;
		} else {
		    # Note: we rely on the $P4CONFIG stuff here!
		    #
		    delete $ENV{"PWD"};
		    delete $ENV{"USER"};
		    delete $ENV{"USERNAME"};
		    if (! ($sts = &action($name,
			"$P4 sync -f \@$change,$change 2>&1"))) {
			&action($name, "$P4 counter $name $change 2>&1");
		    }
		    exit $sts;
		}
	    }
	    &log("$name: *** timed out waiting for the change counter update\n");
	    exit 1;
	}
    }
}

my %Ddfab_last;

# Action handler: fork a child that waits for $change to be visible,
# then kicks the remote ddfab_run build script; ddfab_run itself
# decides when to update the counter.
#
sub ddfab_module
{
    my($name, $dir, $change) = @_;

    # Only one per customer at a given change level
    #
    if (! ($change > ($Ddfab_last{$name} || 0))) {
	return;
    }

    # Having this up here means that we'll only try triggering for this
    # $mod/$change one time.  But relying on subsequent records (if the
    # change contained multiple matching files, for instance) for
    # retries after failures isn't right.  Our contract is to bump the
    # handler action once for applicable change set.
    #
    $Ddfab_last{$name} = $change;

    my $mod = $name;
    $mod =~ s/^.*_//;

    my $have = `$P4 counter $name 2>&1`;
    if ($change > $have) {
	&log("$name: ddfab_run_$mod apache 0002\n");
	my $pid;
	if (! ($pid = fork())) {
	    # In the child
	    if (&waitfor($name, $change)) {
		# For the build daemon stuff, we just bump the
		# ddfab_run_$mod script; _it_ will decide when to update
		# the counter.
		#
		my $sts = &action($name,
		    "/usr/bin/rsh build $Ddfab_dir/ddfab_run $mod apache 0002 2>&1");
		exit $sts;
	    }
	    &log("$name: *** timed out waiting for the change counter update\n");
	    exit 1;
	}
    }
}

# Called (via split_journal) with the fields of one journal entry;
# matches it against the actions table and dispatches the handler.
# $j[0]=op, $j[2]=table, $j[3]=path, $j[7]=change number.
#
sub handle_entry
{
    my (@j) = @_;
    my @Perform_Actions;

    if ($Use_branch_actions) {
	# A Branch spec changed: rebuild the branch-derived actions.
	if ($j[2] eq "db.domain" && $j[3] eq "b") {
	    &load_branch_actions();
	}
	@Perform_Actions = (@Actions, @Branch_Actions);
    } else {
	@Perform_Actions = @Actions;
    }

    foreach my $a (@Perform_Actions) {
	if ($a =~ /^\s*\#/ || $a =~ /^\s*$/) {
	    next;
	}
	my($name, $op, $table, $path, $chdir, $action) = split(/\s+/, $a);

	# The check for !~ $path/test/ is a bit of a kludge, but
	# makes life easier for testfolk...
	#
	if ($j[0] eq $op && $j[2] eq $table
	    && $j[3] =~ /$path/
	    && $j[3] !~ /\/\/prod\/[^\/]+\/test\//) {
	    no strict 'refs';
	    &$action($name, $chdir, $j[7]);
	    use strict 'refs';
	}
    }
}

daemonize();
&tail($Journal, "handle_entry");