Mark Smith ([staff profile] mark) wrote in [site community profile] changelog, 2009-08-25 06:26 pm

[dw-free] Remove a lot more cruft that's either ljcom specific or unused nowadays. Also, we don't need dmtpd, drop that repo.

[commit: http://hg.dwscoalition.org/dw-free/rev/34b82504fb2d]

Remove a lot more cruft that's either ljcom specific or unused nowadays.
Also, we don't need dmtpd, drop that repo.

Patch by [staff profile] mark.

Files modified:
  • bin/evwatch
  • bin/lj-repo-own
  • bin/lj-upgrade
  • bin/statserv.pl
  • bin/trunk-update.pl
  • bin/upgrading/blobify_userpics.pl
  • bin/upgrading/migrate-phoneposts.pl
  • bin/upgrading/user-message-subs
  • bin/weblog-summarize.pl
  • cvs/multicvs.conf
--------------------------------------------------------------------------------
diff -r 1540a384cb42 -r 34b82504fb2d bin/evwatch
--- a/bin/evwatch	Tue Aug 25 18:10:33 2009 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,179 +0,0 @@
-#!/usr/bin/perl
-use strict;
-use warnings;
-use lib "$ENV{LJHOME}/cgi-bin";
-require 'ljlib.pl';
-
-$|++;
-
-use Errno qw(EAGAIN EWOULDBLOCK);
-use LJ::Blockwatch;
-use IO::Socket::INET;
-use Time::HiRes qw(tv_interval);
-
-my %files;
-my %filetimes;
-my $last_time_checked = time();
-
-my %time_averages;
-
-my %sockets; # fileno -> IO::Socket::INET socket
-my %socket_destinations; # fileno -> "hostname:port"
-
-
-#############################################################################
-# This block handles initial connections to the nodes we want to listen to.
-# The list is hard coded in the @destinations list for the moment.
-#############################################################################
-
-{
-    my %connecting_sockets;
-    my @destinations = qw(localhost:7600 127.0.0.1:7600);
-
-    foreach my $dest (@destinations) {
-        my $sock = IO::Socket::INET->new( PeerHost => $dest, Blocking => 0 ) or die "Couldn't connect: $!";
-        $connecting_sockets{$sock->fileno} = $sock;
-        $socket_destinations{$sock->fileno} = $dest;
-    }
-
-    sleep 3;
-
-    my $win = '';
-    foreach my $fd (keys %connecting_sockets) {
-        vec($win, $fd, 1) = 1;
-    }
-    select(undef, my $wout = $win, undef, 0);
-
-    while (my ($fd, $sock) = each %connecting_sockets) {
-        if (vec($wout, $fd, 1)) {
-            $sockets{$fd} = $sock;
-            $sock->write("evwatch\n");
-        }
-    }
-}
-
-die "Nothing allowed us to connect" unless keys %sockets;
-
-my %socket_buffers = map { ($_, '') } keys %sockets; # fileno -> buffer
-
-#############################################################################
-# This block handles listening to each of the sockets for reading and handing
-# the incoming data off to sub process_line anytime there has been a full
-# line read.
-#############################################################################
-
-while (1) {
-    my $rin = '';
-    foreach my $fd (keys %sockets) {
-        vec($rin, $fd, 1) = 1;
-    }
-    select(my $rout = $rin, undef, undef, undef);
-
-    # Read data from the sockets that are ready
-    SOCK: foreach my $fd (keys %sockets) {
-        my $sock = $sockets{$fd};
-        my $bufref = \$socket_buffers{$fd};
-
-        if (vec($rout, $fd, 1)) {
-            READ: while (1) {
-                my $length = sysread($sock, my $read_buffer, 1024);
-
-                if ($length) {
-                    $$bufref .= $read_buffer;
-                    next READ; # Read again, till we get a read error.
-                }
-
-                if ($! == EAGAIN || $! == EWOULDBLOCK) {
-                    last READ; # We've read all we can on this loop.
-                }
-
-                # Other errors mean we just close the connection and move on.
-                delete $sockets{$fd};
-                delete $socket_buffers{$fd};
-                next SOCK;
-            }
-
-            my $dest = $socket_destinations{$fd};
-
-            while ($$bufref =~ s/(.*?)\r?\n//) {
-                my $line = $1;
-                next unless $line;
-                my ($filename, $time, $utime, $direction, $event) = split /,/, $line;
-                process_line("${dest}${filename}", $time, $utime, $direction, $event);
-            }
-        }
-    }
-}
-
-#############################################################################
-# Process a line of incoming data, arguments are:
-#   label - hostname and filename concatted together
-#   time, utime - pair of integers that report when this event happened
-#   direction - boolean indicating the direction of this event
-#       begin is 0
-#       end is 1
-#   event - integer representing the event that occurred
-#############################################################################
-
-sub process_line {
-    my ($label, $time, $utime, $direction, $event) = @_;
-    my $filename = $label;
-    my $current_time = time();
-
-    $filetimes{$filename} = $current_time;
-    my $filedata = $files{$filename} ||= {};
-
-    my $eventdata = $filedata->{$event} ||= [];
-
-    if ($direction) { # backing out one operation
-        my $start_times = pop @$eventdata;
-        delete $filedata->{$event} unless @$eventdata;
-        return unless $start_times;
-        my $interval = tv_interval($start_times, [$time, $utime]);
-        my $average = \$time_averages{$event};
-        if (defined $$average) {
-            $$average *= .95;
-            $$average += ($interval * .05);
-        } else {
-            $$average = $interval;
-        }
-    } else { # adding an event
-        push @$eventdata, [$time, $utime];
-    }
-
-    if ($last_time_checked + 1 <= $current_time) {
-        $last_time_checked = $current_time;
-
-        foreach my $key (keys %filetimes) {
-            if ($filetimes{$key} < $current_time - 10) {
-                print "Removing $key.\n";
-                delete $filetimes{$key};
-                delete $files{$key};
-            }
-        }
-        dump_stats();
-    }
-}
-
-
-sub dump_stats {
-    while (my ($filename, $filedata) = each %files) {
-        next unless keys %$filedata;
-        print "For '$filename'\n";
-
-        while (my ($event, $times) = each %$filedata) {
-            my $event_name = LJ::Blockwatch->get_event_name($event);
-            print "  $event_name has " . @$times . " outstanding.\n";
-        }
-    } continue { print "\n"; }
-
-    foreach my $event (map {$_->[1]}
-                       sort {$a->[0] <=> $b->[0]}
-                       map { [$time_averages{$_}, $_] }
-                       keys %time_averages) {
-        my $time = $time_averages{$event};
-        my $event_name = LJ::Blockwatch->get_event_name($event);
-        printf "$time\t$event_name\n";
-    }
-    print "\n";
-}
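
The removed evwatch kept its per-event timing statistics as an exponentially weighted moving average (the .95/.05 blend in process_line above). A minimal standalone sketch of that smoothing; the record_interval name and the sample values are illustrative, only %time_averages and the weights come from the script:

    #!/usr/bin/perl
    use strict;
    use warnings;

    # Exponentially weighted moving average, as in evwatch's process_line:
    # each completed event contributes 5% of its interval, the running
    # average keeps the other 95%.
    my %time_averages;   # event id -> smoothed interval, in seconds

    sub record_interval {
        my ( $event, $interval ) = @_;
        if ( defined $time_averages{$event} ) {
            $time_averages{$event} = $time_averages{$event} * 0.95 + $interval * 0.05;
        }
        else {
            $time_averages{$event} = $interval;    # first sample seeds the average
        }
    }

    # A single slow sample only nudges the average upward gradually.
    record_interval( 1, $_ ) for ( 0.010, 0.012, 0.500, 0.011 );
    printf "event 1 average: %.4fs\n", $time_averages{1};
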
diff -r 1540a384cb42 -r 34b82504fb2d bin/lj-repo-own
--- a/bin/lj-repo-own	Tue Aug 25 18:10:33 2009 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,45 +0,0 @@
-#!/usr/bin/perl
-#
-
-use strict;
-use Getopt::Long;
-
-my $anon = 0;
-my $user = "";
-
-sub usage () {
-        die "
-Usage:
-   lj-repo-own --anonymous
-   lj-repo-own --user=<danga_username>
-";
-}
-
-usage() unless GetOptions(
-        "anonymous" => \$anon,
-        "user=s"    => \$user,
-        );
-
-usage() if      $anon && $user;
-usage() unless  $anon || $user =~ /^\w+$/;
-
-my $src = $anon ?
-        ":pserver:anonymous\@cvs.livejournal.org:" :
-        "$user\@cvs.danga.com:";
-
-chdir "$ENV{LJHOME}" or die;
-
-my @files = `find cvs -type f -path '*CVS/Root'`;
-chomp @files;
-foreach my $f (@files) {
-    open(R, $f) or die;
-    my $line = <R>;
-    close R;
-    print "in $f\tfrom $line ";
-    $line =~ s/.+:/$src/;
-    print "  to $line\n";
-    open(W, ">$f") or die;
-    print W $line;
-    close W;
-}
-
diff -r 1540a384cb42 -r 34b82504fb2d bin/lj-upgrade
--- a/bin/lj-upgrade	Tue Aug 25 18:10:33 2009 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,35 +0,0 @@
-#!/usr/bin/perl
-
-use strict;
-use IO::Socket::INET;
-
-unless ($ENV{LJHOME}) {
-    die "\$LJHOME not set.";
-}
-chdir "$ENV{LJHOME}" or die "Failed to chdir to \$LJHOME";
-
-system("cvsreport.pl", "-u", "-c", "-s")
-    and die "Failed to run cvsreport.pl with update.";
-
-system("cvsreport.pl", "-c", "-s")
-    and die "Failed to run cvsreport.pl second time.";
-
-system("bin/upgrading/update-db.pl", "-r", "-p")
-    and die "Failed to run update-db.pl with -r/-p";
-
-system("bin/upgrading/update-db.pl", "-r", "--cluster=all")
-    and die "Failed to run update-db.pl on all clusters";
-
-system("bin/upgrading/texttool.pl", "load")
-    and die "Failed to run texttool.pl load";
-
-print "Restarting apache...\n";
-
-my $sock = IO::Socket::INET->new(PeerAddr => "127.0.0.1:7600")
-        or die "Couldn't connect to webnoded (port 7600)\n";
-
-print $sock "apr\r\n";
-while (my $ln = <$sock>) {
-    print "$ln";
-    last if $ln =~ /^OK/;
-}
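
The apache restart at the end of lj-upgrade (and again in trunk-update.pl's bap() further down) is a small line-based exchange with webnoded: connect to port 7600, send the command followed by CRLF, then read lines until one starts with OK. Pulled out on its own, assuming webnoded is listening locally as in the script:

    #!/usr/bin/perl
    use strict;
    use warnings;
    use IO::Socket::INET;

    # Ask webnoded to restart apache ("apr"), as lj-upgrade's last step did.
    my $sock = IO::Socket::INET->new( PeerAddr => "127.0.0.1:7600" )
        or die "Couldn't connect to webnoded (port 7600): $!\n";

    print $sock "apr\r\n";        # command, CRLF-terminated
    while ( my $ln = <$sock> ) {
        print $ln;                # echo webnoded's progress output
        last if $ln =~ /^OK/;     # a line starting with OK ends the exchange
    }
    close $sock;
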
diff -r 1540a384cb42 -r 34b82504fb2d bin/statserv.pl
--- a/bin/statserv.pl	Tue Aug 25 18:10:33 2009 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,204 +0,0 @@
-#!/usr/bin/perl -w
-# LiveJournal statistics server. Sits on a UDP port and journals
-# information on the incoming hit rate, manages site bans, etc.
-# Loosely based on the ljrpcd code to save typing ;)
-#
-
-use strict;
-use IO::Socket;
-use IO::Handle;
-use Proc::ProcessTable;
-use DBI;
-
-require "$ENV{'LJHOME'}/cgi-bin/ljlib.pl";
-
-# Max message length and port to bind.
-my $MAXLEN = 512;
-my $PORTNO = 6200;
-my $PIDFILE = '/home/lj/var/statserv.pid';
-my $LOGDIR = '/home/lj/logs/';
-
-# Maximum amount of hits they can use in five minutes.
-my %maxes = ( 'ip' => 15, 'guest' => 20, 'user' => 25 );
-
-# Pid and pidfile.
-my $pid;
-my $is_parent = 1;
-# Socket. Needs to be here for the HUP stuff.
-my $sock;
-# Cache hash.
-my %caches = ();
-# Cache array.
-my @events = ();
-
-# Exceptions hash (IP range or username as keys)
-# If you want some host (such as a big stupid random proxy) to
-# be more lenient with the number of hits it can make in five minutes,
-# put the value in here. If value is -1, then there is no limit.
-my %except = ();
-
-# In case we're shot, unlink the pidfile.
-$SIG{TERM} = sub {
-    unlink($PIDFILE);
-    exit 1;
-};
-
-# Local network bind to.
-my $MYNET = '10.0';
-
-if (-e $PIDFILE) {
-    open (PID, $PIDFILE);
-    my $tpid;
-    chomp ($tpid = <PID>);
-    close PID;
-    my $processes = Proc::ProcessTable->new()->table;
-    if (grep { $_->cmndline =~ /statserv/ } @$processes) {
-        print "Process exists already, quitting.\n";
-        exit 1;
-    }
-}
-
-print "LiveJournal Statistics Daemon starting up into the background...\n";
-
-if ($pid = fork) {
-  # Parent, log pid and exit.
-  open(PID, ">$PIDFILE")   or die "Couldn't open $PIDFILE for writing: $!\n";
-  print PID $pid;
-  close(PID);
-  print "Closing ($pid) wrote to $PIDFILE\n";
-  $is_parent = 1;
-  exit;
-} else {
-  # This is the child.
-  my($cmdmsg, $remaddr, $remhost);
-
-  # HUP signal handler.
-  $SIG{HUP} = \&restart_request;
-  # SIGUSR handler.
-  $SIG{USR1} = sub { open_logfile(); };
-
-  open_logfile();
-
-  $sock = IO::Socket::INET->new(LocalPort => "$PORTNO", Proto => 'udp')	or die "socket: $@";
-
-  # Main loop.
-  while ($sock->recv($cmdmsg, $MAXLEN)) {
-    my ($port, $ipaddr) = sockaddr_in($sock->peername);
-    my $ip_addr = inet_ntoa($ipaddr);
-
-    # Make sure it's from around here.
-    if ($ip_addr !~ m/^$MYNET/) {
-      print "Got message from an invalid host.\n";
-      next;
-    }
-
-    # Quick command parsing, since there isn't much to it.
-    if ($cmdmsg =~ s/^cmd:\s//) {
-        handle_request($cmdmsg);
-        next;
-    }
-  }
-  die "recv: $!\n";
-} 
-
-# Sub to restart the daemon.
-sub restart_request {
-  $sock->close;
-  unlink($PIDFILE);
-  exec($0);
-}
-
-
-# Handle the request. This updates the appropriate caches,
-# and may set a ban.
-# Requests look like:
-# cmd: cachename : ip_addr : type : url
-# type can be: ip, guest, or user
-# If type is "ip" then cachename can be anything. I suggest 
-# it be set to "ip" as well. If just to save space.
-sub handle_request {
-  my $cmd = shift;
-  my $now = time();
-
-  # Clear expired events.
-  clean_events($now);
-  # As of now, we don't care about the URL, really.
-  if ($cmd =~ m/^(\w+)\s:\s([\d\.]+)\s:\s(\w+)/) {
-      my $user = $1;
-      my $ip_addr = $2;
-      my $type = $3;
-      # If there was no cookie of any kind, the type 
-      # name is set to "ip" - in this case we up the
-      # cache number for the IP range. 
-      if ($type eq "ip") {
-          # This regex is dumb, but the data we have is trustable.
-          $user = $ip_addr;
-          $user =~ s/(\d+)\.(\d+)\.(\d+)\.(\d+)/$1\.$2\.$3\./;
-      }
-      unless (exists $caches{$user}) {
-          $caches{$user} = { 'numhit' => 0, 'type' => $type };
-      }
-      push @events, [ $user, $now ];
-      $caches{$user}->{'numhit'}++;
-
-      # Now we check to see if they have hit too fast, and ban if so.
-      if (should_ban($user)) {
-          # FIXME: For final operation, this should be replaced with
-          # a call to set_ban(). This is also going to spam a ton,
-          # but with the "spiffy" algorithm I can't easily nuke a user.
-          print "Would have banned user $user. Hits: " . $caches{$user}->{'numhit'} . "\n";
-      }
-      # After this, "add_stat($user, $type, $url)" should run.
-  } else {
-      print "Got a mal-formed request: $cmd\n";
-  }
-
-}
-
-# Returns 1 if the passed "user" should be banned, 0 if not.
-sub should_ban {
-    my $user = shift;
-
-    my $max = $except{$user} || $maxes{$caches{$user}->{'type'}} || 0; 
-    # If it doesn't have a defined class, do we really want it around?
-    return 1 unless ($max);
-    return 1 if ($caches{$user}->{'numhit'} > $max);
-
-    return 0;
-}
-
-# Removes old events, and decrements caches.
-sub clean_events {
-    my $now = shift;
-    while (@events && $events[0]->[1] < $now - 360) {
-        my $deadevt = shift @events;
-        if (--$caches{$deadevt->[0]}->{'numhit'} < 1) {
-            delete $caches{$deadevt->[0]};
-        }
-    }
-}
-
-# Placeholder. Sets a ban in the database.
-sub set_ban {
-
-}
-
-# Placeholder. Runs various stats collections.
-sub add_stat {
-
-}
-
-# Opens a new tagged logfile. Also sets it to the default
-# filehandle, sets autoflush, and returns the new handle.
-sub open_logfile {
-    my $now = time();
-    my $logname = $LOGDIR . "statserv-" . $now . ".log";
-    my $logfh = new IO::Handle;
-    open($logfh, ">> $logname") or die "Couldn't open $logname: $!\n";
-    my $oldfh = select($logfh);
-    # Make sure the old one is closed.
-    close($oldfh);
-    # Set autoflush and return.
-    $| = 1;
-    return $logfh;
-}
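
The rate limiting in the removed statserv.pl is a sliding-window counter: each hit is queued with its timestamp and bumps a per-key counter, and clean_events walks the front of the queue to decrement counters for hits that have aged out (the comments say five minutes; the code used 360 seconds). The same idea reduced to a self-contained sketch; the window length and the record_hit name are illustrative:

    #!/usr/bin/perl
    use strict;
    use warnings;

    # Sliding-window hit counting in the style of statserv.pl:
    # a FIFO of (key, timestamp) events plus a per-key counter.
    my $WINDOW = 300;                                   # seconds
    my %maxes  = ( ip => 15, guest => 20, user => 25 );
    my %counts;                                         # key -> { numhit => N, type => ... }
    my @events;                                         # [ key, timestamp ], oldest first

    sub record_hit {
        my ( $key, $type, $now ) = @_;
        clean_events($now);
        $counts{$key} ||= { numhit => 0, type => $type };
        push @events, [ $key, $now ];
        $counts{$key}{numhit}++;
        my $max = $maxes{$type} || 0;
        return $counts{$key}{numhit} > $max;            # true => over the limit
    }

    sub clean_events {
        my $now = shift;
        while ( @events && $events[0][1] < $now - $WINDOW ) {
            my $old = shift @events;
            delete $counts{ $old->[0] } if --$counts{ $old->[0] }{numhit} < 1;
        }
    }

    # Sixteen hits from the same /24 inside one window trip the 'ip' limit of 15.
    my $over;
    $over = record_hit( "10.0.3.", "ip", time() ) for 1 .. 16;
    print $over ? "over limit\n" : "ok\n";
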
diff -r 1540a384cb42 -r 34b82504fb2d bin/trunk-update.pl
--- a/bin/trunk-update.pl	Tue Aug 25 18:10:33 2009 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,99 +0,0 @@
-#!/usr/bin/perl
-
-use strict;
-use IO::Socket::INET;
-
-unless ($ENV{LJHOME}) {
-    die "\$LJHOME not set.";
-}
-chdir "$ENV{LJHOME}" or die "Failed to chdir to \$LJHOME";
-
-my $cvsreport = "$ENV{LJHOME}/bin/cvsreport.pl";
-
-die "cvsreport.pl missing or unexecutable" unless -x $cvsreport;
-
-require "$ENV{LJHOME}/cgi-bin/ljlib.pl";
-die "NO DO NOT RUN THIS IN PRODUCTION" if $LJ::IS_LJCOM_PRODUCTION;
-
-
-update_svn();
-my @files = get_updated_files();
-sync();
-new_phrases() if  grep { /en.+\.dat/ } @files;
-update_db() if  grep { /\.sql/ } @files;
-bap() if  grep { /cgi-bin.+\.[pl|pm]/ } @files;
-
-
-my $updatedfilepath = "$ENV{LJHOME}/logs/trunk-last-updated.txt";
-my $updatedfh;
-open($updatedfh, ">$updatedfilepath") or return "Could not open file $updatedfilepath: $!\n";
-print $updatedfh time();
-close $updatedfh;
-
-if (@files) {
-    exit 0;
-}
-
-exit 1;
-
-
-
-sub update_svn {
-    system($cvsreport, "-u", "--checkout")
-	and die "Failed to run cvsreport.pl with update.";
-}
-
-sub get_updated_files {
-    my @files = ();
-    open(my $cr, '-|', $cvsreport, '-c', '-1') or die "Could not run cvsreport.pl";
-    while (my $line = <$cr>) {
-	$line =~ s/\s+$//;
-	push @files, $line;
-    }
-    close($cr);
-
-    return @files;
-}
-
-sub sync {
-    system($cvsreport, "-c", "-s")
-	and die "Failed to run cvsreport.pl sync second time.";
-}
-
-sub update_db {
-    foreach (1..10) {
-        my $res = system("bin/upgrading/update-db.pl", "-r", "-p");
-        last if $res == 0;
-
-        if (($res & 127) == 9) {
-            warn "Killed by kernel (ran out of memory) sleeping and retrying";
-            sleep 60;
-            next;
-        }
-
-        die "Unknown exit state of `update-db.pl -r -p`: $res";
-    }
-
-    system("bin/upgrading/update-db.pl", "-r", "--cluster=all")
-	and die "Failed to run update-db.pl on all clusters";
-}
-
-sub new_phrases {
-    my @langs = @_;
-
-    system("bin/upgrading/texttool.pl", "load", @langs)
-	and die "Failed to run texttool.pl load @langs";
-}
-
-sub bap {
-    print "Restarting apache...\n";
-
-    my $sock = IO::Socket::INET->new(PeerAddr => "127.0.0.1:7600")
-        or die "Couldn't connect to webnoded (port 7600)\n";
-
-    print $sock "apr\r\n";
-    while (my $ln = <$sock>) {
-	print "$ln";
-	last if $ln =~ /^OK/;
-    }
-}
diff -r 1540a384cb42 -r 34b82504fb2d bin/upgrading/blobify_userpics.pl
--- a/bin/upgrading/blobify_userpics.pl	Tue Aug 25 18:10:33 2009 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,72 +0,0 @@
-#!/usr/bin/perl
-
-use strict;
-use Getopt::Long;
-
-# load libraries now
-use lib "$ENV{'LJHOME'}/cgi-bin";
-use LJ::Blob;
-use Image::Size ();
-require "ljlib.pl";
-
-my $opt_fast;
-exit 1 unless
-    GetOptions("fast" => \$opt_fast,
-               );
-
-my $clusterid = shift;
-die "Usage: blobify_userpics.pl <clusterid>\n"
-    unless $clusterid;
-
-my $db = LJ::get_cluster_master($clusterid);
-die "Invalid/down cluster: $clusterid\n" unless $db;
-
-print "Getting count.\n";
-my $total = $db->selectrow_array("SELECT COUNT(*) FROM userpicblob2");
-my $done = 0;
-
-my $loop = 1;
-while ($loop) {
-    $loop = 0;
-    LJ::start_request();  # shrink caches
-    print "Getting 200.\n";
-    my $sth = $db->prepare("SELECT userid, picid, imagedata FROM userpicblob2 LIMIT 200");
-    $sth->execute;
-    while (my ($uid, $picid, $image) = $sth->fetchrow_array) {
-        $loop = 1;
-        my $u = LJ::load_userid($uid);
-        die "Can't find userid: $uid" unless $u;
-
-        # sometimes expunges don't expunge all the way.
-        if ( $u->is_expunged ) {
-            $db->do("DELETE FROM userpicblob2 WHERE userid=$uid AND picid=$picid");
-            next;
-        }
-        
-        my ($sx, $sy, $fmt) = Image::Size::imgsize(\$image);
-        die "Unknown format" unless $fmt eq "GIF" || $fmt eq "JPG" || $fmt eq "PNG";
-        $fmt = lc($fmt);
-        
-        my $err;
-        my $rv = LJ::Blob::put($u, "userpic", $fmt, $picid, $image, \$err);
-        die "Error putting file: $u->{'user'}/$picid\n" unless $rv;
-
-        unless ($opt_fast) {
-            # extra paranoid!
-            my $get = LJ::Blob::get($u, "userpic", $fmt, $picid);
-            die "Re-fetch didn't match" unless $get eq $image;
-        }
-
-        $db->do("DELETE FROM userpicblob2 WHERE userid=$uid AND picid=$picid");
-
-        $done++;
-        printf " Moved $uid/$picid.$fmt ($done/$total = %.2f%%)\n", ($done / $total * 100);
-    }
-}
-
-my $end_ct = $db->selectrow_array("SELECT COUNT(*) FROM userpicblob2");
-if ($end_ct == 0) {
-    $db->do("TRUNCATE TABLE userpicblob2");
-}
-print "Done.\n";
-
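
blobify_userpics.pl drains its source table in fixed-size passes: select up to 200 rows, move each one, delete it, and loop until a pass finds nothing. The control flow reduced to a skeleton; the DBI connection details stand in for the cluster handle the script got from LJ::get_cluster_master, and the actual move is stubbed out:

    #!/usr/bin/perl
    use strict;
    use warnings;
    use DBI;

    # Drain-in-batches, as in blobify_userpics.pl: rows are deleted as they
    # are moved, so a pass that finds no rows means the table is empty.
    my $db = DBI->connect( "dbi:mysql:cluster1", "lj", "", { RaiseError => 1 } );

    my $found = 1;
    while ($found) {
        $found = 0;
        my $sth = $db->prepare(
            "SELECT userid, picid, imagedata FROM userpicblob2 LIMIT 200");
        $sth->execute;
        while ( my ( $uid, $picid, $image ) = $sth->fetchrow_array ) {
            $found = 1;
            # ... write $image to the new store here ...
            $db->do( "DELETE FROM userpicblob2 WHERE userid=? AND picid=?",
                     undef, $uid, $picid );
        }
    }
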
diff -r 1540a384cb42 -r 34b82504fb2d bin/upgrading/migrate-phoneposts.pl
--- a/bin/upgrading/migrate-phoneposts.pl	Tue Aug 25 18:10:33 2009 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,323 +0,0 @@
-#!/usr/bin/perl
-
-use strict;
-use lib "$ENV{LJHOME}/cgi-bin";
-require 'ljlib.pl';
-use LJ::Blob;
-use LJ::User;
-use Getopt::Long;
-use IPC::Open3;
-use Digest::MD5;
-
-# this script is a migrater that will move phone posts from an old storage method
-# into mogilefs.
-
-# the basic theory is that we iterate over all clusters, find all phoneposts that
-# aren't in mogile right now, and put them there
-
-# determine 
-my ($one, $besteffort, $dryrun, $user, $verify, $verbose, $clusters, $purge);
-my $rv = GetOptions("best-effort"  => \$besteffort,
-                    "one"          => \$one,
-                    "dry-run"      => \$dryrun,
-                    "user=s"       => \$user,
-                    "verify"       => \$verify,
-                    "verbose"      => \$verbose,
-                    "purge-old"    => \$purge,
-                    "clusters=s"   => \$clusters,);
-unless ($rv) {
-    die <<ERRMSG;
-This script supports the following command line arguments:
-
-    --clusters=X[-Y]
-        Only handle clusters in this range.  You can specify a single
-        number, or a range of two numbers with a dash.
-
-    --user=username
-        Only move this particular user.
-        
-    --one
-        Only move one user.  (But it moves all their phone posts.)  This is
-        used for testing.
-
-    --verify
-        If specified, this option will reload the phonepost from MogileFS and
-        make sure it's been stored successfully.
-
-    --dry-run
-        If on, do not update the database.  This mode will put the phonepost
-        in MogileFS and give you paths to examine the phone posts and make
-        sure everything is okay.  It will not update the phonepost2 table,
-        though.
-
-    --best-effort
-        Normally, if a problem is encountered (null phonepost, md5 mismatch,
-        connection failure, etc) the script will die to make sure
-        everything goes well.  With this flag, we don't die and instead
-        just print to standard error.
-
-    --purge-old
-        Sometimes we run into data that is for users that have since
-        moved to a different cluster.  Normally we ignore it, but
-        with this option, we'll clean that data up as we find it.
-
-    --verbose
-        Be very chatty.
-ERRMSG
-}
-
-# make sure ljconfig is setup right (or so we hope)
-die "Please define a 'phoneposts' class in your \%LJ::MOGILEFS_CONFIG\n"
-    unless defined $LJ::MOGILEFS_CONFIG{classes}->{phoneposts};
-die "Unable to find MogileFS object (\%LJ::MOGILEFS_CONFIG not setup?)\n"
-    unless $LJ::MogileFS;
-
-# setup stderr if we're in best effort mode
-if ($besteffort) {
-    my $oldfd = select(STDERR);
-    $| = 1;
-    select($oldfd);
-}
-
-# operation modes
-if ($user) {
-    # move a single user
-    my $u = LJ::load_user($user);
-    die "No such user: $user\n" unless $u;
-    handle_userid($u->{userid}, $u->{clusterid});
-    
-} else {
-    # parse the clusters
-    my @clusters;
-    if ($clusters) {
-        if ($clusters =~ /^(\d+)(?:-(\d+))?$/) {
-            my ($min, $max) = map { $_ + 0 } ($1, $2 || $1);
-            push @clusters, $_ foreach $min..$max;
-        } else {
-            die "Error: --clusters argument not of right format.\n";
-        }
-    } else {
-        @clusters = @LJ::CLUSTERS;
-    }
-    
-    # now iterate over the clusters to pick
-    my $ctotal = scalar(@clusters);
-    my $ccount = 0;
-    foreach my $cid (sort { $a <=> $b } @clusters) {
-        # status report
-        $ccount++;
-        print "\nChecking cluster $cid...\n\n";
-
-        # get a handle
-        my $dbcm = get_db_handle($cid);
-
-        # get all userids
-        print "Getting userids...\n";
-        my $limit = $one ? 'LIMIT 1' : '';
-        my $userids = $dbcm->selectcol_arrayref
-            ("SELECT DISTINCT userid FROM phonepostentry WHERE (location='blob' OR location IS NULL) $limit");
-        my $total = scalar(@$userids);
-
-        # iterate over userids
-        my $count = 0;
-        print "Beginning iteration over userids...\n";
-        foreach my $userid (@$userids) {
-            # move this phonepost
-            my $extra = sprintf("[%6.2f%%, $ccount of $ctotal] ", (++$count/$total*100));
-            handle_userid($userid, $cid, $extra);
-        }
-
-        # don't hit up more clusters
-        last if $one;
-    }
-}
-print "\n";
-
-print "Updater terminating.\n";
-
-#############################################################################
-### helper subs down here
-
-# take a userid and move their phone posts
-sub handle_userid {
-    my ($userid, $cid, $extra) = @_;
-    
-    # load user to move and do some sanity checks
-    my $u = LJ::load_userid($userid);
-    unless ($u) {
-        LJ::end_request();
-        LJ::start_request();
-        $u = LJ::load_userid($userid);
-    }
-    die "ERROR: Unable to load userid $userid\n"
-        unless $u;
-
-    # if they're expunged, they might have data somewhere if they were
-    # copy-moved from A to B, then expunged on B.  now we're on A and
-    # need to delete it ourselves (if purge-old is on)
-    if ( $u->{clusterid} == 0 && $u->is_expunged ) {
-        return unless $purge;
-        # if we get here, the user has indicated they want data purged, get handle
-        my $to_purge_dbcm = get_db_handle($cid);
-        my $ct = $to_purge_dbcm->do("DELETE FROM phonepostentry WHERE userid = ?", undef, $u->{userid});
-        print "\tnotice: purged $ct old rows.\n\n"
-            if $verbose;
-        return;
-    }
-
-    # get a handle
-    my $dbcm = get_db_handle($u->{clusterid});
-
-    # print that we're doing this user
-    print "$extra$u->{user}($u->{userid})\n";
-
-    # if a user has been moved to another cluster, but the source data from
-    # phonepostentry wasn't deleted, we need to ignore the user or purge their data
-    if ($u->{clusterid} != $cid) {
-        return unless $purge;
-
-        # verify they have some rows on the new side
-        my $count = $dbcm->selectrow_array
-            ("SELECT COUNT(*) FROM phonepostentry WHERE userid = ?",
-             undef, $u->{userid});
-        return unless $count;
-    
-        # if we get here, the user has indicated they want data purged, get handle
-        my $to_purge_dbcm = get_db_handle($cid);
-
-        # delete the old data
-        if ($dryrun) {
-            print "\tnotice: need to delete phonepostentry rows.\n\n"
-                if $verbose;
-        } else {
-            my $ct = $to_purge_dbcm->do("DELETE FROM phonepostentry WHERE userid = ?", undef, $u->{userid});
-            print "\tnotice: purged $ct old rows.\n\n"
-                if $verbose;
-        }
-
-        # nothing else to do here
-        return;
-    } 
-
-    # get all their photos that aren't in mogile already
-    my $rows = $dbcm->selectall_arrayref
-        ("SELECT filetype, blobid FROM phonepostentry WHERE userid = ? ".
-         "AND (location = 'blob' OR location IS NULL)",
-         undef, $u->{userid});
-    return unless @$rows;
-
-    # if a user has been moved to another cluster, but the source data from
-    # phonepost2 wasn't deleted, we need to ignore the user
-    return unless $u->{clusterid} == $cid;
-
-    # now we have a userid and blobids, get the photos from the blob server
-    foreach my $row (@$rows) {
-        my ($filetype, $blobid) = @$row;
-        print "\tstarting move for blobid $blobid\n"
-            if $verbose;
-        my $format = { 0 => 'mp3', 1 => 'ogg', 2 => 'wav' }->{$filetype};
-        my $data = LJ::Blob::get($u, "phonepost", $format, $blobid);
-
-        # get length
-        my $len = length($data);
-
-        if (! $len) {
-            my $has_blob = $dbcm->selectrow_array("SELECT COUNT(*) FROM userblob WHERE ".
-                                                  "journalid=? AND domain=? AND blobid=?",
-                                                  undef, $u->{userid},
-                                                  LJ::get_blob_domainid("phonepost"),
-                                                  $blobid);
-            if (! $has_blob) {
-                $dbcm->do("UPDATE phonepostentry SET location='none' ".
-                          "WHERE userid=? AND blobid=?", undef, $u->{userid}, $blobid);
-                print "\tnote: changed phonepost entry location to 'none'\n\n"
-                    if $verbose;
-                next;
-            }
-        }
-
-        if ($besteffort && !$len) {
-            print STDERR "empty_phonepost userid=$u->{userid} blobid=$blobid\n";
-            print "\twarning: empty phonepost.\n\n"
-                if $verbose;
-            next;
-        }
-        die "Error: data from blob empty ($u->{user}, 'phonepost', $format, $blobid)\n"
-            unless $len;
-
-        # get filehandle to Mogile and put the file there
-        print "\tdata length = $len bytes, uploading to MogileFS...\n"
-            if $verbose;
-        my $fh = $LJ::MogileFS->new_file("pp:$u->{userid}:$blobid", 'phoneposts');
-        if ($besteffort && !$fh) {
-            print STDERR "new_file_failed userid=$u->{userid} blobid=$blobid\n";
-            print "\twarning: failed in call to new_file\n\n"
-                if $verbose;
-            next;
-        }
-        die "Unable to get filehandle to save file to MogileFS\n"
-            unless $fh;
-
-        # now save the file and close the handles
-        $fh->print($data);
-        my $rv = $fh->close;
-        if ($besteffort && !$rv) {
-            print STDERR "close_failed userid=$u->{userid} blobid=$blobid reason=$@\n";
-            print "\twarning: failed in call to cloes: $@\n\n"
-                if $verbose;
-            next;
-        }
-        die "Unable to save file to MogileFS: $@\n"
-            unless $rv;
-
-        # extra verification
-        if ($verify) {
-            my $data2 = $LJ::MogileFS->get_file_data("pp:$u->{userid}:$blobid");
-            my $eq = ($data2 && $$data2 eq $data) ? 1 : 0;
-            if ($besteffort && !$eq) {
-                print STDERR "verify_failed userid=$u->{userid} blobid=$blobid\n";
-                print "\twarning: verify failed; phone post not updated\n\n"
-                    if $verbose;
-                next;
-            }
-            die "\tERROR: phone post NOT stored successfully, content mismatch\n"
-                unless $eq;
-            print "\tverified length = " . length($$data2) . " bytes...\n"
-                if $verbose;
-        }
-
-        # done moving this phone post
-        unless ($dryrun) {
-            print "\tupdating database for this phone post...\n"
-                if $verbose;
-            $dbcm->do("UPDATE phonepostentry SET location = 'mogile' WHERE userid = ? AND blobid = ?",
-                      undef, $u->{userid}, $blobid);
-        }
-
-        # get the paths so the user can verify if they want
-        if ($verbose) {
-            my @paths = $LJ::MogileFS->get_paths("pp:$u->{userid}:$blobid", 1);
-            print "\tverify mogile path: $_\n" foreach @paths;
-            print "\tphone post update complete.\n\n";
-        }
-    }
-}
-
-# a sub to get a cluster handle and set it up for our use
-sub get_db_handle {
-    my $cid = shift;
-    
-    my $dbcm = LJ::get_cluster_master({ raw => 1 }, $cid);
-    unless ($dbcm) {
-        print STDERR "handle_unavailable clusterid=$cid\n";
-        die "ERROR: unable to get raw handle to cluster $cid\n";
-    }
-    eval {
-        $dbcm->do("SET wait_timeout = 28800");
-        die $dbcm->errstr if $dbcm->err;
-    };
-    die "Couldn't set wait_timeout on $cid: $@\n" if $@;
-    $dbcm->{'RaiseError'} = 1;
-    
-    return $dbcm;
-}
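
The per-blob work in migrate-phoneposts.pl is store-then-verify against MogileFS: open a new file under the pp:<userid>:<blobid> key, write the data, close, and (with --verify) read it back and compare. A sketch of that pattern using MogileFS::Client directly; the domain, tracker host, and the store_verified helper are assumptions, while the key format and the 'phoneposts' class come from the script:

    #!/usr/bin/perl
    use strict;
    use warnings;
    use MogileFS::Client;

    # Placeholder client setup; the script built its client from %LJ::MOGILEFS_CONFIG.
    my $mogfs = MogileFS::Client->new(
        domain => "example",
        hosts  => ["127.0.0.1:7001"],
    );

    # Store a blob under $key in $class, then read it back and compare,
    # the way migrate-phoneposts.pl did with --verify.
    sub store_verified {
        my ( $key, $class, $dataref ) = @_;

        my $fh = $mogfs->new_file( $key, $class )
            or die "new_file failed for $key\n";
        $fh->print($$dataref);
        $fh->close
            or die "close failed for $key: $@\n";

        my $copy = $mogfs->get_file_data($key);    # returns a scalar ref
        die "verify failed for $key: content mismatch\n"
            unless $copy && $$copy eq $$dataref;

        return 1;
    }

    my $audio = "...";                             # raw mp3/ogg/wav bytes
    store_verified( "pp:1234:1", "phoneposts", \$audio );
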
diff -r 1540a384cb42 -r 34b82504fb2d bin/upgrading/user-message-subs
--- a/bin/upgrading/user-message-subs	Tue Aug 25 18:10:33 2009 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,77 +0,0 @@
-#! /usr/bin/perl
-use strict;
-use lib "$ENV{LJHOME}/cgi-bin";
-require "ljlib.pl";
-
-my $dbslo = LJ::get_dbh("slow")
-    or die "cannot connect to slow role";
-
-my $limit = 5000;
-my $uids_done = 0;
-my $last_max_uid = 0;
-
-my $max_uid = $dbslo->selectrow_array("SELECT MAX(userid) FROM user")+0;
-
-print "Populating userids from $last_max_uid through $max_uid\n";
-
-# scary, i know... but we'll last out if we ever get less than $limit uids
-my $start_time = time();
-while (1) {
-    my $loop_begin = time();
-
-    # Let's call start_request
-    # -- so our in-process $u caches don't get unreasonable
-    # -- so we revalidate our database handles
-
-    LJ::start_request();
-    $dbslo = LJ::get_dbh("slow")
-	or die "cannot connect to slow role";
-
-    # load user rows from slow
-    my $sth = $dbslo->prepare
-	("SELECT * FROM user WHERE userid>? AND statusvis!='X' AND journaltype IN ('P', 'I') ORDER BY userid LIMIT $limit");
-    $sth->execute($last_max_uid);
-    die $dbslo->errstr if $dbslo->err;
-
-    # construct user objects from them since we have the full data around
-    my %user_rows = (); # uid => $row
-    while (my $row = $sth->fetchrow_hashref) {
-	$user_rows{$row->{userid}} = LJ::User->new_from_row($row);
-    }
-    last unless %user_rows;
-
-    # now update each one
-    while (my ($uid, $u) = each %user_rows) {
-        next if $u->is_expunged;
-
-        my %params = (event => 'UserMessageRecvd', journal => $u);
-        unless ($u->has_subscription(%params)) {
-            $u->subscribe(%params, method => $_) foreach qw(Email);
-        }
-
-	$last_max_uid = $uid if $uid > $last_max_uid;
-	$uids_done++;
-    }
-
-    # update max userid every so often for our pretty status display
-    if ($uids_done % 10_000 == 0) {
-	$max_uid = $dbslo->selectrow_array("SELECT MAX(userid) FROM user")+0;
-    }
-
-    printf ("[%.2f] $uids_done - current id $last_max_uid - %.2f hours\n",
-	    100*($last_max_uid / ($max_uid || 1)), ($max_uid - $last_max_uid) / (($uids_done || 1) / ((time() - $start_time) || 1)) / 3600
-	    );
-
-    # we're done if we got less than the limit
-    last if scalar (keys %user_rows) < $limit;
-
-    # each of these loops should take two minutes. rate limiting.
-    if (time() - $loop_begin < 120) {
-        my $delay = 120 - (time() - $loop_begin);
-        sleep $delay;
-    }
-}
-
-print "All done!\n";
-
-1;
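
user-message-subs walks the whole user table with keyset pagination: remember the highest userid processed, fetch the next batch with userid > ?, and stop when a batch comes back short. The skeleton of that loop; the DBI connection stands in for LJ::get_dbh("slow") and the per-user subscription work is stubbed out:

    #!/usr/bin/perl
    use strict;
    use warnings;
    use DBI;

    # Keyset pagination over the user table, as in user-message-subs.
    my $dbh = DBI->connect( "dbi:mysql:livejournal", "lj", "", { RaiseError => 1 } );

    my $limit        = 5000;
    my $last_max_uid = 0;

    while (1) {
        my $rows = $dbh->selectall_arrayref(
            "SELECT userid FROM user WHERE userid > ? ORDER BY userid LIMIT $limit",
            { Slice => {} },
            $last_max_uid,
        );
        last unless @$rows;

        for my $row (@$rows) {
            # ... subscribe the user to UserMessageRecvd here ...
            $last_max_uid = $row->{userid} if $row->{userid} > $last_max_uid;
        }

        # A short batch means we have reached the end of the table.
        last if @$rows < $limit;
    }
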
diff -r 1540a384cb42 -r 34b82504fb2d bin/weblog-summarize.pl
--- a/bin/weblog-summarize.pl	Tue Aug 25 18:10:33 2009 +0000
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,74 +0,0 @@
-#!/usr/bin/perl
-#
-
-use strict;
-
-require "$ENV{'LJHOME'}/cgi-bin/ljlib.pl";
-
-my $lock = LJ::locker()->trylock("weblog-summarize");
-exit 0 unless $lock;
-print "Starting.\n";
-
-my %name;
-while (<DATA>) {
-    next unless /(\S+)\s*\-\s*(.+)/;
-    $name{$1} = $2;
-}
-
-my $db = LJ::get_dbh("logs");
-
-my @tables;
-my $sth = $db->prepare("SHOW TABLES LIKE 'access%'");
-$sth->execute;
-push @tables, $_ while $_ = $sth->fetchrow_array;
-
-for (1..10*24) { pop @tables; }
-
-my $ct;
-my $sth;
-
-$| = 1;
-
-foreach my $t (@tables) {
-    my $file = $t;
-    $file =~ s/^access//;
-    $file = "$LJ::HOME/var/stats-$file";
-    next if -e "$file.gz";
-    open (E, ">$file");
-
-    print "$t\n";
-
-    print "  hits...";
-    $ct = $db->selectrow_array("SELECT COUNT(*) FROM $t");
-    print E "count\thits\t$ct\n";
-    print $ct, "\n";
-
-    print "  bytes...";
-    $ct = $db->selectrow_array("SELECT SUM(bytes) FROM $t");
-    print E "count\tbytes\t$ct\n";
-    print $ct, "\n";
-
-    print "  ljusers...";
-    $ct = $db->selectrow_array("SELECT COUNT(DISTINCT ljuser) FROM $t");
-    print E "count\tuniq_ljuser\t$ct\n";
-    print $ct, "\n";
-    
-    print "  codepath...\n";
-    $sth = $db->prepare("SELECT codepath, COUNT(*) FROM $t GROUP BY 1 ORDER BY 2 DESC");
-    $sth->execute;
-    while (my ($p, $ct) = $sth->fetchrow_array) {
-	print E "codepath\t$p\t$ct\n";
-    }
-
-    print "  status...\n";
-    $sth = $db->prepare("SELECT status, COUNT(*) FROM $t GROUP BY 1 ORDER BY 2 DESC");
-    $sth->execute;
-    while (my ($s, $ct) = $sth->fetchrow_array) {
-	print E "status\t$s\t$ct\n";
-    }
-
-    close E;
-    system("/bin/gzip", $file) and die "Error gzipping $t\n";
-    $db->do("DROP TABLE $t");
-}
-
diff -r 1540a384cb42 -r 34b82504fb2d cvs/multicvs.conf
--- a/cvs/multicvs.conf	Tue Aug 25 18:10:33 2009 +0000
+++ b/cvs/multicvs.conf	Tue Aug 25 18:26:14 2009 +0000
@@ -32,7 +32,6 @@ SVN(Data-ObjectDriver)    = http://code.
 #SVN(openid)               = http://code.livejournal.org/svn/openid/trunk/
 SVN(ddlockd)              = http://code.livejournal.org/svn/ddlockd/trunk/
 SVN(miscperl)             = http://code.livejournal.org/svn/miscperl/trunk/
-SVN(dmtp)                 = http://code.livejournal.org/svn/dmtp/trunk/
 SVN(Test-FakeApache)      = http://code.livejournal.org/svn/Test-FakeApache/trunk/
 SVN(LJ-UserSearch)        = http://code.livejournal.org/svn/LJ-UserSearch/trunk/
 SVN(TheSchwartz-Worker-SendEmail) = http://code.livejournal.org/svn/TheSchwartz-Worker-SendEmail/trunk/
@@ -78,8 +77,6 @@ memcached/api/perl/lib/                 
 
 ddlockd/api/perl/DDLockClient.pm              cgi-bin/DDLockClient.pm
 ddlockd/server/ddlockd                        bin/ddlockd
-
-dmtp/server/dmtpd                             bin/dmtpd
 
 mogilefs/api/perl/MogileFS-Client/lib         cgi-bin
 mogilefs/utils/mogadm                         bin/mogadm
--------------------------------------------------------------------------------