#!/usr/bin/perl -w
use strict;
-use vars qw( $opt_d $opt_s $opt_q $opt_v $opt_r $opt_c );
+use vars qw( $opt_d $opt_s $opt_q $opt_v $opt_r $opt_c $opt_j $opt_a );
use vars qw( $DEBUG $DRY_RUN );
use Getopt::Std;
-use DBIx::DBSchema 0.31; #0.39
+use DBD::Pg qw(:async); #for -a
+use DBIx::DBSchema 0.46;
use FS::UID qw(adminsuidsetup checkeuid datasrc driver_name);
use FS::CurrentUser;
use FS::Schema qw( dbdef dbdef_dist reload_dbdef );
die "Not running uid freeside!" unless checkeuid();
-getopts("dqrcs");
+getopts("dqrcsja");
$DEBUG = !$opt_q;
#$DEBUG = $opt_v;
my $dbh = adminsuidsetup($user);
$FS::UID::callback_hack = 0;
+# pass command line opts through to upgrade* routines
+my %upgrade_opts = (
+ quiet => $opt_q,
+ verbose => $opt_v,
+ queue => $opt_j,
+ # others?
+);
+
if ( driver_name =~ /^mysql/i ) { #until 0.39 is required above
eval "use DBIx::DBSchema 0.39;";
die $@ if $@;
my @bugfix = ();
-if (dbdef->table('cust_main')->column('agent_custid') && ! $opt_s) {
- push @bugfix,
- "UPDATE cust_main SET agent_custid = NULL where agent_custid = ''";
-
- push @bugfix,
- "UPDATE h_cust_main SET agent_custid = NULL where agent_custid = ''"
- if (dbdef->table('h_cust_main'));
-}
-
-if ( dbdef->table('cgp_rule_condition') &&
- dbdef->table('cgp_rule_condition')->column('condition')
- )
-{
- push @bugfix,
- "ALTER TABLE ${_}cgp_rule_condition RENAME COLUMN condition TO conditionname"
- for '', 'h_';
-
-}
-
-if ( dbdef->table('areacode') and
- dbdef->table('areacode')->primary_key eq 'code' )
-{
- if ( driver_name =~ /^mysql/i ) {
- push @bugfix,
- 'ALTER TABLE areacode DROP PRIMARY KEY',
- 'ALTER TABLE areacode ADD COLUMN (areanum int auto_increment primary key)';
- }
- else {
- push @bugfix, 'ALTER TABLE areacode DROP CONSTRAINT areacode_pkey';
- }
-}
-
-if ( dbdef->table('upgrade_journal') ) {
- push @bugfix, "SELECT SETVAL( 'upgrade_journal_upgradenum_seq',
- ( SELECT MAX(upgradenum) FROM upgrade_journal )
- )
- ";
-}
-
if ( $DRY_RUN ) {
- print
- join(";\n", @bugfix ). ";\n";
-} elsif ( @bugfix ) {
-
+ print join(";\n", @bugfix ). ";\n";
+} else {
foreach my $statement ( @bugfix ) {
warn "$statement\n";
$dbh->do( $statement )
or die "Error: ". $dbh->errstr. "\n executing: $statement";
}
+}
- upgrade_schema();
+###
+# Fixes before schema upgrade
+###
+# this isn't actually the main schema upgrade, this calls _upgrade_schema
+# in any class that has it
+if ( $DRY_RUN ) {
+ #XXX no dry run for upgrade_schema stuff yet.
+ # looking at the code some are a mix of SQL statements and our methods, icky.
+  # it's not like dry run is 100% anyway, all sorts of other later upgrade tasks
+ # aren't printed either
+} else {
+ upgrade_schema(%upgrade_opts);
dbdef_create($dbh, $dbdef_file);
delete $FS::Schema::dbdef_cache{$dbdef_file}; #force an actual reload
reload_dbdef($dbdef_file);
-
}
-#you should have run fs-migrate-part_svc ages ago, when you upgraded
-#from 1.3 to 1.4... if not, it needs to be hooked into -upgrade here or
-#you'll lose all the part_svc settings it migrates to part_svc_column
+###
+# Now here is the main/automatic schema upgrade via DBIx::DBSchema
+###
my $conf = new FS::Conf;
{ 'nullify_default' => 1, },
);
-#### NEW CUSTOM FIELDS:
+###
+# New custom fields
+###
# 1. prevent new custom field columns from being dropped by upgrade
# 2. migrate old virtual fields to real fields (new custom fields)
-####
+
my $cfsth = $dbh->prepare("SELECT * FROM part_virtual_field")
or die $dbh->errstr;
$cfsth->execute or die $cfsth->errstr;
}
warn "Custom fields schema upgrade completed";
+###
+# Other stuff
+###
+
@statements =
grep { $_ !~ /^CREATE +INDEX +h_queue/i } #useless, holds up queue insertion
@statements;
if ( $opt_c ) {
+ #can always add it back for 4.x->4.x if we need it
+ die "FATAL: -c removed: cdr / h_cdr upgrade is required for 4.x\n";
+
@statements =
grep { $_ !~ /^ *ALTER +TABLE +(h_)?cdr /i }
@statements;
}
+
+###
+# Now run the @statements
+###
+
if ( $DRY_RUN ) {
print
join(";\n", @statements ). ";\n";
exit;
-} else {
- foreach my $statement ( @statements ) {
- warn "$statement\n";
- $dbh->do( $statement )
- or die "Error: ". $dbh->errstr. "\n executing: $statement";
+} elsif ( $opt_a ) {
+
+ ###
+ # -a: Run schema changes in parallel (Pg only).
+ ###
+
+ my $MAX_HANDLES; # undef for now, set it if you want a limit
+
+ my @phases = map { [] } 0..4;
+ my $fsupgrade_idx = 1;
+ my %idx_map;
+ foreach (@statements) {
+ if ( /^ *(CREATE|ALTER) +TABLE/ ) {
+ # phase 0: CREATE TABLE, ALTER TABLE
+ push @{ $phases[0] }, $_;
+ } elsif ( /^ *ALTER +INDEX.* RENAME TO dbs_temp(\d+)/ ) {
+ # phase 1: rename index to dbs_temp%d
+ # (see DBIx::DBSchema::Table)
+ # but in this case, uniqueify all the dbs_temps. This method only works
+ # because they are in the right order to begin with...
+ my $dbstemp_idx = $1;
+ s/dbs_temp$dbstemp_idx/fsupgrade_temp$fsupgrade_idx/;
+ $idx_map{ $dbstemp_idx } = $fsupgrade_idx;
+ push @{ $phases[1] }, $_;
+ $fsupgrade_idx++;
+ } elsif ( /^ *(CREATE|DROP)( +UNIQUE)? +INDEX/ ) {
+ # phase 2: create/drop indices
+ push @{ $phases[2] }, $_;
+ } elsif ( /^ *ALTER +INDEX +dbs_temp(\d+) +RENAME/ ) {
+ # phase 3: rename temp indices back to real ones
+ my $dbstemp_idx = $1;
+ my $mapped_idx = $idx_map{ $dbstemp_idx }
+ or die "unable to remap dbs_temp$1 RENAME statement";
+ s/dbs_temp$dbstemp_idx/fsupgrade_temp$mapped_idx/;
+ push @{ $phases[3] }, $_;
+ } else {
+ # phase 4: everything else (CREATE SEQUENCE, SELECT SETVAL, etc.)
+ push @{ $phases[4] }, $_;
+ }
}
+ my $i = 0;
+ my @busy = ();
+ my @free = ();
+ foreach my $phase (@phases) {
+ warn "Starting schema changes, phase $i...\n";
+ while (@$phase or @busy) {
+ # check status of all running tasks
+ my @newbusy;
+ my $failed_clone;
+ for my $clone (@busy) {
+ if ( $clone->pg_ready ) {
+ # then clean it up
+ my $rv = $clone->pg_result && $clone->commit;
+ $failed_clone = $clone if !$rv;
+ push @free, $clone;
+ } else {
+ push @newbusy, $clone;
+ }
+ }
+ if ( $failed_clone ) {
+ my $errstr = $failed_clone->errstr;
+ foreach my $clone (@newbusy, $failed_clone) {
+ $clone->pg_cancel if $clone->{pg_async_status} == 1;
+ $clone->disconnect;
+ }
+ die "$errstr\n";
+ }
+ @busy = @newbusy;
+ if (my $statement = $phase->[0]) {
+ my $clone;
+ if ( @free ) {
+ $clone = shift(@free);
+ } elsif ( !$MAX_HANDLES or
+ scalar(@free) + scalar(@busy) < $MAX_HANDLES ) {
+ $clone = $dbh->clone; # this will fail if over the server limit
+ }
+
+ if ( $clone ) {
+ my $rv = $clone->do($statement, {pg_async => PG_ASYNC});
+ if ( $rv ) {
+ warn "$statement\n";
+ shift @{ $phase }; # and actually take the statement off the queue
+ push @busy, $clone;
+ } # else I don't know, wait and retry
+ } # else too many handles, wait and retry
+ } elsif (@busy) {
+ # all statements are dispatched
+ warn "Waiting for phase $i to complete\n";
+ sleep 30;
+ }
+ } # while @$phase or @busy
+ $i++;
+ } # foreach $phase
+ warn "Schema changes complete.\n";
# warn "Pre-schema change upgrades completed in ". (time-$start). " seconds\n"; # if $DEBUG;
# $start = time;
# dbdef->update_schema( dbdef_dist(datasrc), $dbh );
+
+} else {
+
+ ###
+ # normal case, run statements sequentially
+ ###
+
+ foreach my $statement ( @statements ) {
+ warn "$statement\n";
+ $dbh->do( $statement )
+ or die "Error: ". $dbh->errstr. "\n executing: $statement";
+ }
}
warn "Schema upgrade completed in ". (time-$start). " seconds\n"; # if $DEBUG;
if ( $dbh->{Driver}->{Name} =~ /^mysql/i && ! $opt_s ) {
- foreach my $table (qw( svc_acct svc_phone )) {
+ foreach my $table (qw( svc_acct svc_phone cust_main_county )) {
my $sth = $dbh->prepare(
"SELECT COUNT(*) FROM duplicate_lock WHERE lockname = '$table'"
}
warn "Custom fields data upgrade completed";
-upgrade_config()
+upgrade_config(%upgrade_opts)
unless $DRY_RUN || $opt_s;
$dbh->commit or die $dbh->errstr;
warn "Config updates completed in ". (time-$start). " seconds\n"; # if $DEBUG;
$start = time;
-upgrade()
+upgrade(%upgrade_opts)
unless $DRY_RUN || $opt_s;
$dbh->commit or die $dbh->errstr;
warn "Table updates completed in ". (time-$start). " seconds\n"; # if $DEBUG;
$start = time;
-upgrade_sqlradius()
+upgrade_sqlradius(%upgrade_opts)
unless $DRY_RUN || $opt_s || $opt_r;
warn "SQL RADIUS updates completed in ". (time-$start). " seconds\n"; # if $DEBUG;
}
sub usage {
- die "Usage:\n freeside-upgrade [ -d ] [ -r ] [ -s ] [ -q | -v ] user\n";
+ die "Usage:\n freeside-upgrade [ -d ] [ -q | -v ] [ -r ] [ -s ] [ -j ] [ -a ] user\n";
}
=head1 NAME
=head1 SYNOPSIS
- freeside-upgrade [ -d ] [ -r ] [ -c ] [ -s ] [ -q | -v ]
+ freeside-upgrade [ -d ] [ -q | -v ] [ -r ] [ -s ] [ -j ] [ -a ]
=head1 DESCRIPTION
[ -q ]: Run quietly. This may become the default at some point.
- [ -r ]: Skip sqlradius updates. Useful for occassions where the sqlradius
- databases may be inaccessible.
-
- [ -c ]: Skip cdr and h_cdr updates.
-
[ -v ]: Run verbosely, sending debugging information to STDERR. This is the
current default.
[ -s ]: Schema changes only. Useful for Pg/slony slaves where the data
changes will be replicated from the Pg/slony master.
+  [ -r ]: Skip sqlradius updates.  Useful for occasions where the sqlradius
+ databases may be inaccessible.
+
+  [ -j ]: Run certain upgrades asynchronously from the job queue.  Recommended
+ for very large cust_main or part_pkg tables that take too long to
+ upgrade.
+
+ [ -a ]: Run schema changes in parallel (Pg only). DBIx::DBSchema minimum
+ version 0.41 recommended. Recommended only for large databases and
+ powerful database servers, to reduce upgrade time.
+
=head1 SEE ALSO
=cut