X-Git-Url: http://git.freeside.biz/gitweb/?p=freeside.git;a=blobdiff_plain;f=FS%2Fbin%2Ffreeside-upgrade;h=0df38841114bad0fcbcaa2746447d00e7e1aea43;hp=02a615a68a3c1309724d01182a1ed25f6472679a;hb=c213fdbabc178985a93996aac33a907465b31007;hpb=dc39152e4b1b881f0115aa5cbe42fbf48a71e5c5

diff --git a/FS/bin/freeside-upgrade b/FS/bin/freeside-upgrade
index 02a615a68..0df388411 100755
--- a/FS/bin/freeside-upgrade
+++ b/FS/bin/freeside-upgrade
@@ -1,20 +1,24 @@
 #!/usr/bin/perl -w
 
 use strict;
-use vars qw($opt_d $opt_q $opt_v);
-use vars qw($DEBUG $DRY_RUN);
+use vars qw( $opt_d $opt_s $opt_q $opt_v $opt_r $opt_c $opt_j $opt_a );
+use vars qw( $DEBUG $DRY_RUN );
 use Getopt::Std;
-use DBIx::DBSchema 0.31;
-use FS::UID qw(adminsuidsetup checkeuid datasrc ); #getsecrets);
+use DBD::Pg qw(:async); #for -a
+use DBIx::DBSchema 0.46;
+use FS::UID qw(adminsuidsetup checkeuid datasrc driver_name);
 use FS::CurrentUser;
 use FS::Schema qw( dbdef dbdef_dist reload_dbdef );
 use FS::Misc::prune qw(prune_applications);
 use FS::Conf;
 use FS::Record qw(qsearch);
+use FS::Upgrade qw(upgrade_schema upgrade_config upgrade upgrade_sqlradius);
+
+my $start = time;
 
 die "Not running uid freeside!" unless checkeuid();
 
-getopts("dq");
+getopts("dqrcsja");
 
 $DEBUG = !$opt_q;
 #$DEBUG = $opt_v;
@@ -27,6 +31,19 @@
 $FS::UID::callback_hack = 1;
 my $dbh = adminsuidsetup($user);
 $FS::UID::callback_hack = 0;
 
+# pass command line opts through to upgrade* routines
+my %upgrade_opts = (
+  quiet   => $opt_q,
+  verbose => $opt_v,
+  queue   => $opt_j,
+  # others?
+);
+
+if ( driver_name =~ /^mysql/i ) { #until 0.39 is required above
+  eval "use DBIx::DBSchema 0.39;";
+  die $@ if $@;
+}
+
 #needs to match FS::Schema...
 my $dbdef_file = "%%%FREESIDE_CONF%%%/dbdef.". datasrc;
 
@@ -35,61 +52,275 @@
 dbdef_create($dbh, $dbdef_file);
 
 delete $FS::Schema::dbdef_cache{$dbdef_file}; #force an actual reload
 reload_dbdef($dbdef_file);
 
-$DBIx::DBSchema::DEBUG = $DEBUG;
-$DBIx::DBSchema::Table::DEBUG = $DEBUG;
-$DBIx::DBSchema::Index::DEBUG = $DEBUG;
+warn "Upgrade startup completed in ". (time-$start). " seconds\n"; # if $DEBUG;
+$start = time;
+
+#$DBIx::DBSchema::DEBUG = $DEBUG;
+#$DBIx::DBSchema::Table::DEBUG = $DEBUG;
+#$DBIx::DBSchema::Index::DEBUG = $DEBUG;
 
 my @bugfix = ();
 
-if (dbdef->table('cust_main')->column('agent_custid')) {
-  push @bugfix,
-    "UPDATE cust_main SET agent_custid = NULL where agent_custid = ''";
+if ( $DRY_RUN ) {
+  print join(";\n", @bugfix ). ";\n";
+} else {
+  foreach my $statement ( @bugfix ) {
+    warn "$statement\n";
+    $dbh->do( $statement )
+      or die "Error: ". $dbh->errstr. "\n executing: $statement";
+  }
+}
+
+###
+# Fixes before schema upgrade
+###
+# this isn't actually the main schema upgrade, this calls _upgrade_schema
+# in any class that has it
+if ( $DRY_RUN ) {
+  #XXX no dry run for upgrade_schema stuff yet.
+  # looking at the code some are a mix of SQL statements and our methods, icky.
+  # it's not like dry run is 100% anyway, all sorts of other later upgrade
+  # tasks aren't printed either
+} else {
+  upgrade_schema(%upgrade_opts);
+
+  dbdef_create($dbh, $dbdef_file);
+  delete $FS::Schema::dbdef_cache{$dbdef_file}; #force an actual reload
+  reload_dbdef($dbdef_file);
+}
+
+###
+# Now here is the main/automatic schema upgrade via DBIx::DBSchema
+###
+
+my $conf = new FS::Conf;
+
+my $dbdef_dist = dbdef_dist(
+  datasrc,
+  { 'queue-no_history' => $conf->exists('queue-no_history') },
+);
+
+my @statements = dbdef->sql_update_schema( $dbdef_dist,
+                                           $dbh,
+                                           { 'nullify_default' => 1, },
+                                         );
+
+###
+# New custom fields
+###
+# 1. prevent new custom field columns from being dropped by upgrade
+# 2. migrate old virtual fields to real fields (new custom fields)
+
+my $cfsth = $dbh->prepare("SELECT * FROM part_virtual_field")
+  or die $dbh->errstr;
+$cfsth->execute or die $cfsth->errstr;
+my $cf;
+while ( $cf = $cfsth->fetchrow_hashref ) {
+  my $tbl = $cf->{'dbtable'};
+  my $name = $cf->{'name'};
+  $name = lc($name) unless driver_name =~ /^mysql/i;
+
+  @statements = grep { $_ !~ /^\s*ALTER\s+TABLE\s+(h_|)$tbl DROP\s+COLUMN\s+cf_$name/i }
+                     @statements;
+  push @statements,
+    "ALTER TABLE $tbl ADD COLUMN cf_$name varchar(".$cf->{'length'}.")"
+      unless (dbdef->table($tbl) && dbdef->table($tbl)->column("cf_$name"));
+  push @statements,
+    "ALTER TABLE h_$tbl ADD COLUMN cf_$name varchar(".$cf->{'length'}.")"
+      unless (dbdef->table("h_$tbl") && dbdef->table("h_$tbl")->column("cf_$name"));
+}
+warn "Custom fields schema upgrade completed";
+
+###
+# Other stuff
+###
+
+@statements =
+  grep { $_ !~ /^CREATE +INDEX +h_queue/i } #useless, holds up queue insertion
+       @statements;
 
-  push @bugfix,
-    "UPDATE h_cust_main SET agent_custid = NULL where agent_custid = ''"
-    if (dbdef->table('h_cust_main'));
+unless ( driver_name =~ /^mysql/i ) {
+  #not necessary under non-mysql, takes forever on big db
+  @statements =
+    grep { $_ !~ /^ *ALTER +TABLE +h_queue +ALTER +COLUMN +job +TYPE +varchar\(512\) *$/i }
+         @statements;
 }
 
-#you should have run fs-migrate-part_svc ages ago, when you upgraded
-#from 1.3 to 1.4... if not, it needs to be hooked into -upgrade here or
-#you'll lose all the part_svc settings it migrates to part_svc_column
+if ( $opt_c ) {
+
+  #can always add it back for 4.x->4.x if we need it
+  die "FATAL: -c removed: cdr / h_cdr upgrade is required for 4.x\n";
+
+  @statements =
+    grep { $_ !~ /^ *ALTER +TABLE +(h_)?cdr /i }
+         @statements;
+
+  @statements =
+    grep { $_ !~ /^ *CREATE +INDEX +(h_)?cdr\d+ /i }
+         @statements;
+
+}
+
+
+###
+# Now run the @statements
+###
 
 if ( $DRY_RUN ) {
   print
-    join(";\n", @bugfix, dbdef->sql_update_schema( dbdef_dist(datasrc), $dbh ) ). ";\n";
+    join(";\n", @statements ). ";\n";
   exit;
+} elsif ( $opt_a ) {
+
+  ###
+  # -a: Run schema changes in parallel (Pg only).
+  ###
+
+  my $MAX_HANDLES; # undef for now, set it if you want a limit
+
+  my @phases = map { [] } 0..4;
+  my $fsupgrade_idx = 1;
+  my %idx_map;
+  foreach (@statements) {
+    if ( /^ *(CREATE|ALTER) +TABLE/ ) {
+      # phase 0: CREATE TABLE, ALTER TABLE
+      push @{ $phases[0] }, $_;
+    } elsif ( /^ *ALTER +INDEX.* RENAME TO dbs_temp(\d+)/ ) {
+      # phase 1: rename index to dbs_temp%d
+      # (see DBIx::DBSchema::Table)
+      # but in this case, uniqueify all the dbs_temps.  This method only works
+      # because they are in the right order to begin with...
+      my $dbstemp_idx = $1;
+      s/dbs_temp$dbstemp_idx/fsupgrade_temp$fsupgrade_idx/;
+      $idx_map{ $dbstemp_idx } = $fsupgrade_idx;
+      push @{ $phases[1] }, $_;
+      $fsupgrade_idx++;
+    } elsif ( /^ *(CREATE|DROP)( +UNIQUE)? +INDEX/ ) {
+      # phase 2: create/drop indices
+      push @{ $phases[2] }, $_;
+    } elsif ( /^ *ALTER +INDEX +dbs_temp(\d+) +RENAME/ ) {
+      # phase 3: rename temp indices back to real ones
+      my $dbstemp_idx = $1;
+      my $mapped_idx = $idx_map{ $dbstemp_idx }
+        or die "unable to remap dbs_temp$1 RENAME statement";
+      s/dbs_temp$dbstemp_idx/fsupgrade_temp$mapped_idx/;
+      push @{ $phases[3] }, $_;
+    } else {
+      # phase 4: everything else (CREATE SEQUENCE, SELECT SETVAL, etc.)
+      push @{ $phases[4] }, $_;
+    }
+  }
+  my $i = 0;
+  my @busy = ();
+  my @free = ();
+  foreach my $phase (@phases) {
+    warn "Starting schema changes, phase $i...\n";
+    while (@$phase or @busy) {
+      # check status of all running tasks
+      my @newbusy;
+      my $failed_clone;
+      for my $clone (@busy) {
+        if ( $clone->pg_ready ) {
+          # then clean it up
+          my $rv = $clone->pg_result && $clone->commit;
+          $failed_clone = $clone if !$rv;
+          push @free, $clone;
+        } else {
+          push @newbusy, $clone;
+        }
+      }
+      if ( $failed_clone ) {
+        my $errstr = $failed_clone->errstr;
+        foreach my $clone (@newbusy, $failed_clone) {
+          $clone->pg_cancel if $clone->{pg_async_status} == 1;
+          $clone->disconnect;
+        }
+        die "$errstr\n";
+      }
+      @busy = @newbusy;
+      if (my $statement = $phase->[0]) {
+        my $clone;
+        if ( @free ) {
+          $clone = shift(@free);
+        } elsif ( !$MAX_HANDLES or
+                  scalar(@free) + scalar(@busy) < $MAX_HANDLES ) {
+          $clone = $dbh->clone; # this will fail if over the server limit
+        }
+
+        if ( $clone ) {
+          my $rv = $clone->do($statement, {pg_async => PG_ASYNC});
+          if ( $rv ) {
+            warn "$statement\n";
+            shift @{ $phase }; # and actually take the statement off the queue
+            push @busy, $clone;
+          } # else I don't know, wait and retry
+        } # else too many handles, wait and retry
+      } elsif (@busy) {
+        # all statements are dispatched
+        warn "Waiting for phase $i to complete\n";
+        sleep 30;
+      }
+    } # while @$phase or @busy
+    $i++;
+  } # foreach $phase
+  warn "Schema changes complete.\n";
+
+# warn "Pre-schema change upgrades completed in ". (time-$start). " seconds\n"; # if $DEBUG;
+# $start = time;
+
+#  dbdef->update_schema( dbdef_dist(datasrc), $dbh );
+
 } else {
-  foreach my $statement ( @bugfix ) {
+
+  ###
+  # normal case, run statements sequentially
+  ###
+
+  foreach my $statement ( @statements ) {
+    warn "$statement\n";
     $dbh->do( $statement )
       or die "Error: ". $dbh->errstr. "\n executing: $statement";
   }
-
-  dbdef->update_schema( dbdef_dist(datasrc), $dbh );
 }
 
+warn "Schema upgrade completed in ". (time-$start). " seconds\n"; # if $DEBUG;
+$start = time;
+
 my $hashref = {};
 $hashref->{dry_run} = 1 if $DRY_RUN;
-$hashref->{debug} = 1 if $DEBUG;
-print join "\n", prune_applications($hashref);
-print "\n" if $DRY_RUN;
+$hashref->{debug} = 1 if $DEBUG && $DRY_RUN;
+prune_applications($hashref) unless $opt_s;
 
-if ( $dbh->{Driver}->{Name} =~ /^mysql/i ) {
+warn "Application pruning completed in ". (time-$start). " seconds\n"; # if $DEBUG;
+$start = time;
 
-  my $sth = $dbh->prepare(
-    "SELECT COUNT(*) FROM duplicate_lock WHERE lockname = 'svc_acct'"
-  ) or die $dbh->errstr;
+print "\n" if $DRY_RUN;
 
-  $sth->execute or die $sth->errstr;
+if ( $dbh->{Driver}->{Name} =~ /^mysql/i && ! $opt_s ) {
 
-  unless ( $sth->fetchrow_arrayref->[0] ) {
+  foreach my $table (qw( svc_acct svc_phone cust_main_county )) {
 
-    $sth = $dbh->prepare(
-      "INSERT INTO duplicate_lock ( lockname ) VALUES ( 'svc_acct' )"
+    my $sth = $dbh->prepare(
+      "SELECT COUNT(*) FROM duplicate_lock WHERE lockname = '$table'"
     ) or die $dbh->errstr;
 
     $sth->execute or die $sth->errstr;
 
+    unless ( $sth->fetchrow_arrayref->[0] ) {
+
+      $sth = $dbh->prepare(
+        "INSERT INTO duplicate_lock ( lockname ) VALUES ( '$table' )"
+      ) or die $dbh->errstr;
+
+      $sth->execute or die $sth->errstr;
+
+    }
+
   }
 
+  warn "Duplication lock creation completed in ". (time-$start). " seconds\n"; # if $DEBUG;
+  $start = time;
+
 }
 
 $dbh->commit or die $dbh->errstr;
 
@@ -99,11 +330,11 @@
 dbdef_create($dbh, $dbdef_file);
 
 $dbh->disconnect or die $dbh->errstr;
 
 delete $FS::Schema::dbdef_cache{$dbdef_file}; #force an actual reload
-$FS::UID::AutoCommit = 1;
+$FS::UID::AutoCommit = 0;
 $FS::UID::callback_hack = 1;
 $dbh = adminsuidsetup($user);
 $FS::UID::callback_hack = 0;
 
-unless ( $DRY_RUN ) {
+unless ( $DRY_RUN || $opt_s ) {
   my $dir = "%%%FREESIDE_CONF%%%/conf.". datasrc;
   if (!scalar(qsearch('conf', {}))) {
     my $error = FS::Conf::init_config($dir);
@@ -117,6 +348,64 @@ unless ( $DRY_RUN ) {
 $dbh->commit or die $dbh->errstr;
 $dbh->disconnect or die $dbh->errstr;
 
+$FS::UID::AutoCommit = 1;
+
+$dbh = adminsuidsetup($user);
+
+warn "Re-initialization with updated schema completed in ". (time-$start). " seconds\n"; # if $DEBUG;
+$start = time;
+
+#### NEW CUSTOM FIELDS:
+# 3. migrate old virtual field data to the new custom fields
+####
+$cfsth = $dbh->prepare("SELECT * FROM virtual_field left join part_virtual_field using (vfieldpart)")
+  or die $dbh->errstr;
+$cfsth->execute or die $cfsth->errstr;
+my @cfst;
+while ( $cf = $cfsth->fetchrow_hashref ) {
+  my $tbl = $cf->{'dbtable'};
+  my $name = $cf->{'name'};
+  my $dtable = dbdef->table($tbl);
+  next unless $dtable && $dtable->primary_key; # XXX: warn first?
+  my $pkey = $dtable->primary_key;
+  next unless $dtable->column($pkey)->type =~ /int/i; # XXX: warn first?
+  push @cfst, "UPDATE $tbl set cf_$name = '".$cf->{'value'}."' WHERE $pkey = ".$cf->{'recnum'};
+  push @cfst, "DELETE FROM virtual_field WHERE vfieldnum = ".$cf->{'vfieldnum'};
+}
+foreach my $cfst ( @cfst ) {
+  warn "$cfst\n";
+  $dbh->do( $cfst )
+    or die "Error: ". $dbh->errstr. "\n executing: $cfst";
+}
+warn "Custom fields data upgrade completed";
+
+upgrade_config(%upgrade_opts)
+  unless $DRY_RUN || $opt_s;
+
+$dbh->commit or die $dbh->errstr;
+
+warn "Config updates completed in ". (time-$start). " seconds\n"; # if $DEBUG;
+$start = time;
+
+upgrade(%upgrade_opts)
+  unless $DRY_RUN || $opt_s;
+
+$dbh->commit or die $dbh->errstr;
+
+warn "Table updates completed in ". (time-$start). " seconds\n"; # if $DEBUG;
+$start = time;
+
+upgrade_sqlradius(%upgrade_opts)
+  unless $DRY_RUN || $opt_s || $opt_r;
+
+warn "SQL RADIUS updates completed in ". (time-$start). " seconds\n"; # if $DEBUG;
+$start = time;
+
+$dbh->commit or die $dbh->errstr;
+$dbh->disconnect or die $dbh->errstr;
+
+warn "Final commit and disconnection completed in ". (time-$start). " seconds; upgrade done!\n"; # if $DEBUG;
+
 ###
 
 sub dbdef_create { # reverse engineer the schema from the DB and save to file
@@ -126,7 +415,7 @@ sub dbdef_create { # reverse engineer the schema from the DB and save to file
 }
 
 sub usage {
-  die "Usage:\n  freeside-upgrade [ -d ] [ -q | -v ] user\n";
+  die "Usage:\n  freeside-upgrade [ -d ] [ -q | -v ] [ -r ] [ -s ] [ -j ] [ -a ] user\n";
 }
 
 =head1 NAME
@@ -135,7 +424,7 @@ freeside-upgrade - Upgrades database schema for new freeside versions.
 
 =head1 SYNOPSIS
 
-  freeside-upgrade [ -d ] [ -q | -v ]
+  freeside-upgrade [ -d ] [ -q | -v ] [ -r ] [ -s ] [ -j ] [ -a ]
 
 =head1 DESCRIPTION
 
@@ -160,6 +449,22 @@ Also performs other upgrade functions:
   [ -v ]: Run verbosely, sending debugging information to STDERR.  This is the
           current default.
 
+  [ -s ]: Schema changes only.  Used to be useful for Pg/slony slaves where the
+          data changes would be replicated from the Pg/slony master (current
+          native Pg replication replicates schema changes to slaves
+          automatically).
+
+  [ -r ]: Skip sqlradius updates.  Useful for occasions where the sqlradius
+          databases may be inaccessible.
+
+  [ -j ]: Run certain upgrades asynchronously from the job queue.  Recommended
+          for very large cust_main or part_pkg tables that take too long to
+          upgrade.
+
+  [ -a ]: Run schema changes in parallel (Pg only).  DBIx::DBSchema minimum
+          version 0.41 recommended.  Recommended only for large databases and
+          powerful database servers, to reduce upgrade time.
+
 =head1 SEE ALSO
 
 =cut
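The -a path added above works by handing each schema-change statement to its own cloned database connection via DBD::Pg's asynchronous query support, then polling pg_ready until the phase drains.  A minimal standalone sketch of that dispatch pattern follows; the DSN, credentials, table, and index names are placeholders, not anything from the patch, and error handling is reduced to die.

  #!/usr/bin/perl
  # Sketch of the pg_async dispatch loop the -a option uses; assumes a
  # reachable Pg database.  DSN, user, and statements are placeholders.
  use strict;
  use warnings;
  use DBI;
  use DBD::Pg qw(:async);

  my $dbh = DBI->connect('dbi:Pg:dbname=example', 'example', '',
                         { AutoCommit => 0, RaiseError => 0, PrintError => 0 })
    or die $DBI::errstr;

  # mutually independent DDL; the real script buckets statements into five
  # phases so that only safely parallel statements run together
  my @statements = (
    'CREATE INDEX example_idx1 ON example_table ( col1 )',
    'CREATE INDEX example_idx2 ON example_table ( col2 )',
  );

  my @busy = ();
  while ( @statements or @busy ) {

    # reap any clones whose statement has finished; pg_ready never blocks
    my @newbusy = ();
    foreach my $clone ( @busy ) {
      if ( $clone->pg_ready ) {
        $clone->pg_result or die $clone->errstr;
        $clone->commit;
        $clone->disconnect;
      } else {
        push @newbusy, $clone;
      }
    }
    @busy = @newbusy;

    if ( my $statement = shift @statements ) {
      my $clone = $dbh->clone;  # one connection per in-flight statement
      $clone->do( $statement, { pg_async => PG_ASYNC } )
        or die $clone->errstr;
      push @busy, $clone;
    } elsif ( @busy ) {
      sleep 1;  # everything dispatched; wait for the stragglers
    }

  }

  $dbh->disconnect;

The phase bucketing in the patch is the part this sketch omits: index renames, index creates, and table alters are only safe to overlap within their own phase, which is why the script drains each phase completely before starting the next.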
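One caveat on the virtual-field data migration in the @@ -117,6 +348,64 @@ hunk: it builds the UPDATE and DELETE strings by interpolating $cf->{'value'} and $cf->{'recnum'} directly, so a legacy value containing a single quote would produce malformed SQL.  A hedged sketch of the same step using DBI bind values instead; the hard-coded svcnum primary key is a hypothetical stand-in for the patch's dbdef primary-key lookup, and this is not the method the patch itself uses.

  # Sketch: the virtual_field -> cf_* data migration with bind values, so
  # quotes in legacy data cannot break the generated SQL.  Assumes the
  # part_virtual_field/virtual_field tables described in the patch.
  use strict;
  use warnings;

  sub migrate_virtual_field_data {
    my $dbh = shift;

    my $cfsth = $dbh->prepare(
      'SELECT * FROM virtual_field LEFT JOIN part_virtual_field USING (vfieldpart)'
    ) or die $dbh->errstr;
    $cfsth->execute or die $cfsth->errstr;

    while ( my $cf = $cfsth->fetchrow_hashref ) {
      my $tbl  = $cf->{'dbtable'};
      my $name = lc $cf->{'name'};
      my $pkey = 'svcnum';  # hypothetical; the patch uses dbdef->table($tbl)->primary_key

      # identifiers can't be bound, but the value and the keys can
      my $upd = $dbh->prepare("UPDATE $tbl SET cf_$name = ? WHERE $pkey = ?")
        or die $dbh->errstr;
      $upd->execute( $cf->{'value'}, $cf->{'recnum'} ) or die $upd->errstr;

      my $del = $dbh->prepare('DELETE FROM virtual_field WHERE vfieldnum = ?')
        or die $dbh->errstr;
      $del->execute( $cf->{'vfieldnum'} ) or die $del->errstr;
    }
  }

In practice the interpolation in the patch is harmless for the numeric recnum/vfieldnum keys; the bind-value form only matters for the free-text value column.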