4 use vars qw( $opt_d $opt_s $opt_q $opt_v $opt_r $opt_c $opt_j $opt_a );
5 use vars qw( $DEBUG $DRY_RUN );
7 use DBD::Pg qw(:async); #for -a
8 use DBIx::DBSchema 0.46;
9 use FS::UID qw(adminsuidsetup checkeuid datasrc driver_name);
11 use FS::Schema qw( dbdef dbdef_dist reload_dbdef );
12 use FS::Misc::prune qw(prune_applications);
14 use FS::Record qw(qsearch);
15 use FS::Upgrade qw(upgrade_schema upgrade_config upgrade upgrade_sqlradius);
# ---------------------------------------------------------------------------
# Startup: sanity-check the invoking user, connect as the given freeside DB
# user, and load a freshly reverse-engineered snapshot of the live schema.
# NOTE(review): this listing is a numbered excerpt; gaps in the embedded line
# numbers indicate original source lines that are not shown here.
# ---------------------------------------------------------------------------
19 die "Not running uid freeside!" unless checkeuid();
# First remaining argument is the freeside username to connect as; without
# it, bail out via the usage message (the usage sub itself dies).
28 my $user = shift or die &usage;
# upgrade_hack / callback_hack: suppress normal login-time callbacks while
# the schema may still be out of date -- presumably so adminsuidsetup doesn't
# touch tables that haven't been upgraded yet.  TODO(review): confirm.
29 $FS::CurrentUser::upgrade_hack = 1;
30 $FS::UID::callback_hack = 1;
31 my $dbh = adminsuidsetup($user);
32 $FS::UID::callback_hack = 0;
34 # pass command line opts through to upgrade* routines
# NOTE(review): the top of the file already requires DBIx::DBSchema 0.46, so
# this mysql-only 0.39 check looks vestigial.
42 if ( driver_name =~ /^mysql/i ) { #until 0.39 is required above
43 eval "use DBIx::DBSchema 0.39;";
47 #needs to match FS::Schema...
48 my $dbdef_file = "%%%FREESIDE_CONF%%%/dbdef.". datasrc;
# Reverse-engineer the current live schema into the dbdef cache file, then
# force FS::Schema to re-read it so dbdef() reflects reality, not the cache.
50 dbdef_create($dbh, $dbdef_file);
52 delete $FS::Schema::dbdef_cache{$dbdef_file}; #force an actual reload
53 reload_dbdef($dbdef_file);
55 warn "Upgrade startup completed in ". (time-$start). " seconds\n"; # if $DEBUG;
# Verbose schema-diff debugging, left disabled.
58 #$DBIx::DBSchema::DEBUG = $DEBUG;
59 #$DBIx::DBSchema::Table::DEBUG = $DEBUG;
60 #$DBIx::DBSchema::Index::DEBUG = $DEBUG;
# ---------------------------------------------------------------------------
# One-off data/schema "bugfix" statements that must run before the generic
# schema diff below.  They accumulate in @bugfix (declared in lines not
# shown in this excerpt).
# ---------------------------------------------------------------------------
# Normalize empty-string agent_custid to NULL (skipped under -s, schema-only).
64 if (dbdef->table('cust_main')->column('agent_custid') && ! $opt_s) {
66 "UPDATE cust_main SET agent_custid = NULL where agent_custid = ''";
69 "UPDATE h_cust_main SET agent_custid = NULL where agent_custid = ''"
70 if (dbdef->table('h_cust_main'));
# Rename cgp_rule_condition.condition to conditionname on both the live and
# history (h_-prefixed) tables.
73 if ( dbdef->table('cgp_rule_condition') &&
74 dbdef->table('cgp_rule_condition')->column('condition')
78 "ALTER TABLE ${_}cgp_rule_condition RENAME COLUMN condition TO conditionname"
# areacode used to be keyed on the code itself; move to a surrogate key.
83 if ( dbdef->table('areacode') and
84 dbdef->table('areacode')->primary_key eq 'code' )
# mysql can drop and re-add the primary key in-line...
86 if ( driver_name =~ /^mysql/i ) {
88 'ALTER TABLE areacode DROP PRIMARY KEY',
89 'ALTER TABLE areacode ADD COLUMN (areanum int auto_increment primary key)';
# ...Pg just drops the constraint here.  TODO(review): confirm the new key is
# recreated by the generic diff -- that step is not among the lines shown.
92 push @bugfix, 'ALTER TABLE areacode DROP CONSTRAINT areacode_pkey';
# Resync the upgrade_journal sequence with the table's current max value, so
# new journal rows don't collide after a restore/import.
96 if ( dbdef->table('upgrade_journal') ) {
97 if ( driver_name =~ /^Pg/i ) {
99 SELECT SETVAL( 'upgrade_journal_upgradenum_seq',
100 ( SELECT MAX(upgradenum) FROM upgrade_journal )
103 #MySQL can't do this in a statement so have to do it manually
104 #} elsif ( driver_name =~ /^mysql/i ) {
106 # ALTER TABLE upgrade_journal AUTO_INCREMENT =
107 # ( ( SELECT MAX(upgradenum) FROM upgrade_journal ) + 1 )
# Dry run: just print the fixes; otherwise execute each and die on failure.
114 join(";\n", @bugfix ). ";\n";
115 } elsif ( @bugfix ) {
117 foreach my $statement ( @bugfix ) {
119 $dbh->do( $statement )
120 or die "Error: ". $dbh->errstr. "\n executing: $statement";
# Pre-diff schema upgrades from FS::Upgrade, then re-snapshot and reload the
# dbdef cache so the diff below starts from the post-bugfix schema.
123 upgrade_schema(%upgrade_opts);
125 dbdef_create($dbh, $dbdef_file);
126 delete $FS::Schema::dbdef_cache{$dbdef_file}; #force an actual reload
127 reload_dbdef($dbdef_file);
131 #you should have run fs-migrate-part_svc ages ago, when you upgraded
132 #from 1.3 to 1.4... if not, it needs to be hooked into -upgrade here or
133 #you'll lose all the part_svc settings it migrates to part_svc_column
# Build the target ("dist") schema and diff the live schema against it,
# producing the DDL statements needed to bring the database up to date.
135 my $conf = new FS::Conf;
137 my $dbdef_dist = dbdef_dist(
139 { 'queue-no_history' => $conf->exists('queue-no_history') },
142 my @statements = dbdef->sql_update_schema( $dbdef_dist,
144 { 'nullify_default' => 1, },
147 #### NEW CUSTOM FIELDS:
148 # 1. prevent new custom field columns from being dropped by upgrade
149 # 2. migrate old virtual fields to real fields (new custom fields)
151 my $cfsth = $dbh->prepare("SELECT * FROM part_virtual_field")
153 $cfsth->execute or die $cfsth->errstr;
# For each defined virtual field: keep its cf_ column out of the generated
# DROP statements (the dist schema doesn't know about site-local custom
# columns), and add the column to live and history tables if missing.
155 while ( $cf = $cfsth->fetchrow_hashref ) {
156 my $tbl = $cf->{'dbtable'};
157 my $name = $cf->{'name'};
# Fold the field name to lower case except on mysql.
158 $name = lc($name) unless driver_name =~ /^mysql/i;
160 @statements = grep { $_ !~ /^\s*ALTER\s+TABLE\s+(h_|)$tbl\s+DROP\s+COLUMN\s+cf_$name\s*$/i }
163 "ALTER TABLE $tbl ADD COLUMN cf_$name varchar(".$cf->{'length'}.")"
164 unless (dbdef->table($tbl) && dbdef->table($tbl)->column("cf_$name"));
166 "ALTER TABLE h_$tbl ADD COLUMN cf_$name varchar(".$cf->{'length'}.")"
167 unless (dbdef->table("h_$tbl") && dbdef->table("h_$tbl")->column("cf_$name"));
169 warn "Custom fields schema upgrade completed";
# Filter out generated statements that are harmful or pointlessly slow:
172 grep { $_ !~ /^CREATE +INDEX +h_queue/i } #useless, holds up queue insertion
175 unless ( driver_name =~ /^mysql/i ) {
176 #not necessary under non-mysql, takes forever on big db
178 grep { $_ !~ /^ *ALTER +TABLE +h_queue +ALTER +COLUMN +job +TYPE +varchar\(512\) *$/i }
# The old -c (skip cdr changes) flag is gone; cdr upgrades are mandatory now.
184 #can always add it back for 4.x->4.x if we need it
185 die "FATAL: -c removed: cdr / h_cdr upgrade is required for 4.x\n";
188 grep { $_ !~ /^ *ALTER +TABLE +(h_)?cdr /i }
192 grep { $_ !~ /^ *CREATE +INDEX +(h_)?cdr\d+ /i }
# Cap on concurrent async handles for the -a path below; undef = unlimited.
197 my $MAX_HANDLES; # undef for now, set it if you want a limit
# Dry run: emit the full DDL and stop short of executing it.
201 join(";\n", @statements ). ";\n";
# ---------------------------------------------------------------------------
# -a (parallel, Pg-only) path: partition the DDL into five ordered phases so
# independent statements can run concurrently on cloned handles, while
# order-sensitive ones (the index rename dance) stay correctly sequenced.
# NOTE(review): %idx_map, @busy, @free, @newbusy, $failed_clone and the phase
# counter $i are declared in lines not shown in this excerpt.
# ---------------------------------------------------------------------------
205 my @phases = map { [] } 0..4;
206 my $fsupgrade_idx = 1;
208 foreach (@statements) {
209 if ( /^ *(CREATE|ALTER) +TABLE/ ) {
210 # phase 0: CREATE TABLE, ALTER TABLE
211 push @{ $phases[0] }, $_;
212 } elsif ( /^ *ALTER +INDEX.* RENAME TO dbs_temp(\d+)/ ) {
213 # phase 1: rename index to dbs_temp%d
214 # (see DBIx::DBSchema::Table)
215 # but in this case, uniqueify all the dbs_temps. This method only works
216 # because they are in the right order to begin with...
217 my $dbstemp_idx = $1;
218 s/dbs_temp$dbstemp_idx/fsupgrade_temp$fsupgrade_idx/;
219 $idx_map{ $dbstemp_idx } = $fsupgrade_idx;
220 push @{ $phases[1] }, $_;
222 } elsif ( /^ *(CREATE|DROP)( +UNIQUE)? +INDEX/ ) {
223 # phase 2: create/drop indices
224 push @{ $phases[2] }, $_;
225 } elsif ( /^ *ALTER +INDEX +dbs_temp(\d+) +RENAME/ ) {
226 # phase 3: rename temp indices back to real ones
227 my $dbstemp_idx = $1;
228 my $mapped_idx = $idx_map{ $dbstemp_idx }
229 or die "unable to remap dbs_temp$1 RENAME statement";
230 s/dbs_temp$dbstemp_idx/fsupgrade_temp$mapped_idx/;
231 push @{ $phases[3] }, $_;
233 # phase 4: everything else (CREATE SEQUENCE, SELECT SETVAL, etc.)
234 push @{ $phases[4] }, $_;
# Dispatcher: within each phase, farm statements out to cloned handles using
# DBD::Pg asynchronous queries; a phase must finish before the next begins.
240 foreach my $phase (@phases) {
241 warn "Starting schema changes, phase $i...\n";
242 while (@$phase or @busy) {
243 # check status of all running tasks
246 for my $clone (@busy) {
247 if ( $clone->pg_ready ) {
# Statement finished: collect its result and commit on that handle; remember
# the handle if either step failed so the whole phase can be aborted.
249 my $rv = $clone->pg_result && $clone->commit;
250 $failed_clone = $clone if !$rv;
253 push @newbusy, $clone;
256 if ( $failed_clone ) {
257 my $errstr = $failed_clone->errstr;
# Cancel every query still in flight before dying (a pg_async_status of 1
# means an async query is executing on that handle).
258 foreach my $clone (@newbusy, $failed_clone) {
259 $clone->pg_cancel if $clone->{pg_async_status} == 1;
# Dispatch the next queued statement: reuse a free clone when available,
# otherwise clone a new handle unless that would exceed $MAX_HANDLES.
265 if (my $statement = $phase->[0]) {
268 $clone = shift(@free);
269 } elsif ( !$MAX_HANDLES or
270 scalar(@free) + scalar(@busy) < $MAX_HANDLES ) {
271 $clone = $dbh->clone; # this will fail if over the server limit
275 my $rv = $clone->do($statement, {pg_async => PG_ASYNC});
278 shift @{ $phase }; # and actually take the statement off the queue
280 } # else I don't know, wait and retry
281 } # else too many handles, wait and retry
283 # all statements are dispatched
284 warn "Waiting for phase $i to complete\n";
287 } # while @$phase or @busy
290 warn "Schema changes complete.\n";
292 # warn "Pre-schema change upgrades completed in ". (time-$start). " seconds\n"; # if $DEBUG;
295 # dbdef->update_schema( dbdef_dist(datasrc), $dbh );
296 } else { # normal case, run statements sequentially
297 foreach my $statement ( @statements ) {
299 $dbh->do( $statement )
300 or die "Error: ". $dbh->errstr. "\n executing: $statement";
304 warn "Schema upgrade completed in ". (time-$start). " seconds\n"; # if $DEBUG;
# Prune stale payment applications (skipped with -s).  $hashref is built in
# lines not shown here; dry_run/debug flags are passed through to the pruner.
308 $hashref->{dry_run} = 1 if $DRY_RUN;
309 $hashref->{debug} = 1 if $DEBUG && $DRY_RUN;
310 prune_applications($hashref) unless $opt_s;
312 warn "Application pruning completed in ". (time-$start). " seconds\n"; # if $DEBUG;
315 print "\n" if $DRY_RUN;
# mysql only: seed one row per service table into duplicate_lock.  These
# rows appear to back freeside's duplicate-checking locks on mysql --
# TODO(review): confirm against the code that SELECTs them FOR UPDATE.
317 if ( $dbh->{Driver}->{Name} =~ /^mysql/i && ! $opt_s ) {
319 foreach my $table (qw( svc_acct svc_phone )) {
321 my $sth = $dbh->prepare(
322 "SELECT COUNT(*) FROM duplicate_lock WHERE lockname = '$table'"
323 ) or die $dbh->errstr;
325 $sth->execute or die $sth->errstr;
# Insert the lock row only if it isn't there yet (keeps re-runs idempotent).
327 unless ( $sth->fetchrow_arrayref->[0] ) {
329 $sth = $dbh->prepare(
330 "INSERT INTO duplicate_lock ( lockname ) VALUES ( '$table' )"
331 ) or die $dbh->errstr;
333 $sth->execute or die $sth->errstr;
339 warn "Duplication lock creation completed in ". (time-$start). " seconds\n"; # if $DEBUG;
# Commit the schema work, re-snapshot the now-final schema to the cache
# file, and reconnect so the rest of the upgrade runs on a fresh handle.
344 $dbh->commit or die $dbh->errstr;
346 dbdef_create($dbh, $dbdef_file);
348 $dbh->disconnect or die $dbh->errstr;
350 delete $FS::Schema::dbdef_cache{$dbdef_file}; #force an actual reload
351 $FS::UID::AutoCommit = 0;
352 $FS::UID::callback_hack = 1;
353 $dbh = adminsuidsetup($user);
354 $FS::UID::callback_hack = 0;
# First-time only: if the conf table is empty, import legacy filesystem
# configuration from conf.<datasrc> into the database.
355 unless ( $DRY_RUN || $opt_s ) {
356 my $dir = "%%%FREESIDE_CONF%%%/conf.". datasrc;
357 if (!scalar(qsearch('conf', {}))) {
358 my $error = FS::Conf::init_config($dir);
360 warn "CONFIGURATION UPGRADE FAILED\n";
361 $dbh->rollback or die $dbh->errstr;
366 $dbh->commit or die $dbh->errstr;
# Reconnect once more, this time with normal (non-hacked) login behavior.
367 $dbh->disconnect or die $dbh->errstr;
369 $FS::UID::AutoCommit = 1;
371 $dbh = adminsuidsetup($user);
373 warn "Re-initialization with updated schema completed in ". (time-$start). " seconds\n"; # if $DEBUG;
376 #### NEW CUSTOM FIELDS:
377 # 3. migrate old virtual field data to the new custom fields
# Copy each virtual_field value into its table's cf_ column, then delete the
# old virtual_field row.  Statements accumulate in @cfst (declared in lines
# not shown) and are executed in the loop below.
379 $cfsth = $dbh->prepare("SELECT * FROM virtual_field left join part_virtual_field using (vfieldpart)")
381 $cfsth->execute or die $cfsth->errstr;
383 while ( $cf = $cfsth->fetchrow_hashref ) {
384 my $tbl = $cf->{'dbtable'};
385 my $name = $cf->{'name'};
386 my $dtable = dbdef->table($tbl);
387 next unless $dtable && $dtable->primary_key; # XXX: warn first?
388 my $pkey = $dtable->primary_key;
# Only migrate rows keyed by an integer pkey, since recnum is interpolated
# unquoted into the WHERE clause below.
389 next unless $dtable->column($pkey)->type =~ /int/i; # XXX: warn first?
# NOTE(review): $cf->{'value'} is interpolated directly into the SQL string;
# a value containing a single quote would break (or inject into) the UPDATE.
# Consider $dbh->quote() here.
390 push @cfst, "UPDATE $tbl set cf_$name = '".$cf->{'value'}."' WHERE $pkey = ".$cf->{'recnum'};
391 push @cfst, "DELETE FROM virtual_field WHERE vfieldnum = ".$cf->{'vfieldnum'};
393 foreach my $cfst ( @cfst ) {
396 or die "Error: ". $dbh->errstr. "\n executing: $cfst";
398 warn "Custom fields data upgrade completed";
# Remaining FS::Upgrade passes: config updates, then data/table updates,
# then sqlradius.  Each is skipped on dry runs and schema-only (-s) runs;
# sqlradius is additionally skippable with -r.
400 upgrade_config(%upgrade_opts)
401 unless $DRY_RUN || $opt_s;
403 $dbh->commit or die $dbh->errstr;
405 warn "Config updates completed in ". (time-$start). " seconds\n"; # if $DEBUG;
408 upgrade(%upgrade_opts)
409 unless $DRY_RUN || $opt_s;
411 $dbh->commit or die $dbh->errstr;
413 warn "Table updates completed in ". (time-$start). " seconds\n"; # if $DEBUG;
416 upgrade_sqlradius(%upgrade_opts)
417 unless $DRY_RUN || $opt_s || $opt_r;
419 warn "SQL RADIUS updates completed in ". (time-$start). " seconds\n"; # if $DEBUG;
422 $dbh->commit or die $dbh->errstr;
423 $dbh->disconnect or die $dbh->errstr;
425 warn "Final commit and disconnection completed in ". (time-$start). " seconds; upgrade done!\n"; # if $DEBUG;
429 sub dbdef_create { # reverse engineer the schema from the DB and save to file
430 my( $dbh, $file ) = @_;
# NOTE(review): indirect object syntax; DBIx::DBSchema->new_native($dbh) is
# the safer modern spelling.  The save-to-$file step and closing brace fall
# in lines not shown in this excerpt.
431 my $dbdef = new_native DBIx::DBSchema $dbh;
# usage: print invocation synopsis and abort (the enclosing sub's opening
# line is not among the lines shown here).
436 die "Usage:\n freeside-upgrade [ -d ] [ -q | -v ] [ -r ] [ -s ] [ -j ] [ -a ] user\n";
441 freeside-upgrade - Upgrades database schema for new freeside versions.
445 freeside-upgrade [ -d ] [ -q | -v ] [ -r ] [ -s ] [ -j ] [ -a ]
449 Reads your existing database schema and updates it to match the current schema,
450 adding any columns or tables necessary.
452 Also performs other upgrade functions:
456 =item Calls FS::Misc::prune::prune_applications (probably unnecessary every upgrade, but simply won't find any records to change)
458 =item If necessary, moves your configuration information from the filesystem in /usr/local/etc/freeside/conf.<datasrc> to the database.
462 [ -d ]: Dry run; output SQL statements (to STDOUT) only, but do not execute
465 [ -q ]: Run quietly. This may become the default at some point.
467 [ -v ]: Run verbosely, sending debugging information to STDERR. This is the
470 [ -s ]: Schema changes only. Useful for Pg/slony slaves where the data
471 changes will be replicated from the Pg/slony master.
473 [ -r ]: Skip sqlradius updates. Useful for occasions where the sqlradius
474 databases may be inaccessible.
476 [ -j ]: Run certain upgrades asynchronously from the job queue. Currently
477 used only for the 2.x -> 3.x cust_location, cust_pay and part_pkg
478 upgrades. This may cause odd behavior before the upgrade is
479 complete, so it's recommended only for very large cust_main, cust_pay
480 and/or part_pkg tables that take too long to upgrade.
482 [ -a ]: Run schema changes in parallel (Pg only). DBIx::DBSchema minimum
483 version 0.41 recommended. Recommended only for large databases and
484 powerful database servers, to reduce upgrade time.