use vars qw( $opt_d $opt_s $opt_q $opt_v $opt_r $opt_c $opt_j $opt_a );
use vars qw( $DEBUG $DRY_RUN );
use Getopt::Std;
use DBD::Pg qw(:async); #for -a
use DBIx::DBSchema 0.31; #0.39
use FS::UID qw(adminsuidsetup checkeuid datasrc driver_name);
use FS::CurrentUser;
use FS::Conf;
use FS::Schema qw( dbdef dbdef_dist reload_dbdef );
use FS::Misc::prune qw(prune_applications);
use FS::Record qw(qsearch);
use FS::Upgrade qw(upgrade_schema upgrade_config upgrade upgrade_sqlradius);
die "Not running uid freeside!" unless checkeuid();

getopts("dqvrcsja");
$DRY_RUN = $opt_d;

my $start = time;

my $user = shift or die &usage;
$FS::CurrentUser::upgrade_hack = 1;
$FS::UID::callback_hack = 1;
my $dbh = adminsuidsetup($user);
$FS::UID::callback_hack = 0;

# pass command line opts through to upgrade* routines
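# A minimal sketch only: the exact option names FS::Upgrade expects are not
# shown here, so the keys below are assumptions mapping the documented flags
# onto named options for the upgrade_* routines.
my %upgrade_opts = (
  'quiet'   => $opt_q,  # -q
  'verbose' => $opt_v,  # -v
  'queue'   => $opt_j,  # -j: run certain upgrades from the job queue
);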
if ( driver_name =~ /^mysql/i ) { #until 0.39 is required above
  eval "use DBIx::DBSchema 0.39;";
  die $@ if $@;
}
#needs to match FS::Schema...
my $dbdef_file = "%%%FREESIDE_CONF%%%/dbdef.". datasrc;

dbdef_create($dbh, $dbdef_file);

delete $FS::Schema::dbdef_cache{$dbdef_file}; #force an actual reload
reload_dbdef($dbdef_file);
warn "Upgrade startup completed in ". (time-$start). " seconds\n"; # if $DEBUG;
$start = time;
#$DBIx::DBSchema::DEBUG = $DEBUG;
#$DBIx::DBSchema::Table::DEBUG = $DEBUG;
#$DBIx::DBSchema::Index::DEBUG = $DEBUG;
my @bugfix = ();

if ( dbdef->table('cust_main')->column('agent_custid') && ! $opt_s ) {
  push @bugfix,
    "UPDATE cust_main SET agent_custid = NULL where agent_custid = ''";
  push @bugfix,
    "UPDATE h_cust_main SET agent_custid = NULL where agent_custid = ''"
      if (dbdef->table('h_cust_main'));
}
if ( dbdef->table('cgp_rule_condition') &&
     dbdef->table('cgp_rule_condition')->column('condition')
   ) {
  # rename the column on both the main and history tables
  push @bugfix,
    "ALTER TABLE ${_}cgp_rule_condition RENAME COLUMN condition TO conditionname"
      for ( '', 'h_' );
}
if ( dbdef->table('areacode') and
     dbdef->table('areacode')->primary_key eq 'code' )
{
  if ( driver_name =~ /^mysql/i ) {
    push @bugfix,
      'ALTER TABLE areacode DROP PRIMARY KEY',
      'ALTER TABLE areacode ADD COLUMN (areanum int auto_increment primary key)';
  } else {
    push @bugfix, 'ALTER TABLE areacode DROP CONSTRAINT areacode_pkey';
  }
}
if ( dbdef->table('upgrade_journal') ) {
  if ( driver_name =~ /^Pg/i ) {
    push @bugfix, "
      SELECT SETVAL( 'upgrade_journal_upgradenum_seq',
                     ( SELECT MAX(upgradenum) FROM upgrade_journal )
                   )
    ";
  #MySQL can't do this in a statement so have to do it manually
  #} elsif ( driver_name =~ /^mysql/i ) {
  #  push @bugfix, "
  #    ALTER TABLE upgrade_journal AUTO_INCREMENT =
  #      ( ( SELECT MAX(upgradenum) FROM upgrade_journal ) + 1 )
  #  ";
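  #
  # Untested sketch of the manual approach (not executed here): MySQL rejects
  # a subquery in ALTER TABLE ... AUTO_INCREMENT, so the value would have to
  # be fetched first and interpolated as a literal:
  #
  #   my ($max) = $dbh->selectrow_array(
  #     'SELECT MAX(upgradenum) FROM upgrade_journal'
  #   );
  #   push @bugfix, 'ALTER TABLE upgrade_journal AUTO_INCREMENT = '. ($max + 1)
  #     if $max;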
  }
}

if ( $DRY_RUN ) {
  print
    join(";\n", @bugfix ). ";\n";
} elsif ( @bugfix ) {

  foreach my $statement ( @bugfix ) {
    $dbh->do( $statement )
      or die "Error: ". $dbh->errstr. "\n executing: $statement";
  }
  upgrade_schema(%upgrade_opts);

  dbdef_create($dbh, $dbdef_file);
  delete $FS::Schema::dbdef_cache{$dbdef_file}; #force an actual reload
  reload_dbdef($dbdef_file);

}
#you should have run fs-migrate-part_svc ages ago, when you upgraded
#from 1.3 to 1.4... if not, it needs to be hooked into -upgrade here or
#you'll lose all the part_svc settings it migrates to part_svc_column

my $conf = new FS::Conf;
my $dbdef_dist = dbdef_dist(
  datasrc,
  { 'queue-no_history' => $conf->exists('queue-no_history') },
);

my @statements = dbdef->sql_update_schema( $dbdef_dist,
                                           $dbh,
                                           { 'nullify_default' => 1, },
                                         );
#### NEW CUSTOM FIELDS:
# 1. prevent new custom field columns from being dropped by upgrade
# 2. migrate old virtual fields to real fields (new custom fields)
####
my $cfsth = $dbh->prepare("SELECT * FROM part_virtual_field")
  or die $dbh->errstr;
$cfsth->execute or die $cfsth->errstr;
my $cf;
while ( $cf = $cfsth->fetchrow_hashref ) {
  my $tbl = $cf->{'dbtable'};
  my $name = $cf->{'name'};
  $name = lc($name) unless driver_name =~ /^mysql/i;

  @statements = grep { $_ !~ /^\s*ALTER\s+TABLE\s+(h_|)$tbl\s+DROP\s+COLUMN\s+cf_$name\s*$/i }
                     @statements;

  push @statements,
    "ALTER TABLE $tbl ADD COLUMN cf_$name varchar(".$cf->{'length'}.")"
      unless (dbdef->table($tbl) && dbdef->table($tbl)->column("cf_$name"));
  push @statements,
    "ALTER TABLE h_$tbl ADD COLUMN cf_$name varchar(".$cf->{'length'}.")"
      unless (dbdef->table("h_$tbl") && dbdef->table("h_$tbl")->column("cf_$name"));
}
warn "Custom fields schema upgrade completed\n";
@statements =
  grep { $_ !~ /^CREATE +INDEX +h_queue/i } #useless, holds up queue insertion
       @statements;

unless ( driver_name =~ /^mysql/i ) {
  #not necessary under non-mysql, takes forever on big db
  @statements =
    grep { $_ !~ /^ *ALTER +TABLE +h_queue +ALTER +COLUMN +job +TYPE +varchar\(512\) *$/i }
         @statements;
}

if ( $opt_c ) { # -c: skip cdr and h_cdr changes
  @statements = grep { $_ !~ /^ *ALTER +TABLE +(h_)?cdr /i } @statements;
  @statements = grep { $_ !~ /^ *CREATE +INDEX +(h_)?cdr\d+ /i } @statements;
}
my $MAX_HANDLES; # undef for now, set it if you want a limit

if ( $DRY_RUN ) {

  print join(";\n", @statements ). ";\n";

} elsif ( $opt_a ) { # -a: run schema changes in parallel (Pg only)

  my @phases = map { [] } 0..4;
  my $fsupgrade_idx = 1;
  my %idx_map;
  foreach (@statements) {
    if ( /^ *(CREATE|ALTER) +TABLE/ ) {
      # phase 0: CREATE TABLE, ALTER TABLE
      push @{ $phases[0] }, $_;
    } elsif ( /^ *ALTER +INDEX.* RENAME TO dbs_temp(\d+)/ ) {
      # phase 1: rename index to dbs_temp%d
      # (see DBIx::DBSchema::Table)
      # but in this case, uniquify all the dbs_temps.  This method only works
      # because they are in the right order to begin with...
      my $dbstemp_idx = $1;
      s/dbs_temp$dbstemp_idx/fsupgrade_temp$fsupgrade_idx/;
      $idx_map{ $dbstemp_idx } = $fsupgrade_idx;
      push @{ $phases[1] }, $_;
      $fsupgrade_idx++;
    } elsif ( /^ *(CREATE|DROP)( +UNIQUE)? +INDEX/ ) {
      # phase 2: create/drop indices
      push @{ $phases[2] }, $_;
    } elsif ( /^ *ALTER +INDEX +dbs_temp(\d+) +RENAME/ ) {
      # phase 3: rename temp indices back to real ones
      my $dbstemp_idx = $1;
      my $mapped_idx = $idx_map{ $dbstemp_idx }
        or die "unable to remap dbs_temp$1 RENAME statement";
      s/dbs_temp$dbstemp_idx/fsupgrade_temp$mapped_idx/;
      push @{ $phases[3] }, $_;
    } else {
      # phase 4: everything else (CREATE SEQUENCE, SELECT SETVAL, etc.)
      push @{ $phases[4] }, $_;
    }
  }
  my $i = 0;
  my @busy = ();
  my @free = ();

  foreach my $phase (@phases) {

    warn "Starting schema changes, phase $i...\n";

    while (@$phase or @busy) {

      # check status of all running tasks
      my @newbusy;
      my $failed_clone;
      for my $clone (@busy) {
        if ( $clone->pg_ready ) {
          my $rv = $clone->pg_result && $clone->commit;
          $failed_clone = $clone if !$rv;
          push @free, $clone;
        } else {
          push @newbusy, $clone;
        }
      }

      if ( $failed_clone ) {
        my $errstr = $failed_clone->errstr;
        foreach my $clone (@newbusy, $failed_clone) {
          $clone->pg_cancel if $clone->{pg_async_status} == 1;
        }
        die "$errstr\n";
      }
      @busy = @newbusy;

      if (my $statement = $phase->[0]) {
        my $clone;
        if ( @free ) {
          $clone = shift(@free);
        } elsif ( !$MAX_HANDLES or
                  scalar(@free) + scalar(@busy) < $MAX_HANDLES ) {
          $clone = $dbh->clone; # this will fail if over the server limit
        }
        if ( $clone ) {
          my $rv = $clone->do($statement, {pg_async => PG_ASYNC});
          if ( $rv ) {
            shift @{ $phase }; # and actually take the statement off the queue
            push @busy, $clone;
          } # else I don't know, wait and retry
        } # else too many handles, wait and retry
      } elsif (@busy) {
        # all statements are dispatched
        warn "Waiting for phase $i to complete\n";
        sleep 1; # brief pause before polling again
      }

    } # while @$phase or @busy

    $i++;
  }

  warn "Schema changes complete.\n";
#  warn "Pre-schema change upgrades completed in ". (time-$start). " seconds\n"; # if $DEBUG;

#  dbdef->update_schema( dbdef_dist(datasrc), $dbh );

} else { # normal case, run statements sequentially

  foreach my $statement ( @statements ) {
    $dbh->do( $statement )
      or die "Error: ". $dbh->errstr. "\n executing: $statement";
  }

}

warn "Schema upgrade completed in ". (time-$start). " seconds\n"; # if $DEBUG;
$start = time;
my $hashref = {};
$hashref->{dry_run} = 1 if $DRY_RUN;
$hashref->{debug} = 1 if $DEBUG && $DRY_RUN;
prune_applications($hashref) unless $opt_s;

warn "Application pruning completed in ". (time-$start). " seconds\n"; # if $DEBUG;
$start = time;

print "\n" if $DRY_RUN;
if ( $dbh->{Driver}->{Name} =~ /^mysql/i && ! $opt_s ) {

  foreach my $table (qw( svc_acct svc_phone )) {

    my $sth = $dbh->prepare(
      "SELECT COUNT(*) FROM duplicate_lock WHERE lockname = '$table'"
    ) or die $dbh->errstr;

    $sth->execute or die $sth->errstr;

    unless ( $sth->fetchrow_arrayref->[0] ) {

      $sth = $dbh->prepare(
        "INSERT INTO duplicate_lock ( lockname ) VALUES ( '$table' )"
      ) or die $dbh->errstr;

      $sth->execute or die $sth->errstr;

    }

  }

  warn "Duplication lock creation completed in ". (time-$start). " seconds\n"; # if $DEBUG;
  $start = time;

}
$dbh->commit or die $dbh->errstr;

dbdef_create($dbh, $dbdef_file);

$dbh->disconnect or die $dbh->errstr;

delete $FS::Schema::dbdef_cache{$dbdef_file}; #force an actual reload
$FS::UID::AutoCommit = 0;
$FS::UID::callback_hack = 1;
$dbh = adminsuidsetup($user);
$FS::UID::callback_hack = 0;
unless ( $DRY_RUN || $opt_s ) {
  my $dir = "%%%FREESIDE_CONF%%%/conf.". datasrc;
  if (!scalar(qsearch('conf', {}))) {
    my $error = FS::Conf::init_config($dir);
    if ($error) {
      warn "CONFIGURATION UPGRADE FAILED\n";
      $dbh->rollback or die $dbh->errstr;
      die $error;
    }
  }
}

$dbh->commit or die $dbh->errstr;
$dbh->disconnect or die $dbh->errstr;
$FS::UID::AutoCommit = 1;

$dbh = adminsuidsetup($user);

warn "Re-initialization with updated schema completed in ". (time-$start). " seconds\n"; # if $DEBUG;
$start = time;
#### NEW CUSTOM FIELDS:
# 3. migrate old virtual field data to the new custom fields
####
$cfsth = $dbh->prepare("SELECT * FROM virtual_field left join part_virtual_field using (vfieldpart)")
  or die $dbh->errstr;
$cfsth->execute or die $cfsth->errstr;
my @cfst;
while ( $cf = $cfsth->fetchrow_hashref ) {
  my $tbl = $cf->{'dbtable'};
  my $name = $cf->{'name'};
  my $dtable = dbdef->table($tbl);
  next unless $dtable && $dtable->primary_key; # XXX: warn first?
  my $pkey = $dtable->primary_key;
  next unless $dtable->column($pkey)->type =~ /int/i; # XXX: warn first?
  push @cfst, "UPDATE $tbl set cf_$name = '".$cf->{'value'}."' WHERE $pkey = ".$cf->{'recnum'};
  push @cfst, "DELETE FROM virtual_field WHERE vfieldnum = ".$cf->{'vfieldnum'};
}
foreach my $cfst ( @cfst ) {
  $dbh->do( $cfst )
    or die "Error: ". $dbh->errstr. "\n executing: $cfst";
}
warn "Custom fields data upgrade completed\n";
upgrade_config(%upgrade_opts)
  unless $DRY_RUN || $opt_s;

$dbh->commit or die $dbh->errstr;

warn "Config updates completed in ". (time-$start). " seconds\n"; # if $DEBUG;
$start = time;

upgrade(%upgrade_opts)
  unless $DRY_RUN || $opt_s;

$dbh->commit or die $dbh->errstr;

warn "Table updates completed in ". (time-$start). " seconds\n"; # if $DEBUG;
$start = time;

upgrade_sqlradius(%upgrade_opts)
  unless $DRY_RUN || $opt_s || $opt_r;

warn "SQL RADIUS updates completed in ". (time-$start). " seconds\n"; # if $DEBUG;
$start = time;

$dbh->commit or die $dbh->errstr;
$dbh->disconnect or die $dbh->errstr;

warn "Final commit and disconnection completed in ". (time-$start). " seconds; upgrade done!\n"; # if $DEBUG;
sub dbdef_create { # reverse engineer the schema from the DB and save to file
  my( $dbh, $file ) = @_;
  my $dbdef = new_native DBIx::DBSchema $dbh;
  $dbdef->save($file);
}

sub usage {
  die "Usage:\n freeside-upgrade [ -d ] [ -q | -v ] [ -r ] [ -c ] [ -s ] [ -j ] [ -a ] user\n";
}

=head1 NAME

freeside-upgrade - Upgrades the database schema for new Freeside versions.

=head1 SYNOPSIS

  freeside-upgrade [ -d ] [ -q | -v ] [ -r ] [ -c ] [ -s ] [ -j ] [ -a ] user

=head1 DESCRIPTION

Reads your existing database schema and updates it to match the current
schema, adding any columns or tables necessary.

Also performs other upgrade functions:

=over 4

=item Calls FS::Misc::prune::prune_applications (probably unnecessary on every upgrade, but harmless; it simply won't find any records to change)

=item If necessary, moves your configuration information from the filesystem in /usr/local/etc/freeside/conf.<datasrc> to the database.

=back

  [ -d ]: Dry run; output SQL statements (to STDOUT) only, but do not execute
          them.

  [ -q ]: Run quietly.  This may become the default at some point.

  [ -v ]: Run verbosely, sending debugging information to STDERR.  This is the
          current default.

  [ -s ]: Schema changes only.  Useful for Pg/slony slaves where the data
          changes will be replicated from the Pg/slony master.

  [ -r ]: Skip sqlradius updates.  Useful for occasions where the sqlradius
          databases may be inaccessible.

  [ -c ]: Skip cdr and h_cdr updates.

  [ -j ]: Run certain upgrades asynchronously from the job queue.  Currently
          used only for the 2.x -> 3.x cust_location, cust_pay and part_pkg
          upgrades.  This may cause odd behavior before the upgrade is
          complete, so it's recommended only for very large cust_main, cust_pay
          and/or part_pkg tables that take too long to upgrade.

  [ -a ]: Run schema changes in parallel (Pg only).  DBIx::DBSchema minimum
          version 0.41 recommended.  Only advisable for large databases on
          powerful database servers, to reduce upgrade time.
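
For example, a typical sequence is a dry run first, capturing the pending SQL
for review, followed by a real run (the "freeside" username and the output
filename below are only illustrative; substitute your own Freeside user):

  freeside-upgrade -d freeside > pending_upgrade.sql
  freeside-upgrade freeside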