* Finished anvil-delete-server! (More testing needed though)

* Fixed a bug in Cluster->shutdown_server() where the wrong variable was being evaluated when checking the server state.
* Created DRBD->delete_resource() that deletes a resource's backing device and configuration. Note that this wipes the DRBD MD and FS signatures before removing the LV. Updated DRBD->gather_data() to record the backing devices for volumes.

Signed-off-by: Digimer <digimer@alteeve.ca>
main
Digimer 4 years ago
parent 549dbad635
commit 89dec8e1f9
  1. 2
      Anvil/Tools.pm
  2. 8
      Anvil/Tools/Cluster.pm
  3. 230
      Anvil/Tools/DRBD.pm
  4. 1
      Anvil/Tools/Database.pm
  5. 544
      notes
  6. 15
      share/words.xml
  7. 148
      tools/anvil-delete-server
  8. 3
      tools/test.pl

@ -1147,6 +1147,7 @@ sub _set_paths
lsblk => "/usr/bin/lsblk",
lvchange => "/usr/sbin/lvchange",
lvcreate => "/usr/sbin/lvcreate",
lvremove => "/usr/sbin/lvremove",
lvs => "/usr/sbin/lvs",
lvscan => "/usr/sbin/lvscan",
mailx => "/usr/bin/mailx",
@ -1206,6 +1207,7 @@ sub _set_paths
uuidgen => "/usr/bin/uuidgen",
virsh => "/usr/bin/virsh",
'virt-install' => "/usr/bin/virt-install",
wipefs => "/usr/sbin/wipefs",
vgs => "/usr/sbin/vgs",
vgscan => "/usr/sbin/vgscan",
wc => "/usr/bin/wc",

@ -807,7 +807,7 @@ sub delete_server
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 0, priority => 'err', key => "error_0226", variables => {
server_name => $server_name,
return_code => $return_code,
output => $$output,
output => $output,
}});
return('!!error!!');
}
@ -2601,15 +2601,15 @@ sub shutdown_server
while($waiting)
{
$anvil->Cluster->parse_cib({debug => $debug});
my $status = $anvil->data->{cib}{parsed}{data}{server}{$server}{status};
my $host = $anvil->data->{cib}{parsed}{data}{server}{$server}{host};
my $status = $anvil->data->{cib}{parsed}{data}{server}{$server}{status};
my $host = defined $anvil->data->{cib}{parsed}{data}{server}{$server}{host} ? $anvil->data->{cib}{parsed}{data}{server}{$server}{host} : "";
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => {
status => $status,
host => $host,
}});
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 2, key => "log_0554", variables => { server => $server }});
if ($host eq "running")
if ($status eq "running")
{
# Wait a bit and check again.
sleep 2;

@ -15,6 +15,7 @@ my $THIS_FILE = "DRBD.pm";
### Methods;
# allow_two_primaries
# delete_resource
# gather_data
# get_devices
# get_status
@ -236,6 +237,208 @@ sub allow_two_primaries
}
=head2 delete_resource
This method deletes an entire resource. It does this by looping through the volumes configured in a resource and deleting them one after the other (even if there is only one volume).
On success, C<< 0 >> is returned. If there are any issues, C<< !!error!! >> will be returned.
Parameters;
=head3 resource (required)
This is the name of the resource to be deleted.
=head3 wait (optional, default '1')
This controls whether we wait for a resource that is C<< Primary >> or C<< SyncSource >> to demote or the sync target to disconnect before proceeding. If this is set to C<< 0 >>, instead of waiting, the method returns an error and aborts.
=cut
# Deletes an entire DRBD resource on the local host. The sequence is:
#   1. Optionally wait until no peer is sync'ing from us (or abort if 'wait' is 0).
#   2. Down the resource via DRBD->manage_resource().
#   3. Wipe the DRBD metadata ('drbdadm wipe-md') for the resource.
#   4. For each recorded backing device: 'wipefs --all' then 'lvremove --force'.
#   5. Unlink the resource's configuration file.
# Returns '0' on success, '!!error!!' on any failure.
sub delete_resource
{
my $self = shift;
my $parameter = shift;
my $anvil = $self->parent;
# Default log level for this method's detail logging is 3 (low visibility).
my $debug = defined $parameter->{debug} ? $parameter->{debug} : 3;
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => $debug, key => "log_0125", variables => { method => "DRBD->delete_resource()" }});
# Parameters: 'resource' (required) is the DRBD resource name; 'wait'
# (optional, default 1) controls whether we block while a peer still
# needs us as a sync source, or abort immediately when it is 0.
my $resource = defined $parameter->{resource} ? $parameter->{resource} : "";
my $wait = defined $parameter->{'wait'} ? $parameter->{'wait'} : 1;
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => {
resource => $resource,
'wait' => $wait
}});
# The resource name is required; error out if it wasn't given.
if (not $resource)
{
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 0, priority => "err", key => "log_0020", variables => { method => "DRBD->delete_resource()", parameter => "resource" }});
return('!!error!!');
}
# Refresh the 'new::resource::...' hash so we see the current config.
$anvil->DRBD->gather_data({debug => $debug});
if (not exists $anvil->data->{new}{resource}{$resource})
{
# Resource not found.
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 0, priority => "err", key => "error_0228", variables => { resource => $resource }});
return('!!error!!');
}
# Loop until no peer depends on us for sync data (or abort if wait=0).
my $waiting = 1;
while($waiting)
{
my $peer_needs_us = 0;
foreach my $volume (sort {$a cmp $b} keys %{$anvil->data->{new}{resource}{$resource}{volume}})
{
# If we're sync source, or we're primary, we'll either wait or abort.
my $device_path = $anvil->data->{new}{resource}{$resource}{volume}{$volume}{device_path};
my $backing_disk = $anvil->data->{new}{resource}{$resource}{volume}{$volume}{backing_disk};
my $device_minor = $anvil->data->{new}{resource}{$resource}{volume}{$volume}{device_minor};
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => {
's1:device_path' => $device_path,
's2:backing_disk' => $backing_disk,
's3:device_minor' => $device_minor,
}});
# Record each backing device; these are wiped and removed after the
# resource is downed, later in this method.
$anvil->data->{drbd}{resource}{$resource}{backing_disk}{$backing_disk} = 1;
foreach my $peer (sort {$a cmp $b} keys %{$anvil->data->{new}{resource}{$resource}{volume}{$volume}{peer}})
{
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => {
"new::resource::${resource}::volume::${volume}::peer::${peer}::local_disk_state" => $anvil->data->{new}{resource}{$resource}{volume}{$volume}{peer}{$peer}{local_disk_state},
}});
# Any of these local disk states mean a peer is (or is about to be)
# pulling sync data from us; deleting now would break the peer.
# NOTE(review): states appear to be compared lowercased — confirm
# gather_data() normalizes case.
if (($anvil->data->{new}{resource}{$resource}{volume}{$volume}{peer}{$peer}{local_disk_state} eq "startingsyncs") or
($anvil->data->{new}{resource}{$resource}{volume}{$volume}{peer}{$peer}{local_disk_state} eq "syncsource") or
($anvil->data->{new}{resource}{$resource}{volume}{$volume}{peer}{$peer}{local_disk_state} eq "pausedsyncs") or
($anvil->data->{new}{resource}{$resource}{volume}{$volume}{peer}{$peer}{local_disk_state} eq "ahead"))
{
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 1, priority => "alert", key => "warning_0074", variables => {
peer_name => $peer,
resource => $resource,
volume => $volume,
disk_state => $anvil->data->{new}{resource}{$resource}{volume}{$volume}{peer}{$peer}{local_disk_state},
}});
$peer_needs_us = 1;
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { peer_needs_us => $peer_needs_us }});
}
}
}
if ($peer_needs_us)
{
if (not $wait)
{
# Abort.
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 0, priority => "err", key => "error_0229"});
return('!!error!!');
}
else
{
# Wait 10 seconds, re-read the DRBD state, then re-check.
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 1, priority => "alert", key => "log_0588"});
sleep 10;
# NOTE(review): missing trailing semicolon below — legal as the last
# statement in the block, but worth adding for consistency.
$anvil->DRBD->gather_data({debug => $debug})
}
}
else
{
# No need to wait now.
$waiting = 0;
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { waiting => $waiting }});
}
}
# Down the resource, if needed.
$anvil->DRBD->manage_resource({
debug => $debug,
resource => $resource,
task => "down",
});
# Wipe the DRBD MDs from each backing LV
# ('drbdadm wipe-md' prompts for confirmation, hence the piped 'echo yes').
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 1, key => "log_0590", variables => { resource => $resource }});
my $shell_call = $anvil->data->{path}{exe}{echo}." yes | ".$anvil->data->{path}{exe}{drbdadm}." wipe-md ".$resource;
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { shell_call => $shell_call }});
my ($output, $return_code) = $anvil->System->call({shell_call => $shell_call});
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => {
output => $output,
return_code => $return_code,
}});
if ($return_code)
{
# Should have been '0'
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 1, priority => "err", key => "error_0230", variables => {
shell_call => $shell_call,
return_code => $return_code,
output => $output,
}});
return('!!error!!');
}
# Now wipefs and lvremove each backing device
# (backing devices were recorded in the wait loop above).
foreach my $backing_disk (sort {$a cmp $b} keys %{$anvil->data->{drbd}{resource}{$resource}{backing_disk}})
{
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 1, key => "log_0591", variables => { backing_disk => $backing_disk }});
# Clear any filesystem / RAID / LVM signatures from the device first.
my $shell_call = $anvil->data->{path}{exe}{wipefs}." --all ".$backing_disk;
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { shell_call => $shell_call }});
my ($output, $return_code) = $anvil->System->call({shell_call => $shell_call});
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => {
output => $output,
return_code => $return_code,
}});
if ($return_code)
{
# Should have been '0'
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 1, priority => "err", key => "error_0230", variables => {
shell_call => $shell_call,
return_code => $return_code,
output => $output,
}});
return('!!error!!');
}
# Now delete the logical volume
$shell_call = $anvil->data->{path}{exe}{lvremove}." --force ".$backing_disk;
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { shell_call => $shell_call }});
($output, $return_code) = $anvil->System->call({shell_call => $shell_call});
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => {
output => $output,
return_code => $return_code,
}});
if ($return_code)
{
# Should have been '0'
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 1, priority => "err", key => "error_0230", variables => {
shell_call => $shell_call,
return_code => $return_code,
output => $output,
}});
return('!!error!!');
}
}
# Now unlink the resource config file.
my $resource_file = $anvil->data->{new}{resource}{$resource}{config_file};
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { resource_file => $resource_file }});
if (-f $resource_file)
{
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 1, key => "log_0589", variables => { file => $resource_file }});
unlink $resource_file;
# Give the filesystem a moment, then verify the file is really gone.
sleep 1;
if (-f $resource_file)
{
# WTF?
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 1, priority => "err", key => "log_0243", variables => { file => $resource_file }});
return('!!error!!');
}
else
{
# Success!
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 1, key => "job_0134", variables => { file_path => $resource_file }});
}
}
return(0);
}
=head2 gather_data
This calls C<< drbdadm >> to collect the configuration of the local system and parses it. This method is designed for use by C<< scan_drbd >>, but is useful elsewhere. This is note-worthy as the data is stored under a C<< new::... >> hash.
@ -361,12 +564,14 @@ sub gather_data
's2:meta_disk' => $meta_disk,
}});
$anvil->data->{new}{resource}{$resource}{volume}{$volume}{device_path} = $volume_vnr->findvalue('./device');
$anvil->data->{new}{resource}{$resource}{volume}{$volume}{device_minor} = $volume_vnr->findvalue('./device/@minor');
$anvil->data->{new}{resource}{$resource}{volume}{$volume}{size} = 0;
$anvil->data->{new}{resource}{$resource}{volume}{$volume}{device_path} = $volume_vnr->findvalue('./device');
$anvil->data->{new}{resource}{$resource}{volume}{$volume}{backing_disk} = $volume_vnr->findvalue('./disk');
$anvil->data->{new}{resource}{$resource}{volume}{$volume}{device_minor} = $volume_vnr->findvalue('./device/@minor');
$anvil->data->{new}{resource}{$resource}{volume}{$volume}{size} = 0;
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => {
"s1:new::resource::${resource}::volume::${volume}::device_path" => $anvil->data->{new}{resource}{$resource}{volume}{$volume}{device_path},
"s2:new::resource::${resource}::volume::${volume}::device_minor" => $anvil->data->{new}{resource}{$resource}{volume}{$volume}{device_minor},
"s2:new::resource::${resource}::volume::${volume}::backing_disk" => $anvil->data->{new}{resource}{$resource}{volume}{$volume}{backing_disk},
"s3:new::resource::${resource}::volume::${volume}::device_minor" => $anvil->data->{new}{resource}{$resource}{volume}{$volume}{device_minor},
}});
}
}
@ -1093,11 +1298,13 @@ ORDER BY
This parses the DRBD status on the local or remote system. The data collected is stored in the following hashes;
- drbd::status::<host_name>::resource::<resource_name>::{ap-in-flight,congested,connection-state,peer-node-id,rs-in-flight}
- drbd::status::<host_name>::resource::<resource_name>::connection::<peer_host_name>::volume::<volume>::{has-online-verify-details,has-sync-details,out-of-sync,peer-client,peer-disk-state,pending,percent-in-sync,received,replication-state,resync-suspended,sent,unacked}
- # If the volume is resyncing, these additional values will be set:
- drbd::status::<host_name>::resource::<resource_name>::connection::<peer_host_name>::volume::<volume>::{db-dt MiB-s,db0-dt0 MiB-s,db1-dt1 MiB-s,estimated-seconds-to-finish,percent-resync-done,rs-db0-sectors,rs-db1-sectors,rs-dt-start-ms,rs-dt0-ms,rs-dt1-ms,rs-failed,rs-paused-ms,rs-same-csum,rs-total,want}
- drbd::status::<host_name>::resource::<resource>::devices::volume::<volume>::{al-writes,bm-writes,client,disk-state,lower-pending,minor,quorum,read,size,upper-pending,written}
drbd::status::<host_name>::resource::<resource_name>::{ap-in-flight,congested,connection-state,peer-node-id,rs-in-flight}
drbd::status::<host_name>::resource::<resource_name>::connection::<peer_host_name>::volume::<volume>::{has-online-verify-details,has-sync-details,out-of-sync,peer-client,peer-disk-state,pending,percent-in-sync,received,replication-state,resync-suspended,sent,unacked}
If the volume is resyncing, these additional values will be set:
drbd::status::<host_name>::resource::<resource_name>::connection::<peer_host_name>::volume::<volume>::{db-dt MiB-s,db0-dt0 MiB-s,db1-dt1 MiB-s,estimated-seconds-to-finish,percent-resync-done,rs-db0-sectors,rs-db1-sectors,rs-dt-start-ms,rs-dt0-ms,rs-dt1-ms,rs-failed,rs-paused-ms,rs-same-csum,rs-total,want}
drbd::status::<host_name>::resource::<resource>::devices::volume::<volume>::{al-writes,bm-writes,client,disk-state,lower-pending,minor,quorum,read,size,upper-pending,written}
If any data for the host was stored in a previous call, it will be deleted before the new data is collected and stored.
@ -1184,6 +1391,11 @@ sub get_status
}});
}
if (exists $anvil->data->{drbd}{status}{$host})
{
delete $anvil->data->{drbd}{status}{$host};
}
# Parse the output.
my $json = JSON->new->allow_nonref;
my $drbd_status = $json->decode($output);

@ -4930,6 +4930,7 @@ SET
WHERE
anvil_uuid = ".$anvil->Database->quote($anvil_uuid)."
;";
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { query => $query }});
$anvil->Database->write({uuid => $uuid, query => $query, source => $file ? $file." -> ".$THIS_FILE : $THIS_FILE, line => $line ? $line." -> ".__LINE__ : __LINE__});
}
return($anvil_uuid);

544
notes

@ -576,280 +576,6 @@ argument ... argument is repeatable.
====
BEGIN TRANSACTION;
DROP FUNCTION history_alerts() CASCADE;
DROP TABLE history.alerts;
DROP TABLE alerts;
CREATE TABLE alerts (
alert_uuid uuid not null primary key,
alert_host_uuid uuid not null, -- The name of the node or dashboard that this alert came from.
alert_set_by text not null,
alert_level integer not null, -- 1 (critical), 2 (warning), 3 (notice) or 4 (info)
alert_title text not null, -- ScanCore will read in the agents <name>.xml words file and look for this message key
alert_message text not null, -- ScanCore will read in the agents <name>.xml words file and look for this message key
alert_sort_position integer not null default 9999, -- The alerts will sort on this column. It allows for an optional sorting of the messages in the alert.
alert_show_header integer not null default 1, -- This can be set to have the alert be printed with only the contents of the string, no headers.
modified_date timestamp with time zone not null,
FOREIGN KEY(alert_host_uuid) REFERENCES hosts(host_uuid)
);
ALTER TABLE alerts OWNER TO admin;
CREATE TABLE history.alerts (
history_id bigserial,
alert_uuid uuid,
alert_host_uuid uuid,
alert_set_by text,
alert_level integer,
alert_title text,
alert_message text,
alert_sort_position integer,
alert_show_header integer,
modified_date timestamp with time zone not null
);
ALTER TABLE history.alerts OWNER TO admin;
CREATE FUNCTION history_alerts() RETURNS trigger
AS $$
DECLARE
history_alerts RECORD;
BEGIN
SELECT INTO history_alerts * FROM alerts WHERE alert_uuid = new.alert_uuid;
INSERT INTO history.alerts
(alert_uuid,
alert_host_uuid,
alert_set_by,
alert_level,
alert_title,
alert_title_variables,
alert_message,
alert_message_variables,
alert_sort_position,
alert_show_header,
modified_date)
VALUES
(history_alerts.alert_uuid,
history_alerts.alert_host_uuid,
history_alerts.alert_set_by,
history_alerts.alert_level,
history_alerts.alert_title,
history_alerts.alert_message,
history_alerts.alert_sort_position,
history_alerts.alert_show_header,
history_alerts.modified_date);
RETURN NULL;
END;
$$
LANGUAGE plpgsql;
ALTER FUNCTION history_alerts() OWNER TO admin;
CREATE TRIGGER trigger_alerts
AFTER INSERT OR UPDATE ON alerts
FOR EACH ROW EXECUTE PROCEDURE history_alerts();
COMMIT;
====
3rd party stuff;
dnf install autoconf automake bzip2-devel corosynclib-devel gnutls-devel help2man libqb-devel libtool \
libtool-ltdl-devel libuuid-devel libxml2-devel libxslt-devel pam-devel "pkgconfig(dbus-1)" \
"pkgconfig(glib-2.0)" python3-devel asciidoc inkscape publican booth-site diffstat \
fence-agents-apc fence-agents-ipmilan fence-agents-scsi fence-virt python3-lxml ruby-devel \
rubygem-backports rubygem-ethon rubygem-ffi rubygem-multi_json rubygem-open4 rubygem-rack \
rubygem-rack-protection rubygem-rack-test rubygem-sinatra rubygem-test-unit flex perl-generators
----
pacemaker;
git tag
git checkout <2.x>
make srpm
mv /home/digimer/anvil/builds/pacemaker/pacemaker-2.0.0-0.1.rc2.fedora.src.rpm ~/rpmbuild/SRPMS/
----
pcs;
- Requires pacemaker 2.x install
git tag
git checkout <0.10.x>
====
Building 3rd party tools
dnf -y install bzip2-devel corosynclib-devel docbook-style-xsl gnutls-devel help2man libqb-devel libtool libtool-ltdl-devel libuuid-devel libxml2-devel libxslt-devel ncurses-devel pam-devel
==========
Build Dependency chain;
perl-Log-Log4perl
perl-Log-Dispatch-FileRotate
perl-MIME-Lite
===========================================================================================================
RPM build order
--- Install from RHEL repos;
- Groups;
dnf group install development rpm-development-tools
- Uninstall
dnf remove biosdevname
- Packages
dnf -y install bash-completion bind-utils dnf-utils firefox gdm libgcrypt-devel libutempter-devel libvirt-bash-completion ncurses-devel openssl-devel pam-devel perl-Capture-Tiny perl-Devel-CheckLib perl-Digest-SHA1 perl-ExtUtils-CBuilder perl-ExtUtils-MakeMaker perl-HTML-Parser \
perl-IO-stringy perl-MailTools perl-Module-Build perl-Module-Install perl-Module-Install-ReadmeFromPod perl-Net-DNS perl-Test-Exception perl-Test-Simple perl-Test-Pod perl-Test-Pod-Coverage \
perl-Test2-Suite rpm-build systemd-devel texinfo virt-manager wget \
perl-Params-ValidationCompiler perl-Dist-CheckConflicts perl-namespace-autoclean perl-Test-Fatal perl-Devel-GlobalDestruction perl-IPC-Run3 perl-Specio perl-Sys-Syslog postfix perl-DBI perl-IPC-SysV perl-Test perl-Filter rrdtool-perl perl-Test-Warn perl-Date-Manip \
perl-MIME-Types python36 docbook-style-xsl libxslt flex kernel-devel perl-IPC-System-Simple xdg-user-dirs perl-Import-Into perl-Class-XSAccessor perl-Sub-Name perl-DynaLoader-Functions perl-Devel-CallChecker perl-Test-Requires \
apr-devel apr-util-devel gdbm-devel httpd-devel perl-CGI perl-libwww-perl perl-Path-Tiny perl-ExtUtils-Embed perl-XML-DOM perl-Test-Taint perl-Readonly perl-Devel-Peek perl-String-ShellQuote elfutils-libelf-devel po4a
--- First round of builds;
alteeve-el8-repo
anvil
anvil-striker-extra
htop
libssh2
perl-Algorithm-C3
perl-B-Compiling
perl-BSD-Resource
perl-Devel-ArgNames
perl-Devel-Refcount
perl-Email-Date-Format
perl-Exporter-Lite
perl-ExtUtils-Config
perl-ExtUtils-Depends
perl-ExtUtils-Helpers
perl-Eval-WithLexicals
perl-Fennec-Lite
perl-HTML-Strip
perl-File-BaseDir
perl-File-MimeInfo
perl-IO-CaptureOutput
perl-Lexical-SealRequireHints
perl-Linux-Pid
perl-Log-Journald
perl-Net-Domain-TLD
perl-Mail-Sender
perl-Mail-Sendmail
perl-Module-Install-GithubMeta
perl-Net-Domain-TLD
perl-Net-OpenSSH # Requires the builder enter their password
perl-Proc-Simple
perl-Sub-Quote
perl-Test-Identity
perl-Test-Needs
perl-Test-UseAllModules
perl-UUID-Tiny
--- Second round of builds;
# RHEL 8 blocks the install of libssh2-devel via dnf, so use rpm to install it
rpm -Uvh /var/www/html/repo/RPMS/x86_64/libssh2-1.8.0-8.el8.x86_64.rpm /var/www/html/repo/RPMS/x86_64/libssh2-devel-1.8.0-8.el8.x86_64.rpm
dnf -y install perl-IO-CaptureOutput perl-Exporter-Lite perl-Module-Install-GithubMeta perl-Net-Domain-TLD perl-Test-UseAllModules perl-Mail-Sender perl-Mail-Sendmail perl-Test-Needs perl-Email-Date-Format perl-ExtUtils-Depends perl-B-Compiling perl-Lexical-SealRequireHints \
perl-Sub-Quote perl-BSD-Resource perl-Linux-Pid perl-Algorithm-C3 perl-Fennec-Lite perl-Devel-ArgNames perl-Devel-Refcount perl-ExtUtils-Config perl-ExtUtils-Helpers perl-Test-Identity perl-Eval-WithLexicals
mod_perl # NOTE: When --sign'ing it, it throws an error. Build without '--sign' and then do 'rpm --addsign <all rpms>'
perl-B-Hooks-OP-Check
perl-Class-C3
perl-Data-Dumper-Concise
perl-Email-Valid
perl-ExtUtils-InstallPaths
perl-Lexical-Var
perl-Meta-Builder
perl-MIME-Lite
perl-Module-Install-AutoLicense
perl-Net-Netmask
perl-Test-Refcount
--- Third round of builds;
dnf -y install perl-Email-Valid perl-Module-Install-AutoLicense perl-MIME-Lite perl-B-Hooks-OP-Check perl-Lexical-Var mod_perl perl-Class-C3 perl-Data-Dumper-Concise perl-ExtUtils-InstallPaths perl-Meta-Builder perl-Test-Refcount
perl-bareword-filehandles
perl-Email-Find
perl-Future
perl-Log-Dispatch
perl-Module-Build-Tiny
perl-Module-Install-CheckLib
perl-multidimensional
### NOTE: These two need to be built using bootstrap, and will be rebuilt later.
perl-Devel-Declare # NOTE: rpmbuild -ba --define='perl_bootstrap=1' perl-Devel-Declare.spec, build perl-Devel-CallParser, rebuild this without bootstrap.
perl-indirect # NOTE: rpmbuild -ba --define='perl_bootstrap=1' perl-indirect.spec, build perl-Devel-CallParser, rebuild this without bootstrap.
--- Fourth round of builds;
dnf -y install perl-Email-Find perl-Module-Install-CheckLib perl-Log-Dispatch perl-Devel-Declare perl-bareword-filehandles perl-multidimensional perl-indirect perl-Future perl-Module-Build-Tiny
perl-aliased
perl-Devel-CallParser
perl-HTML-FromText
perl-Log-Dispatch-FileRotate
perl-Net-SSH2
perl-strictures
--- Fifth round of builds;
dnf -y install perl-Log-Dispatch perl-strictures perl-Devel-CallParser perl-Log-Dispatch-FileRotate perl-aliased
perl-Exporter-Declare
perl-Log-Log4perl
perl-Moo
### NOTE: We're rebuilding these two, this time without bootstrap
perl-Devel-Declare
perl-indirect
--- Sixth round of builds;
### NOTE: expire-cache isn't enough to clear the boot-strapped versions
dnf clean all
dnf reinstall perl-Devel-Declare perl-indirect
dnf -y install perl-Moo perl-Log-Log4perl perl-Exporter-Declare
perl-CPAN-Changes
perl-Log-Contextual
--- Seventh round of builds;
dnf -y install perl-CPAN-Changes perl-Log-Contextual
perl-File-DesktopEntry
perl-Object-Remote
--- Seventh round, final installs.
dnf -y install perl-File-DesktopEntry perl-Object-Remote perl-Net-OpenSSH
=======================================
cd SOURCES;
tar -xzvf $tarball (or xjvf, whatever);
tar -xzv -C package-digimer -f $tarball;
patch in *-digimer;
diff -uNr $package $package-digimer > ../SOURCES/whatever.patch
00:24 < Bahhumbug>
Burst the source tarball;
do it again but stash this copy in a *-digimer directory.
Do patching in the -digimer directory and when finished create a recursive unified diff and place the results directly in the named .patch file.
cd ~/rpmbuild/SOURCES
tar -xzvf htop-2.2.0.tar.gz
mv htop-2.2.0 htop-2.2.0-digimer
cd htop-2.2.0-digimer
# patch
diff -uNr htop-2.2.0 htop-2.2.0-digimer > whatever.patch
# Fabio's way
diff -Naurd htop-2.2.0 htop-2.2.0-digimer > htop_python3_MakeHeader.patch
=======================================
chrissie's cluster script
@ -895,202 +621,9 @@ virt-install --connect qemu:///system \
# Migration;
pcs constraint remove $(pcs constraint show --full | grep ban-srv07-el6 | perl -pe 's/^.*?id:(.*?)\)/$1/')
DRBD 9 - Check;
/sys/kernel/debug/drbd/resources/${resource_name}/connections/${hostname}/0/proc_drbd
====== New style
<?xml version="1.0" encoding="UTF-8"?>
<!--
Generated on: 2019-06-20, 15:32:27
Striker Version: 2.0.7
-->
<anvil name="xx-anvil-01">
<machines>
<node name="xx-a01n01.digimer.ca" uuid="xxx">
<network name="bcn1" ntp="" ethtool_opts="" mtu="1500" default_gateway="0" >
<!-- subnet can be in "/xx" format -->
<address ip="10.201.10.1" subnet="255.255.0.0" gateway="" default_gateway="0" dns="" />
<interface name="bcn1_link1" mac="xx:xx:xx:xx:xx:xx"/>
<interface name="bcn1_link2" mac="xx:xx:xx:xx:xx:yy"/>
</network>
<fence>
<!-- IPMI data comes from hosts -> host_ipmi. If it is found, it always is used as the first fence device -->
<!-- PDU shows how to reference devices -->
<method name="pdu" type="pdu" order="1">
<!-- The 'name' parameter has to match an entry under devices -> pdu's name -->
<device name="xx-pdu01" port="1" />
<device name="xx-pdu02" port="2" />
</method>
<!-- This would only happen on its own, but is here for example. The 'server_name' is the name of the VM on the host -->
<method name="kvm" type="kvm" order="1">
<device name="host1" server_name="xx-a01n01" />
</method>
</fence>
<power>
<!-- The 'name' parameter has to match an entry under devices -> ups's name -->
<ups name="xx-ups01" />
<ups name="xx-ups02" />
</power>
</node>
<dr name="xx-a01dr01.digimer.ca" uuid="xxx">
<!-- IPMI is used to power on/off for scheduled, periodic resyncs. -->
</dr>
</machines>
<!-- These devices need to reference entries in the 'fences' database table.
<fences>
<!-- When a machine references these, the 'type="x"' references the child element and the contained 'name="x"' references the child's child element by name -->
<pdu>
<pdu name="xx-pdu01" agent="fence_apc_snmp" address="10.20.2.1" />
<pdu name="xx-pdu02" agent="fence_apc_snmp" address="10.20.2.2" />
</pdu>
<!-- UPSes are used so that we know which UPSes feed a given node, when deciding power event actions -->
<ups>
<ups name="xx-ups01" address="10.20.3.1" />
<ups name="xx-ups02" address="10.20.3.2" />
</ups>
<!-- In cases where VMs are being used. Later we can add support for VMWare -->
<kvm>
<kvm name="host1" address="192.168.122.1" user="root" password="xxx" />
</kvm>
</fences>
</anvil>
====== Old manifest style
<?xml version="1.0" encoding="UTF-8"?>
<!--
Generated on: 2019-06-20, 15:32:27
Striker Version: 2.0.7
-->
<config>
<node name="mk-a01n01.digimer.ca" uuid="71822143-2a4d-43b4-839b-7c66b3c2e4d7">
<network>
<bcn ip="10.20.10.1" />
<sn ip="10.10.10.1" />
<ifn ip="10.255.10.1" />
</network>
<ipmi>
<on reference="ipmi_n01" ip="10.20.11.1" netmask="255.255.0.0" user="admin" password="Initial1" gateway="" lanplus="false" privlvl="USER" />
</ipmi>
<pdu>
<on reference="pdu01" port="1" />
<on reference="pdu02" port="1" />
<on reference="pdu03" port="" />
<on reference="pdu04" port="" />
</pdu>
<kvm>
<!-- port == virsh name of VM -->
<on reference="kvm_host" port="" />
</kvm>
<interfaces>
<interface name="bcn_link1" mac="f8:0f:41:f8:6b:fe" />
<interface name="bcn_link2" mac="00:19:99:ff:ba:b4" />
<interface name="sn_link1" mac="f8:0f:41:f8:6b:ff" />
<interface name="sn_link2" mac="00:19:99:ff:8b:5a" />
<interface name="ifn_link1" mac="00:19:99:ff:ba:b5" />
<interface name="ifn_link2" mac="00:19:99:ff:8b:59" />
</interfaces>
</node>
<node name="mk-a01n02.digimer.ca" uuid="f7a7b2be-a10a-40f0-991d-2265e3ec3cce">
<network>
<bcn ip="10.20.10.2" />
<sn ip="10.10.10.2" />
<ifn ip="10.255.10.2" />
</network>
<ipmi>
<on reference="ipmi_n02" ip="10.20.11.2" netmask="255.255.0.0" user="admin" password="Initial1" gateway="" lanplus="false" privlvl="USER" />
</ipmi>
<pdu>
<on reference="pdu01" port="2" />
<on reference="pdu02" port="2" />
<on reference="pdu03" port="" />
<on reference="pdu04" port="" />
</pdu>
<kvm>
<on reference="kvm_host" port="" />
</kvm>
<interfaces>
<interface name="bcn_link1" mac="00:26:2d:0c:a8:74" />
<interface name="bcn_link2" mac="00:19:99:ff:bb:4e" />
<interface name="sn_link1" mac="00:26:2d:0c:a8:75" />
<interface name="sn_link2" mac="00:19:99:ff:bb:8b" />
<interface name="ifn_link1" mac="00:19:99:ff:bb:4f" />
<interface name="ifn_link2" mac="00:19:99:ff:bb:8a" />
</interfaces>
</node>
<common>
<networks>
<bcn netblock="10.20.0.0" netmask="255.255.0.0" gateway="" defroute="no" ethtool_opts="" />
<sn netblock="10.10.0.0" netmask="255.255.0.0" gateway="" defroute="no" ethtool_opts="" />
<ifn netblock="10.255.0.0" netmask="255.255.0.0" gateway="10.255.255.254" dns1="8.8.8.8" dns2="8.8.4.4" ntp1="" ntp2="" defroute="yes" ethtool_opts="" />
<bonding opts="mode=1 miimon=100 use_carrier=1 updelay=120000 downdelay=0">
<bcn name="bcn_bond1" primary="bcn_link1" secondary="bcn_link2" />
<sn name="sn_bond1" primary="sn_link1" secondary="sn_link2" />
<ifn name="ifn_bond1" primary="ifn_link1" secondary="ifn_link2" />
</bonding>
<bridges>
<bridge name="ifn_bridge1" on="ifn" />
</bridges>
<mtu size="1500" />
</networks>
<repository urls="" />
<media_library size="40" units="GiB" />
<storage_pool_1 size="100" units="%" />
<anvil prefix="mk" sequence="01" domain="digimer.ca" password="Initial1" striker_user="" striker_database="" />
<ssh keysize="8191" />
<cluster name="mk-anvil-01">
<!-- Set the order to 'kvm' if building on KVM-backed VMs. Also set each node's 'port=' above and '<kvm>' element attributes below. -->
<fence order="ipmi,pdu" post_join_delay="90" delay="15" delay_node="mk-a01n01.digimer.ca" />
</cluster>
<drbd>
<disk disk-barrier="no" disk-flushes="no" md-flushes="no" c-plan-ahead="1" c-max-rate="110M" c-min-rate="30M" c-fill-target="1M" />
<options cpu-mask="" />
<net max-buffers="8192" sndbuf-size="" rcvbuf-size="" />
</drbd>
<switch>
<switch name="mk-switch01.digimer.ca" ip="10.20.1.1" />
<switch name="mk-switch02.digimer.ca" ip="10.20.1.2" />
</switch>
<ups>
<ups name="mk-ups01.digimer.ca" type="apc" port="3551" ip="10.20.3.1" />
<ups name="mk-ups02.digimer.ca" type="apc" port="3552" ip="10.20.3.2" />
</ups>
<pdu>
<pdu reference="pdu01" name="mk-pdu01.digimer.ca" ip="10.20.2.1" agent="fence_apc_alteeve" />
<pdu reference="pdu02" name="mk-pdu02.digimer.ca" ip="10.20.2.2" agent="fence_apc_alteeve" />
</pdu>
<ipmi>
<ipmi reference="ipmi_n01" agent="fence_ipmilan" />
<ipmi reference="ipmi_n02" agent="fence_ipmilan" />
</ipmi>
<kvm>
<kvm reference="kvm_host" ip="192.168.122.1" user="root" password="" password_script="" agent="fence_virsh" />
</kvm>
<striker>
<striker name="mk-striker01.digimer.ca" bcn_ip="10.20.4.1" ifn_ip="10.255.4.1" database="" user="" password="" uuid="" />
<striker name="mk-striker02.digimer.ca" bcn_ip="10.20.4.2" ifn_ip="10.255.4.2" database="" user="" password="" uuid="" />
</striker>
<update os="true" />
<iptables>
<vnc ports="100" />
</iptables>
<servers>
<!-- This isn't used anymore, but this section may be useful for other things in the future, -->
<!-- <provision use_spice_graphics="0" /> -->
</servers>
<tools>
<use anvil-safe-start="true" anvil-kick-apc-ups="false" />
</tools>
</common>
</config>
# Attach a network interface:
virsh attach-interface win2019_test bridge ifn_bridge1 --live --model virtio
@ -1133,3 +666,80 @@ drbdadm disconnect <res>
drbdadm connect --discard-my-data <res>
# Node to save data on;
drbdadm connect <res>
==================
# Server srv01-sql, example showing two disks in one VM.
resource srv01-sql {
on mk-a02n01 {
node-id 0;
volume 0 {
device /dev/drbd_srv01-sql_0 minor 0;
disk /dev/rhel/srv01-sql_0;
meta-disk internal;
}
volume 1 {
device /dev/drbd_srv01-sql_1 minor 1;
disk /dev/rhel/srv01-sql_1;
meta-disk internal;
}
}
on mk-a02n02 {
node-id 1;
volume 0 {
device /dev/drbd_srv01-sql_0 minor 0;
disk /dev/rhel/srv01-sql_0;
meta-disk internal;
}
volume 1 {
device /dev/drbd_srv01-sql_1 minor 1;
disk /dev/rhel/srv01-sql_1;
meta-disk internal;
}
}
on mk-a02dr01 {
node-id 2;
volume 0 {
device /dev/drbd_srv01-sql_0 minor 0;
disk /dev/rhel_new-dr/srv01-sql_0;
meta-disk internal;
}
volume 1 {
device /dev/drbd_srv01-sql_1 minor 1;
disk /dev/rhel_new-dr/srv01-sql_1;
meta-disk internal;
}
}
### NOTE: Remember to open the appropriate firewall port!
# firewall-cmd --zone=SN1 --permanent --add-port=7788/tcp
# firewall-cmd --zone=SN1 --add-port=7788/tcp
connection {
host mk-a02n01 address 10.101.12.1:7788;
host mk-a02n02 address 10.101.12.2:7788;
net {
protocol C;
fencing resource-and-stonith;
}
}
connection {
host mk-a02n01 address 10.101.12.1:7789;
host mk-a02dr01 address 10.101.12.3:7789;
net {
protocol A;
fencing dont-care;
}
}
connection {
host mk-a02n02 address 10.101.12.2:7790;
host mk-a02dr01 address 10.101.12.3:7790;
net {
protocol A;
fencing dont-care;
}
}
}
==================

@ -303,13 +303,16 @@ Output (if any):
<key name="error_0218">Unable to connect to any databases, unable to continue.</key>
<key name="error_0219">Unable to find the server uuid to delete from the job UUID: [#!variable!job_uuid!#].</key>
<key name="error_0220">Unable to find a server name to match the server UUID: [#!variable!server_uuid!#].</key>
<key name="error_0221">The server: [#!variable!server_name!#] is already marked as DELETED.</key>
<key name="error_0221">#!free!#</key>
<key name="error_0222">The cluster does not appear to be running, unable to delete a server at this time. We'll sleep for a bit and then exit, and then try again.</key>
<key name="error_0223">The server: [#!variable!server_name!#] appears to have failed to stop.</key>
<key name="error_0224">Unable to delete the server resource: [#!variable!server_name!#] as the cluster isn't running or there was a problem parsing the cluster CIB.</key>
<key name="error_0225">Unable to delete the server resource: [#!variable!server_name!#] as this node is not (yet) a full member of the cluster.</key>
<key name="error_0226">It looks like the removal of the server resource: [#!variable!server_name!#] failed. The return code should have been '0', but: [#!variable!return_code!#] was returned. The 'pcs' command output, if any, was: [#!variable!output!#].</key>
<key name="error_0227">It looks like the removal of the server resource: [#!variable!server_name!#] failed. Unsafe to proceed with the removal of the server. Please check the logs for more information.</key>
<key name="error_0228">Unable to delete the resource: [#!variable!resource!#] because it wasn't found in DRBD's config.</key>
<key name="error_0229">One or more peers need us, and we're not allowed to wait. Deletion aborted.</key>
<key name="error_0230">The shell call: [#!variable!shell_call!#] was expected to return '0', but instead the return code: [#!variable!return_code!#] was received. The output, if any, was: [#!variable!output!#].</key>
<!-- Files templates -->
<!-- NOTE: Translating these files requires an understanding of which likes are translatable -->
@ -639,6 +642,11 @@ It should be provisioned in the next minute or two.</key>
<key name="job_0209">This deletes a server from an Anvil! system.</key>
<key name="job_0210">Asking pacemaker to stop the server: [#!variable!server_name!#].</key>
<key name="job_0211">The server: [#!variable!server_name!#] is now stopped in pacemaker.</key>
<key name="job_0212">Registered a job with: [#!variable!host_name!#] to delete its records of this server.</key>
<key name="job_0213">Deleting the replicated storage resource behind this server.</key>
<key name="job_0214">Storage has been released. Checking that the server has flagged as deleted in the database.</key>
<key name="job_0215">The server has been flagged as deleted now.</key>
<key name="job_0216">The server delete is complete on this host!</key>
<!-- Log entries -->
<key name="log_0001">Starting: [#!variable!program!#].</key>
@ -1320,6 +1328,10 @@ The file: [#!variable!file!#] needs to be updated. The difference is:
<key name="log_0585">The server: [#!variable!server_name!#]'s current status is: [#!variable!status!#].</key>
<key name="log_0586">The server: [#!variable!server_name!#] is now off.</key>
<key name="log_0587">The server: [#!variable!server_name!#] has been removed from Pacemaker.</key>
<key name="log_0588">We're required by at least one peer, so we'll wait a bit and check to see if they still need us before we proceed with the deletion.</key>
<key name="log_0589">Deleting the file: [#!variable!file!#].</key>
<key name="log_0590">Wiping the metadata from the DRBD resource: [#!variable!resource!#].</key>
<key name="log_0591">Wiping any file system signatures and then deleting the logical volume: [#!variable!device_path!#].</key>
<!-- Messages for users (less technical than log entries), though sometimes used for logs, too. -->
<key name="message_0001">The host name: [#!variable!target!#] does not resolve to an IP address.</key>
@ -2210,6 +2222,7 @@ Read UUID: .... [#!variable!read_uuid!#]
<key name="warning_0071">[ Warning ] - We were asked to create a new storage group called: [#!variable!name!#] but that name is already used by the group with UUID: [#!variable!uuid!#].</key>
<key name="warning_0072">[ Warning ] - The file: [#!variable!file_path!#] was not found on any accessible Striker dashboard (or it isn't the same size as recorded in the database). Will sleep for a minute and exit, then we'll try again.</key>
<key name="warning_0073">[ Warning ] - No databases are available. Some functions of this resource agent will not be available.</key>
<key name="warning_0074">[ Warning ] - Our disk state for the peer: [#!variable!peer_name!#] on resource: [#!variable!resource!#], volume: [#!variable!volume!#] is: [#!variable!disk_state!#].</key>
<!-- The entries below here are not sequential, but use a key to find the entry. -->
<!-- Run 'striker-parse-os-list to find new entries. -->

@ -129,17 +129,78 @@ sub run_jobs
# This parses the jobs::job_data into variables.
parse_job_data($anvil);
my $host_type = $anvil->Get->host_type();
my $server_uuid = $anvil->data->{job}{server_uuid};
my $server_name = $anvil->data->{servers}{server_uuid}{$server_uuid}{server_name};
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => {
host_type => $host_type,
server_uuid => $server_uuid,
server_name => $server_name,
}});
if (not $anvil->data->{job}{peer_mode})
{
# Remove the server from pacemaker (stopping it, if necessary).
remove_from_pacemaker($anvil);
}
# Now parse the DRBD resources under this server. We'll log into the peers (if any) that are
# connected and down their DRBD resources under this server).
$anvil->Job->update_progress({
progress => 50,
message => "job_0213",
});
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 2, key => "job_0213"});
### NOTE: If we're a DR host, and the server wasn't used here, this is expected to fail
# Delete the DRBD resource and backing storage
my $problem = $anvil->DRBD->delete_resource({debug => 2, resource => $server_name});
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => { problem => $problem }});
if (($problem) && ($host_type eq "node"))
{
# Something went wrong
$anvil->Job->update_progress({
progress => 100,
message => "error_0228,!!resource!".$server_name."!!",
job_status => "failed",
});
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 0, priority => 'err', key => "error_0228", variables => { resource => $server_name }});
$anvil->nice_exit({exit_code => 1});
}
$anvil->Job->update_progress({
progress => 60,
message => "job_0214",
});
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 2, key => "job_0214"});
# Make sure the server is flagged as DELETEd.
$anvil->Database->get_servers();
my $server_state = $anvil->data->{servers}{server_uuid}{$server_uuid}{server_state};
if ($server_state ne "DELETED")
{
my $query = "
UPDATE
servers
SET
server_state = 'DELETED',
modified_date = ".$anvil->Database->quote($anvil->data->{sys}{database}{timestamp})."
WHERE
server_uuid = ".$anvil->Database->quote($server_uuid).";";
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 1, list => { query => $query }});
$anvil->Database->write({query => $query, source => $THIS_FILE, line => __LINE__});
$anvil->Job->update_progress({
progress => 70,
message => "job_0215",
});
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 2, key => "job_0215"});
}
$anvil->Job->update_progress({
progress => 100,
message => "job_0216",
});
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 2, key => "job_0216"});
return(0);
}
@ -150,6 +211,10 @@ sub remove_from_pacemaker
my $server_uuid = $anvil->data->{job}{server_uuid};
my $server_name = $anvil->data->{servers}{server_uuid}{$server_uuid}{server_name};
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => {
server_uuid => $server_uuid,
server_name => $server_name,
}});
# Sanity checks passed
$anvil->Job->update_progress({
@ -198,7 +263,47 @@ sub remove_from_pacemaker
$anvil->nice_exit({exit_code => 1});
}
# Register the job with the peers.
my $anvil_uuid = $anvil->Cluster->get_anvil_uuid();
my $peers = [];
if ($anvil->data->{anvils}{anvil_uuid}{$anvil_uuid}{anvil_node1_host_uuid} eq $anvil->Get->host_uuid)
{
# We're node 1
push @{$peers}, $anvil->data->{anvils}{anvil_uuid}{$anvil_uuid}{anvil_node2_host_uuid}
}
else
{
# We're node 2
push @{$peers}, $anvil->data->{anvils}{anvil_uuid}{$anvil_uuid}{anvil_node1_host_uuid}
}
if ($anvil->data->{anvils}{anvil_uuid}{$anvil_uuid}{anvil_dr1_host_uuid})
{
# There's a DR host.
push @{$peers}, $anvil->data->{anvils}{anvil_uuid}{$anvil_uuid}{anvil_dr1_host_uuid};
}
my $progress = 30;
foreach my $host_uuid (@{$peers})
{
my ($job_uuid) = $anvil->Database->insert_or_update_jobs({
debug => 2,
job_command => $anvil->data->{path}{exe}{'anvil-delete-server'},
job_data => "server_uuid=".$server_uuid."\npeer_mode=true",
job_name => "server::delete",
job_title => "job_0208",
job_description => "job_0209",
job_progress => 0,
job_host_uuid => $host_uuid,
});
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => { job_uuid => $job_uuid }});
my $host_name = $anvil->Get->host_name_from_uuid({host_uuid => $host_uuid});
$anvil->Job->update_progress({
progress => $progress,
message => "job_0212,!!host_name!".$host_name."!!",
});
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 2, key => "job_0212", variables => { host_name => $host_name }});
$progress += 10;
}
return(0);
}
@ -218,7 +323,7 @@ sub parse_job_data
$anvil->data->{job}{server_uuid} = $1;
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => { 'job::server_uuid' => $anvil->data->{job}{server_uuid} }});
}
if ($line =~ /peer_mode=true$/)
if ($line =~ /peer_mode=true/)
{
$anvil->data->{job}{peer_mode} = 1;
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => { 'job::peer_mode' => $anvil->data->{job}{peer_mode} }});
@ -251,30 +356,23 @@ sub parse_job_data
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 0, priority => 'err', key => "error_0220", variables => { server_uuid => $server_uuid }});
$anvil->nice_exit({exit_code => 1});
}
elsif ($anvil->data->{servers}{server_uuid}{$server_uuid}{server_state} eq "DELETED")
{
# The server is already gone.
my $server_name = $anvil->data->{servers}{server_uuid}{$server_uuid}{server_name};
$anvil->Job->update_progress({
progress => 100,
message => "error_0221,!!server_name!".$server_name."!!",
});
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 0, priority => 'err', key => "error_0221", variables => { server_name => $server_name }});
$anvil->nice_exit({exit_code => 1});
}
my $problem = $anvil->Cluster->parse_cib({debug => 2});
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => { problem => $problem }});
if ($problem)
my $host_type = $anvil->Get->host_type();
if ($host_type eq "node")
{
# The cluster isn't running, sleep and exit.
$anvil->Job->update_progress({
progress => 0,
message => "error_0222",
});
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 0, priority => 'err', key => "error_0222"});
sleep 10;
$anvil->nice_exit({exit_code => 1});
my $problem = $anvil->Cluster->parse_cib({debug => 2});
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => { problem => $problem }});
if ($problem)
{
# The cluster isn't running, sleep and exit.
$anvil->Job->update_progress({
progress => 0,
message => "error_0222",
});
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 0, priority => 'err', key => "error_0222"});
sleep 10;
$anvil->nice_exit({exit_code => 1});
}
}
return(0);

@ -26,6 +26,7 @@ $anvil->Get->switches;
$anvil->Database->connect({debug => 3});
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 2, key => "log_0132"});
$anvil->Cluster->shutdown_server({server => "srv01-test"});
#$anvil->DRBD->delete_resource({debug => 2, resource => "srv01-test"});
$anvil->nice_exit({exit_code => 0});

Loading…
Cancel
Save