* Working on getting live migration to work with ocf:alteeve:server using the environment variables that pacemaker sets. Incomplete, but getting close.

* Added support to Cluster->parse_cib() to track whether maintenance mode is set.

Signed-off-by: Digimer <digimer@alteeve.ca>
Branch: main
Digimer, 4 years ago
parent cc1e0e2f77, commit 47203490a9
Changed files:
  Anvil/Tools/Cluster.pm  (11)
  Anvil/Tools/Convert.pm  (10)
  Anvil/Tools/Server.pm   (53)
  ocf/alteeve/server      (131)
  share/words.xml         (2)
  tools/test.pl           (180)

@@ -563,7 +563,9 @@ sub parse_cib
}});
}
}
# Set some cluster value defaults.
$anvil->data->{cib}{parsed}{data}{cluster}{'maintenance-mode'} = "false";
foreach my $nvpair_id (sort {$a cmp $b} keys %{$anvil->data->{cib}{parsed}{configuration}{crm_config}{cluster_property_set}{nvpair}})
{
my $variable = $anvil->data->{cib}{parsed}{configuration}{crm_config}{cluster_property_set}{nvpair}{$nvpair_id}{name};
@@ -595,6 +597,13 @@ sub parse_cib
"cib::parsed::data::cluster::name" => $anvil->data->{cib}{parsed}{data}{cluster}{name},
}});
}
if ($variable eq "maintenance-mode")
{
$anvil->data->{cib}{parsed}{data}{cluster}{'maintenance-mode'} = $value;
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => {
"cib::parsed::data::cluster::name" => $anvil->data->{cib}{parsed}{data}{cluster}{name},
}});
}
}
# Fencing devices and levels.

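With the default set before the nvpair loop above, callers can rely on the key existing once parse_cib() returns. A minimal consumer sketch (the branch is illustrative; the hash path is the one populated above):

my $problem = $anvil->Cluster->parse_cib();
if (not $problem)
{
	# Defaults to "false"; overridden when the CIB carries the nvpair.
	if ($anvil->data->{cib}{parsed}{data}{cluster}{'maintenance-mode'} eq "true")
	{
		# The cluster is unmanaged; avoid anything that would fight pacemaker.
	}
}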
@@ -637,8 +637,14 @@ sub host_name_to_ip
### TODO: Check local cached information later.
# Try to resolve it using 'gethostip'.
my $output = $anvil->System->call({shell_call => $anvil->data->{path}{exe}{gethostip}." -d $host_name"});
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { output => $output }});
my $shell_call = $anvil->data->{path}{exe}{gethostip}." -d ".$host_name;
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { shell_call => $shell_call }});
my ($output, $return_code) = $anvil->System->call({shell_call => $shell_call});
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => {
output => $output,
return_code => $return_code,
}});
foreach my $line (split/\n/, $output)
{
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { line => $line }});

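Capturing both the output and the return code from System->call() is the pattern used through the rest of these tools. A quick usage sketch of the updated helper (the host name is a placeholder):

my $ip = $anvil->Convert->host_name_to_ip({
	debug     => 2,
	host_name => "mk-a02n01",	# hypothetical name to resolve
});
# $ip carries the dotted-quad printed by 'gethostip -d <host_name>'.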
@@ -636,8 +636,36 @@ sub migrate
return($success);
}
### TODO: Left off here, this is not allowing two primaries. I think the problem is 'source' is being
### mixed up in hashes between 'local' and the local machine's short host name. Switch
### everything away from 'local' to the short host name throughout the program.
if (not $anvil->data->{server}{$source}{$server})
{
# The 'target' below is where I'm reading the server's definition from, which is the
# migration source.
$anvil->Server->get_status({
debug => $debug,
server => $server,
target => $source,
});
}
foreach my $source (sort {$a cmp $b} keys %{$anvil->data->{server}})
{
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { source => $source }});
foreach my $server (sort {$a cmp $b} keys %{$anvil->data->{server}{$source}})
{
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { server => $server }});
foreach my $resource (sort {$a cmp $b} keys %{$anvil->data->{server}{$source}{$server}{resource}})
{
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { resource => $resource }});
}
}
}
die;
# Enable dual-primary for any resources we know about for this server.
foreach my $resource (sort {$a cmp $b} keys %{$anvil->data->{server}{$target}{$server}{resource}})
foreach my $resource (sort {$a cmp $b} keys %{$anvil->data->{server}{$source}{$server}{resource}})
{
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { resource => $resource }});
my ($return_code) = $anvil->DRBD->allow_two_primaries({
@@ -646,9 +674,32 @@ sub migrate
});
}
# The virsh command switches host names to IPs and needs to have both the source and target IPs in
# the known_hosts file to work.
my $target_ip = $anvil->Convert->host_name_to_ip({debug => $debug, host_name => $target});
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { target_ip => $target_ip }});
foreach my $host ($target, $target_ip)
{
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { host => $host }});
$anvil->Remote->add_target_to_known_hosts({
debug => $debug,
target => $host,
});
}
my $migration_command = $anvil->data->{path}{exe}{virsh}." migrate --undefinesource --tunnelled --p2p --live ".$server." qemu+ssh://".$target."/system";
if ($source)
{
my $source_ip = $anvil->Convert->host_name_to_ip({debug => $debug, host_name => $source});
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { source_ip => $source_ip }});
foreach my $host ($source, $source_ip)
{
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { host => $host }});
$anvil->Remote->add_target_to_known_hosts({
debug => $debug,
target => $host,
});
}
$migration_command = $anvil->data->{path}{exe}{virsh}." -c qemu+ssh://root\@".$source."/system migrate --undefinesource --tunnelled --p2p --live ".$server." qemu+ssh://".$target."/system";
}
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { migration_command => $migration_command }});

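To make the two paths above concrete, the assembled commands render roughly as follows (server and host names borrowed from this test cluster as placeholders):

# No source host given; push the server from the local node:
#   virsh migrate --undefinesource --tunnelled --p2p --live srv07-el6 qemu+ssh://mk-a02n02/system
# Source host given; drive the migration from that host instead:
#   virsh -c qemu+ssh://root@mk-a02n01/system migrate --undefinesource --tunnelled --p2p --live srv07-el6 qemu+ssh://mk-a02n02/system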
@@ -179,7 +179,7 @@ if ($anvil->data->{switches}{test3})
{
$anvil->data->{environment}{OCF_RESKEY_name} = "srv07-el6";
$anvil->data->{environment}{OCF_RESKEY_CRM_meta_on_node} = "mk-a02n01";
#print "Running test 3; Boot or shutdown of: [".$anvil->data->{environment}{OCF_RESKEY_name}."].\n";
print "Running test 3; Boot or shutdown of: [".$anvil->data->{environment}{OCF_RESKEY_name}."].\n";
}
if ($anvil->data->{switches}{test4})
{
@@ -189,13 +189,13 @@ if ($anvil->data->{switches}{test4})
$anvil->data->{environment}{OCF_RESKEY_CRM_meta_timeout} = "20000";
$anvil->data->{environment}{OCF_RESKEY_CRM_meta_on_node} = "mk-a02n01";
$anvil->data->{environment}{OCF_RESKEY_name} = "srv07-el6";
#print "Status check of: [".$anvil->data->{environment}{OCF_RESKEY_name}."] on: [".$anvil->data->{environment}{OCF_RESKEY_CRM_meta_on_node}."].\n";
print "Status check of: [".$anvil->data->{environment}{OCF_RESKEY_name}."] on: [".$anvil->data->{environment}{OCF_RESKEY_CRM_meta_on_node}."].\n";
}
# This is for debugging.
if (not $anvil->data->{switches}{monitor})
{
show_environment($anvil, 1);
show_environment($anvil, 3);
}
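For hand-testing outside pacemaker, the test switches above can be passed on the command line. A sketch, assuming Get->switches maps '--test3' and '--test4' to the keys used here:

#   ocf/alteeve/server --test3	# boot/shutdown test of srv07-el6
#   ocf/alteeve/server --test4	# status check of srv07-el6 on mk-a02n01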
### What are we being asked to do?
@@ -211,7 +211,12 @@ if (not $anvil->data->{switches}{monitor})
# help -(usage maps here) Displays a usage message when the resource agent is invoked from the command line, rather than by the cluster manager.
# notify -Inform resource about changes in state of other clones.
if ($anvil->data->{switches}{start})
if (($anvil->data->{switches}{migrate_to}) or ($anvil->data->{switches}{migrate_from}))
{
# Handle live migration (previously this returned OCF_ERR_UNIMPLEMENTED (3))
migrate_server($anvil);
}
elsif ($anvil->data->{switches}{start})
{
# Start the server
start_server($anvil);
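For reference, the codes the agent hands back through nice_exit() follow the standard OCF set:

# 0 = OCF_SUCCESS, 1 = OCF_ERR_GENERIC, 2 = OCF_ERR_ARGS,
# 3 = OCF_ERR_UNIMPLEMENTED, 4 = OCF_ERR_PERM, 5 = OCF_ERR_INSTALLED,
# 6 = OCF_ERR_CONFIGURED, 7 = OCF_NOT_RUNNING
$anvil->nice_exit({exit_code => 0});	# e.g. report success to pacemaker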
@@ -242,11 +247,6 @@ elsif ($anvil->data->{switches}{demote})
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 1, key => "log_0300", variables => { server => $anvil->data->{environment}{OCF_RESKEY_name} }});
$anvil->nice_exit({exit_code => 3});
}
elsif (($anvil->data->{switches}{migrate_to}) or ($anvil->data->{switches}{migrate_from}))
{
# We don't support this, so we return OCF_ERR_UNIMPLEMENTED (3)
migrate_server($anvil);
}
elsif ($anvil->data->{switches}{'validate-all'})
{
# Validate our local config and setup.
@@ -288,7 +288,6 @@ sub check_daemons
{
my ($anvil, $task) = @_;
print "Parsing CIB\n";
my $problem = $anvil->Cluster->parse_cib();
if ($problem)
{
@@ -301,7 +300,7 @@ sub check_daemons
# Is the peer running? We'll use this to know whether to try and start daemons on the peer.
my $peer_name = $anvil->data->{cib}{parsed}{peer}{name};
my $peer_ready = $anvil->data->{cib}{parsed}{peer}{ready};
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => {
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 3, list => {
peer_name => $peer_name,
peer_ready => $peer_ready,
}});
@@ -316,7 +315,7 @@ sub check_daemons
my $running_peer = 0;
my ($output, $return_code) = $anvil->System->call({shell_call => $anvil->data->{path}{exe}{systemctl}." status ".$daemon});
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => {
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 3, list => {
output => $output,
return_code => $return_code,
}});
@@ -325,7 +324,7 @@ sub check_daemons
# It is stopped, start it..
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 1, key => "log_0482", variables => { daemon => $daemon }});
my ($output, $return_code) = $anvil->System->call({shell_call => $anvil->data->{path}{exe}{systemctl}." start ".$daemon});
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => {
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 3, list => {
output => $output,
return_code => $return_code,
}});
@@ -335,7 +334,7 @@ sub check_daemons
until ($running)
{
my ($output, $return_code) = $anvil->System->call({shell_call => $anvil->data->{path}{exe}{systemctl}." status ".$daemon});
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => {
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 3, list => {
output => $output,
return_code => $return_code,
}});
@@ -375,7 +374,7 @@ sub check_daemons
target => $peer_name,
shell_call => $anvil->data->{path}{exe}{systemctl}." status ".$daemon,
});
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => {
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 3, list => {
output => $output,
error => $error,
return_code => $return_code,
@@ -391,7 +390,7 @@ sub check_daemons
target => $peer_name,
shell_call => $anvil->data->{path}{exe}{systemctl}." start ".$daemon,
});
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => {
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 3, list => {
output => $output,
error => $error,
return_code => $return_code,
@@ -405,7 +404,7 @@ sub check_daemons
target => $peer_name,
shell_call => $anvil->data->{path}{exe}{systemctl}." status ".$daemon,
});
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => {
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 3, list => {
output => $output,
error => $error,
return_code => $return_code,
@@ -468,7 +467,7 @@ sub check_daemons
# Call virsh list --all
my ($local_output, $local_return_code) = $anvil->System->call({shell_call => $anvil->data->{path}{exe}{virsh}." list --all"});
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => {
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 3, list => {
local_output => $local_output,
local_return_code => $local_return_code,
}});
@@ -478,12 +477,12 @@ sub check_daemons
foreach my $line (split/\n/, $local_output)
{
$line = $anvil->Words->clean_spaces({ string => $line });
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => { line => $line }});
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 3, list => { line => $line }});
if ($line =~ /(\d+)\s+(.*?)\s+running/)
{
$local_vm_count++;
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => { local_vm_count => $local_vm_count }});
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 3, list => { local_vm_count => $local_vm_count }});
}
}
}
@@ -492,7 +491,7 @@ sub check_daemons
target => $peer_name,
shell_call => $anvil->data->{path}{exe}{virsh}." list --all",
});
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => {
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 3, list => {
remote_output => $remote_output,
remote_error => $remote_error,
remote_return_code => $remote_return_code,
@@ -503,17 +502,17 @@ sub check_daemons
foreach my $line (split/\n/, $remote_output)
{
$line = $anvil->Words->clean_spaces({ string => $line });
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => { line => $line }});
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 3, list => { line => $line }});
if ($line =~ /(\d+)\s+(.*?)\s+running/)
{
$remote_vm_count++;
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => { remote_vm_count => $remote_vm_count }});
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 3, list => { remote_vm_count => $remote_vm_count }});
}
}
}
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => {
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 3, list => {
local_vm_count => $local_vm_count,
remote_vm_count => $remote_vm_count,
}});
@@ -535,7 +534,7 @@ sub check_daemons
my $running_peer = 0;
my ($local_output, $local_return_code) = $anvil->System->call({shell_call => $anvil->data->{path}{exe}{systemctl}." status ".$daemon});
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => {
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 3, list => {
local_output => $local_output,
local_return_code => $local_return_code,
}});
@@ -549,7 +548,7 @@ sub check_daemons
# Running, stop it.
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 1, key => "log_0493", variables => { daemon => $daemon }});
my ($output, $return_code) = $anvil->System->call({shell_call => $anvil->data->{path}{exe}{systemctl}." stop ".$daemon});
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => {
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 3, list => {
output => $output,
return_code => $return_code,
}});
@@ -559,7 +558,7 @@ sub check_daemons
target => $peer_name,
shell_call => $anvil->data->{path}{exe}{systemctl}." status ".$daemon,
});
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => {
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 3, list => {
remote_output => $remote_output,
remote_error => $remote_error,
remote_return_code => $remote_return_code,
@@ -583,7 +582,7 @@ sub check_daemons
target => $peer_name,
shell_call => $anvil->data->{path}{exe}{systemctl}." stop ".$daemon,
});
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => {
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 3, list => {
output => $output,
error => $error,
return_code => $return_code,
@@ -941,10 +940,11 @@ sub stop_server
# Stopping the server is simply a question of "is the server running?" and, if so, stop it. Once
# stopped, we stop the DRBD resource on both nodes.
my $server = $anvil->data->{environment}{OCF_RESKEY_name};
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => { server => $server }});
# Read in and parse the server's XML.
$anvil->System->check_storage({debug => 3});
$anvil->Server->get_status({debug => 3, server => $server});
$anvil->Server->get_status({debug => 2, server => $server});
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 2, key => "log_0313", variables => { server => $server }});
my $success = $anvil->Server->shutdown({debug => 3, server => $server});
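Condensed, the stop path is: refresh state with get_status(), shut the server down, then report. A sketch of how the success flag might map to an OCF result (this mapping is an assumption, not code from this commit):

$anvil->nice_exit({exit_code => ($success ? 0 : 1)});	# 0 = OCF_SUCCESS, 1 = OCF_ERR_GENERIC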
@@ -1160,6 +1160,14 @@ sub migrate_server
# Before migrating, make sure the daemons are running on the peer.
check_daemons($anvil, "start");
# Make sure switches are at least defined.
$anvil->data->{switches}{migrate_to} = "" if not defined $anvil->data->{switches}{migrate_to};
$anvil->data->{switches}{migrate_from} = "" if not defined $anvil->data->{switches}{migrate_from};
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => {
'switches::migrate_to' => $anvil->data->{switches}{migrate_to},
'switches::migrate_from' => $anvil->data->{switches}{migrate_from},
}});
### NOTE: For now, we're not going to block if the target is not UpToDate. There are times when a
### user might want to do this (ie: sync will be done soon and the need to evacuate the node
### ASAP is high). Maybe we'll enforce this and require a '--force' switch later?
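For reference, when pacemaker invokes the agent for a live migration it exports the endpoints in the environment. A sketch of what the agent would see on the source node (values borrowed from this test cluster, for illustration):

# OCF_RESKEY_name                    = "srv07-el6"
# OCF_RESKEY_CRM_meta_migrate_source = "mk-a02n01"
# OCF_RESKEY_CRM_meta_migrate_target = "mk-a02n02"
# OCF_RESKEY_CRM_meta_on_node        = "mk-a02n01"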
@@ -1173,12 +1181,10 @@ sub migrate_server
my $target = $anvil->data->{environment}{OCF_RESKEY_CRM_meta_migrate_target};
my $meta_on_node = $anvil->data->{environment}{OCF_RESKEY_CRM_meta_on_node};
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => {
server => $server,
source => $source,
target => $target,
meta_on_node => $meta_on_node,
'switches::migrate_to' => $anvil->data->{switches}{migrate_to},
'switches::migrate_from' => $anvil->data->{switches}{migrate_from},
server => $server,
source => $source,
target => $target,
meta_on_node => $meta_on_node,
}});
# The actual migration command will involve enabling dual primary, then beginning the migration. The
@@ -1186,7 +1192,7 @@ sub migrate_server
# success or failure, dual primary will be disabled again.
my $migration_command = "";
my $migrated = 0;
if ($anvil->data->{switches}{migrate_to})
if ($target)
{
# Can I even connect to the target?
my ($access) = $anvil->Remote->test_access({debug => 3, target => $target});
@@ -1340,11 +1346,12 @@ sub migrate_server
($migrated) = $anvil->Server->migrate({
debug => 2,
server => $server,
source => $source,
target => $target
});
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => { migrated => $migrated }});
}
elsif ($anvil->data->{switches}{migrate_from})
elsif ($source)
{
# Pull the server here. Start by verifying it's on the 'meta_on_node' host.
# Scan locally and on our peer
@@ -1353,7 +1360,7 @@ sub migrate_server
my $host = defined $anvil->data->{server}{location}{$server}{host} ? $anvil->data->{server}{location}{$server}{host} : "";
my $short_host = ($host =~ /^(.*?)\..*$/)[0];
my $status = defined $anvil->data->{server}{location}{$server}{status} ? $anvil->data->{server}{location}{$server}{status} : "";
my $status = defined $anvil->data->{server}{location}{$server}{status} ? $anvil->data->{server}{location}{$server}{status} : "";
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => {
host => $host,
short_host => $short_host,
@@ -1362,7 +1369,6 @@ sub migrate_server
}});
# Convert the host to a short name, in case the node's name is the short version.
my $server_host = defined $anvil->data->{server}{location}{$server}{host} ? $anvil->data->{server}{location}{$server}{host} : "";
my $server_status = defined $anvil->data->{server}{location}{$server}{status} ? $anvil->data->{server}{location}{$server}{status} : "";
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => {
@@ -1421,10 +1427,18 @@ sub validate_all
### TODO: When we have actual Anvil! systems, connect to the peers (nodes / DR) for this host and see
### if the server is running elsewhere.
my $server = $anvil->data->{environment}{OCF_RESKEY_name};
my $source = defined $anvil->data->{environment}{OCF_RESKEY_CRM_meta_migrate_source} ? $anvil->data->{environment}{OCF_RESKEY_CRM_meta_migrate_source} : "";
my $target = defined $anvil->data->{environment}{OCF_RESKEY_CRM_meta_migrate_target} ? $anvil->data->{environment}{OCF_RESKEY_CRM_meta_migrate_target} : "";
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => {
server => $server,
source => $source,
target => $target,
}});
# Read in and parse the server's XML.
$anvil->System->check_storage({debug => 3});
$anvil->Server->get_status({debug => 3, server => $anvil->data->{environment}{OCF_RESKEY_name}});
$anvil->System->check_storage({debug => 2});
$anvil->Server->get_status({debug => 2, server => $server});
# Is the name in the definition file what we expect (and did we read the XML data at all)?
validate_name($anvil);
@@ -1433,7 +1447,7 @@ sub validate_all
validate_emulator($anvil);
# These tests are only needed if we're about to boot the server
if (($anvil->data->{switches}{start}) or ($anvil->data->{switches}{migrate_from}))
if (($anvil->data->{switches}{start}) or ($source))
{
# Check that we have enough RAM.
validate_ram($anvil);
@@ -1497,28 +1511,33 @@ sub validate_storage
my ($anvil) = @_;
# When checking on a running server, use 'from_memory'.
my $server = $anvil->data->{environment}{OCF_RESKEY_name};
my $source = "from_disk";
my $server = $anvil->data->{environment}{OCF_RESKEY_name};
my $target = defined $anvil->data->{environment}{OCF_RESKEY_CRM_meta_migrate_target} ? $anvil->data->{environment}{OCF_RESKEY_CRM_meta_migrate_target} : "";
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => {
server => $server,
target => $target,
}});
my $xml_source = "from_disk";
if ($anvil->data->{server}{'local'}{$server}{from_memory}{host})
{
$source = "from_memory";
$xml_source = "from_memory";
}
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 3, list => {
server => $server,
source => $source,
server => $server,
xml_source => $xml_source,
}});
### TODO: If we're called with a status check and find an ISO file missing, eject it instead of failing.
### For now, we just fault out.
# Do the optical discs in the drive exist? If not, we'll eject it if we're about to boot and fail if
# we're about to migrate. We skip this check if we're migrating off or shutting down the server.
if ((exists $anvil->data->{server}{'local'}{$server}{$source}{device}{cdrom}) && (not $anvil->data->{switches}{migrate_to}) && (not $anvil->data->{switches}{stop}))
if ((exists $anvil->data->{server}{'local'}{$server}{$xml_source}{device}{cdrom}) && (not $target) && (not $anvil->data->{switches}{stop}))
{
foreach my $device_target (sort {$a cmp $b} keys %{$anvil->data->{server}{'local'}{$server}{$source}{device}{cdrom}{target}})
foreach my $device_target (sort {$a cmp $b} keys %{$anvil->data->{server}{'local'}{$server}{$xml_source}{device}{cdrom}{target}})
{
if ($anvil->data->{server}{'local'}{$server}{$source}{device}{cdrom}{target}{$device_target}{path})
if ($anvil->data->{server}{'local'}{$server}{$xml_source}{device}{cdrom}{target}{$device_target}{path})
{
my $file = $anvil->data->{server}{'local'}{$server}{$source}{device}{cdrom}{target}{$device_target}{path};
my $file = $anvil->data->{server}{'local'}{$server}{$xml_source}{device}{cdrom}{target}{$device_target}{path};
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => { file => $file }});
if (not -e $file)
{
@@ -1555,9 +1574,9 @@ sub validate_storage_drbd
my ($anvil) = @_;
# Now check storage.
my $server = $anvil->data->{environment}{OCF_RESKEY_name};
my $source = "from_disk";
my $host = $anvil->_short_host_name;
my $server = $anvil->data->{environment}{OCF_RESKEY_name};
my $xml_source = "from_disk";
my $host = $anvil->_short_host_name;
# Did I find a resource for each disk?
foreach my $device_path (sort {$a cmp $b} keys %{$anvil->data->{server}{'local'}{$server}{device}})
@@ -1573,9 +1592,9 @@ sub validate_storage_drbd
}
}
foreach my $device_target (sort {$a cmp $b} keys %{$anvil->data->{server}{'local'}{$server}{$source}{device}{disk}{target}})
foreach my $device_target (sort {$a cmp $b} keys %{$anvil->data->{server}{'local'}{$server}{$xml_source}{device}{disk}{target}})
{
my $drbd_device = $anvil->data->{server}{'local'}{$server}{$source}{device}{disk}{target}{$device_target}{path};
my $drbd_device = $anvil->data->{server}{'local'}{$server}{$xml_source}{device}{disk}{target}{$device_target}{path};
my $drbd_resource = defined $anvil->data->{drbd}{config}{$host}{drbd_path}{$drbd_device}{resource} ? $anvil->data->{drbd}{config}{$host}{drbd_path}{$drbd_device}{resource} : "";
my $on_lv = defined $anvil->data->{drbd}{config}{$host}{drbd_path}{$drbd_device}{on} ? $anvil->data->{drbd}{config}{$host}{drbd_path}{$drbd_device}{on} : "";
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => {

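The storage checks above lean on the DRBD config hash: given a device path read from the server's definition, the resource name and backing LV come from the same lookup used in the hunk above. A sketch, assuming the hash has been populated (for example by DRBD->get_devices()):

my $host        = $anvil->_short_host_name;
my $drbd_device = "/dev/drbd0";	# hypothetical path from the definition XML
my $resource    = $anvil->data->{drbd}{config}{$host}{drbd_path}{$drbd_device}{resource};
my $on_lv       = $anvil->data->{drbd}{config}{$host}{drbd_path}{$drbd_device}{on};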
@@ -955,7 +955,7 @@ If the targets are unique, did you copy the full database directory? A unique id
<key name="log_0486">Starting the daemon: [#!variable!daemon!#] on: [#!variable!host!#].</key>
<key name="log_0487">Verifying that the daemon: [#!variable!daemon!#] has started on: [#!variable!host!#].</key>
<key name="log_0488">Waiting for the daemon: [#!variable!daemon!#] to start on: [#!variable!host!#]...</key>
<key name="log_0489">The daemon: [#!variable!daemon!#] was already running on: [#!variable!host!#], no need to start.</key>
<key name="log_0489">The daemon: [#!variable!daemon!#] was already running on: [#!variable!host!#], no need to start.</key>
<key name="log_0490">There are no servers running on either node, stopping daemons.</key>
<key name="log_0491">There are no servers running on locally and the peer is not in the cluster, stopping daemons.</key>
<key name="log_0492">The daemon: [#!variable!daemon!#] is already stopped locally, nothing to do.</key>

@@ -29,9 +29,181 @@ $anvil->Database->connect();
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, 'print' => 1, level => 2, secure => 0, key => "log_0132"});
$anvil->Get->switches;
#$anvil->DRBD->get_devices({ debug => 2 }); die;
$anvil->Server->get_status({
debug => 2,
server => "srv01-sql",
my $xml = '<cib crm_feature_set="3.3.0" validate-with="pacemaker-3.2" epoch="322" num_updates="6" admin_epoch="0" cib-last-written="Sun Aug 16 18:24:22 2020" update-origin="mk-a02n01" update-client="cibadmin" update-user="root" have-quorum="1" dc-uuid="2">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
<nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.0.3-5.el8_2.1-4b1f869f0f"/>
<nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
<nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="mk-anvil-02"/>
<nvpair id="cib-bootstrap-options-stonith-max-attempts" name="stonith-max-attempts" value="INFINITY"/>
<nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"/>
<nvpair id="cib-bootstrap-options-maintenance-mode" name="maintenance-mode" value="true"/>
<nvpair id="cib-bootstrap-options-last-lrm-refresh" name="last-lrm-refresh" value="1597445952"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="1" uname="mk-a02n01"/>
<node id="2" uname="mk-a02n02"/>
</nodes>
<resources>
<primitive class="stonith" id="ipmilan_node1" type="fence_ipmilan">
<instance_attributes id="ipmilan_node1-instance_attributes">
<nvpair id="ipmilan_node1-instance_attributes-ipaddr" name="ipaddr" value="10.201.13.1"/>
<nvpair id="ipmilan_node1-instance_attributes-password" name="password" value="another secret p"/>
<nvpair id="ipmilan_node1-instance_attributes-pcmk_host_list" name="pcmk_host_list" value="mk-a02n01"/>
<nvpair id="ipmilan_node1-instance_attributes-username" name="username" value="admin"/>
</instance_attributes>
<operations>
<op id="ipmilan_node1-monitor-interval-60" interval="60" name="monitor"/>
</operations>
</primitive>
<primitive class="stonith" id="apc_snmp_node1_mk-pdu01" type="fence_apc_snmp">
<instance_attributes id="apc_snmp_node1_mk-pdu01-instance_attributes">
<nvpair id="apc_snmp_node1_mk-pdu01-instance_attributes-ip" name="ip" value="10.201.2.3"/>
<nvpair id="apc_snmp_node1_mk-pdu01-instance_attributes-pcmk_host_list" name="pcmk_host_list" value="mk-a02n01"/>
<nvpair id="apc_snmp_node1_mk-pdu01-instance_attributes-pcmk_off_action" name="pcmk_off_action" value="reboot"/>
<nvpair id="apc_snmp_node1_mk-pdu01-instance_attributes-port" name="port" value="3"/>
</instance_attributes>
<operations>
<op id="apc_snmp_node1_mk-pdu01-monitor-interval-60" interval="60" name="monitor"/>
</operations>
</primitive>
<primitive class="stonith" id="apc_snmp_node1_mk-pdu02" type="fence_apc_snmp">
<instance_attributes id="apc_snmp_node1_mk-pdu02-instance_attributes">
<nvpair id="apc_snmp_node1_mk-pdu02-instance_attributes-ip" name="ip" value="10.201.2.4"/>
<nvpair id="apc_snmp_node1_mk-pdu02-instance_attributes-pcmk_host_list" name="pcmk_host_list" value="mk-a02n01"/>
<nvpair id="apc_snmp_node1_mk-pdu02-instance_attributes-pcmk_off_action" name="pcmk_off_action" value="reboot"/>
<nvpair id="apc_snmp_node1_mk-pdu02-instance_attributes-port" name="port" value="3"/>
</instance_attributes>
<operations>
<op id="apc_snmp_node1_mk-pdu02-monitor-interval-60" interval="60" name="monitor"/>
</operations>
</primitive>
<primitive class="stonith" id="ipmilan_node2" type="fence_ipmilan">
<instance_attributes id="ipmilan_node2-instance_attributes">
<nvpair id="ipmilan_node2-instance_attributes-ipaddr" name="ipaddr" value="10.201.13.2"/>
<nvpair id="ipmilan_node2-instance_attributes-password" name="password" value="another secret p"/>
<nvpair id="ipmilan_node2-instance_attributes-pcmk_host_list" name="pcmk_host_list" value="mk-a02n02"/>
<nvpair id="ipmilan_node2-instance_attributes-username" name="username" value="admin"/>
</instance_attributes>
<operations>
<op id="ipmilan_node2-monitor-interval-60" interval="60" name="monitor"/>
</operations>
</primitive>
<primitive class="stonith" id="apc_snmp_node2_mk-pdu01" type="fence_apc_snmp">
<instance_attributes id="apc_snmp_node2_mk-pdu01-instance_attributes">
<nvpair id="apc_snmp_node2_mk-pdu01-instance_attributes-ip" name="ip" value="10.201.2.3"/>
<nvpair id="apc_snmp_node2_mk-pdu01-instance_attributes-pcmk_host_list" name="pcmk_host_list" value="mk-a02n02"/>
<nvpair id="apc_snmp_node2_mk-pdu01-instance_attributes-pcmk_off_action" name="pcmk_off_action" value="reboot"/>
<nvpair id="apc_snmp_node2_mk-pdu01-instance_attributes-port" name="port" value="4"/>
</instance_attributes>
<operations>
<op id="apc_snmp_node2_mk-pdu01-monitor-interval-60" interval="60" name="monitor"/>
</operations>
</primitive>
<primitive class="stonith" id="apc_snmp_node2_mk-pdu02" type="fence_apc_snmp">
<instance_attributes id="apc_snmp_node2_mk-pdu02-instance_attributes">
<nvpair id="apc_snmp_node2_mk-pdu02-instance_attributes-ip" name="ip" value="10.201.2.4"/>
<nvpair id="apc_snmp_node2_mk-pdu02-instance_attributes-pcmk_host_list" name="pcmk_host_list" value="mk-a02n02"/>
<nvpair id="apc_snmp_node2_mk-pdu02-instance_attributes-pcmk_off_action" name="pcmk_off_action" value="reboot"/>
<nvpair id="apc_snmp_node2_mk-pdu02-instance_attributes-port" name="port" value="4"/>
</instance_attributes>
<operations>
<op id="apc_snmp_node2_mk-pdu02-monitor-interval-60" interval="60" name="monitor"/>
</operations>
</primitive>
<primitive class="ocf" id="srv07-el6" provider="alteeve" type="server">
<instance_attributes id="srv07-el6-instance_attributes">
<nvpair id="srv07-el6-instance_attributes-name" name="name" value="srv07-el6"/>
</instance_attributes>
<meta_attributes id="srv07-el6-meta_attributes">
<nvpair id="srv07-el6-meta_attributes-allow-migrate" name="allow-migrate" value="true"/>
<nvpair id="srv07-el6-meta_attributes-target-role" name="target-role" value="Stopped"/>
</meta_attributes>
<operations>
<op id="srv07-el6-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="600"/>
<op id="srv07-el6-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="600"/>
<op id="srv07-el6-monitor-interval-60" interval="60" name="monitor" on-fail="block"/>
<op id="srv07-el6-notify-interval-0s" interval="0s" name="notify" timeout="20"/>
<op id="srv07-el6-start-interval-0s" interval="0s" name="start" timeout="30"/>
<op id="srv07-el6-stop-interval-0s" interval="0s" name="stop" timeout="60"/>
</operations>
</primitive>
</resources>
<constraints>
<rsc_location id="cli-prefer-srv07-el6" node="mk-a02n02" role="Started" rsc="srv07-el6" score="INFINITY"/>
</constraints>
<fencing-topology>
<fencing-level devices="ipmilan_node1" id="fl-mk-a02n01-1" index="1" target="mk-a02n01"/>
<fencing-level devices="apc_snmp_node1_mk-pdu01,apc_snmp_node1_mk-pdu02" id="fl-mk-a02n01-2" index="2" target="mk-a02n01"/>
<fencing-level devices="ipmilan_node2" id="fl-mk-a02n02-1" index="1" target="mk-a02n02"/>
<fencing-level devices="apc_snmp_node2_mk-pdu01,apc_snmp_node2_mk-pdu02" id="fl-mk-a02n02-2" index="2" target="mk-a02n02"/>
</fencing-topology>
</configuration>
<status>
<node_state id="2" uname="mk-a02n02" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
<lrm id="2">
<lrm_resources>
<lrm_resource id="ipmilan_node1" type="fence_ipmilan" class="stonith">
<lrm_rsc_op id="ipmilan_node1_last_0" operation_key="ipmilan_node1_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="8:0:7:fd70072a-cb46-4085-89db-f7c0073f77ce" transition-magic="0:7;8:0:7:fd70072a-cb46-4085-89db-f7c0073f77ce" exit-reason="" on_node="mk-a02n02" call-id="5" rc-code="7" op-status="0" interval="0" last-rc-change="1597616557" last-run="1597616557" exec-time="2" queue-time="0" op-digest="230c3c46a7f39ff7a5ff7f1b8aa9f17d" op-secure-params=" password passwd " op-secure-digest="a8bb97c4c1cae8f90e445a0ce85ecc19"/>
</lrm_resource>
<lrm_resource id="apc_snmp_node1_mk-pdu01" type="fence_apc_snmp" class="stonith">
<lrm_rsc_op id="apc_snmp_node1_mk-pdu01_last_0" operation_key="apc_snmp_node1_mk-pdu01_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="17:0:0:fd70072a-cb46-4085-89db-f7c0073f77ce" transition-magic="0:0;17:0:0:fd70072a-cb46-4085-89db-f7c0073f77ce" exit-reason="" on_node="mk-a02n02" call-id="30" rc-code="0" op-status="0" interval="0" last-rc-change="1597616558" last-run="1597616558" exec-time="598" queue-time="0" op-digest="6b6191eeb61cd595ab0a26ec9762f8aa" op-secure-params=" password passwd " op-secure-digest="1dc851b0efa605b4ec3f03e3a3ba62f7"/>
</lrm_resource>
<lrm_resource id="apc_snmp_node1_mk-pdu02" type="fence_apc_snmp" class="stonith">
<lrm_rsc_op id="apc_snmp_node1_mk-pdu02_last_0" operation_key="apc_snmp_node1_mk-pdu02_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="10:0:7:fd70072a-cb46-4085-89db-f7c0073f77ce" transition-magic="0:7;10:0:7:fd70072a-cb46-4085-89db-f7c0073f77ce" exit-reason="" on_node="mk-a02n02" call-id="13" rc-code="7" op-status="0" interval="0" last-rc-change="1597616557" last-run="1597616557" exec-time="0" queue-time="0" op-digest="f4b11aca778aa58d81b7fa096bfe3fb4" op-secure-params=" password passwd " op-secure-digest="78517effd4af72191ac2c0b9d8567fcd"/>
</lrm_resource>
<lrm_resource id="ipmilan_node2" type="fence_ipmilan" class="stonith">
<lrm_rsc_op id="ipmilan_node2_last_0" operation_key="ipmilan_node2_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="21:0:0:fd70072a-cb46-4085-89db-f7c0073f77ce" transition-magic="0:0;21:0:0:fd70072a-cb46-4085-89db-f7c0073f77ce" exit-reason="" on_node="mk-a02n02" call-id="31" rc-code="0" op-status="0" interval="0" last-rc-change="1597616558" last-run="1597616558" exec-time="98" queue-time="0" op-digest="e759a456df902485096d4a48725ed81c" op-secure-params=" password passwd " op-secure-digest="47989163387c397e63fa3acdbec0d274"/>
</lrm_resource>
<lrm_resource id="apc_snmp_node2_mk-pdu01" type="fence_apc_snmp" class="stonith">
<lrm_rsc_op id="apc_snmp_node2_mk-pdu01_last_0" operation_key="apc_snmp_node2_mk-pdu01_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="12:0:7:fd70072a-cb46-4085-89db-f7c0073f77ce" transition-magic="0:7;12:0:7:fd70072a-cb46-4085-89db-f7c0073f77ce" exit-reason="" on_node="mk-a02n02" call-id="21" rc-code="7" op-status="0" interval="0" last-rc-change="1597616557" last-run="1597616557" exec-time="0" queue-time="0" op-digest="3d4af69481cb01c8c8f0f8af95940b99" op-secure-params=" password passwd " op-secure-digest="fd2959d25b0a20f6d1bc630f7565fd78"/>
</lrm_resource>
<lrm_resource id="apc_snmp_node2_mk-pdu02" type="fence_apc_snmp" class="stonith">
<lrm_rsc_op id="apc_snmp_node2_mk-pdu02_last_0" operation_key="apc_snmp_node2_mk-pdu02_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="25:0:0:fd70072a-cb46-4085-89db-f7c0073f77ce" transition-magic="0:0;25:0:0:fd70072a-cb46-4085-89db-f7c0073f77ce" exit-reason="" on_node="mk-a02n02" call-id="32" rc-code="0" op-status="0" interval="0" last-rc-change="1597616558" last-run="1597616558" exec-time="591" queue-time="0" op-digest="7787bf20740a07e14145707988b18000" op-secure-params=" password passwd " op-secure-digest="11d1e757682ff46234d9816e06534953"/>
</lrm_resource>
<lrm_resource id="srv07-el6" type="server" class="ocf" provider="alteeve">
<lrm_rsc_op id="srv07-el6_last_0" operation_key="srv07-el6_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="14:0:7:fd70072a-cb46-4085-89db-f7c0073f77ce" transition-magic="0:7;14:0:7:fd70072a-cb46-4085-89db-f7c0073f77ce" exit-reason="" on_node="mk-a02n02" call-id="29" rc-code="7" op-status="0" interval="0" last-rc-change="1597616558" last-run="1597616558" exec-time="536" queue-time="1" op-digest="41dcb3443c331f2fe7ae92962905159f"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
<node_state id="1" uname="mk-a02n01" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
<lrm id="1">
<lrm_resources>
<lrm_resource id="ipmilan_node1" type="fence_ipmilan" class="stonith">
<lrm_rsc_op id="ipmilan_node1_last_0" operation_key="ipmilan_node1_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="15:0:0:fd70072a-cb46-4085-89db-f7c0073f77ce" transition-magic="0:0;15:0:0:fd70072a-cb46-4085-89db-f7c0073f77ce" exit-reason="" on_node="mk-a02n01" call-id="30" rc-code="0" op-status="0" interval="0" last-rc-change="1597616559" last-run="1597616559" exec-time="113" queue-time="0" op-digest="230c3c46a7f39ff7a5ff7f1b8aa9f17d" op-secure-params=" password passwd " op-secure-digest="a8bb97c4c1cae8f90e445a0ce85ecc19"/>
</lrm_resource>
<lrm_resource id="apc_snmp_node1_mk-pdu01" type="fence_apc_snmp" class="stonith">
<lrm_rsc_op id="apc_snmp_node1_mk-pdu01_last_0" operation_key="apc_snmp_node1_mk-pdu01_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="2:0:7:fd70072a-cb46-4085-89db-f7c0073f77ce" transition-magic="0:7;2:0:7:fd70072a-cb46-4085-89db-f7c0073f77ce" exit-reason="" on_node="mk-a02n01" call-id="9" rc-code="7" op-status="0" interval="0" last-rc-change="1597616557" last-run="1597616557" exec-time="0" queue-time="0" op-digest="6b6191eeb61cd595ab0a26ec9762f8aa" op-secure-params=" password passwd " op-secure-digest="1dc851b0efa605b4ec3f03e3a3ba62f7"/>
</lrm_resource>
<lrm_resource id="apc_snmp_node1_mk-pdu02" type="fence_apc_snmp" class="stonith">
<lrm_rsc_op id="apc_snmp_node1_mk-pdu02_last_0" operation_key="apc_snmp_node1_mk-pdu02_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="19:0:0:fd70072a-cb46-4085-89db-f7c0073f77ce" transition-magic="0:0;19:0:0:fd70072a-cb46-4085-89db-f7c0073f77ce" exit-reason="" on_node="mk-a02n01" call-id="31" rc-code="0" op-status="0" interval="0" last-rc-change="1597616559" last-run="1597616559" exec-time="616" queue-time="0" op-digest="f4b11aca778aa58d81b7fa096bfe3fb4" op-secure-params=" password passwd " op-secure-digest="78517effd4af72191ac2c0b9d8567fcd"/>
</lrm_resource>
<lrm_resource id="ipmilan_node2" type="fence_ipmilan" class="stonith">
<lrm_rsc_op id="ipmilan_node2_last_0" operation_key="ipmilan_node2_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="4:0:7:fd70072a-cb46-4085-89db-f7c0073f77ce" transition-magic="0:7;4:0:7:fd70072a-cb46-4085-89db-f7c0073f77ce" exit-reason="" on_node="mk-a02n01" call-id="17" rc-code="7" op-status="0" interval="0" last-rc-change="1597616557" last-run="1597616557" exec-time="0" queue-time="0" op-digest="e759a456df902485096d4a48725ed81c" op-secure-params=" password passwd " op-secure-digest="47989163387c397e63fa3acdbec0d274"/>
</lrm_resource>
<lrm_resource id="apc_snmp_node2_mk-pdu01" type="fence_apc_snmp" class="stonith">
<lrm_rsc_op id="apc_snmp_node2_mk-pdu01_last_0" operation_key="apc_snmp_node2_mk-pdu01_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="23:0:0:fd70072a-cb46-4085-89db-f7c0073f77ce" transition-magic="0:0;23:0:0:fd70072a-cb46-4085-89db-f7c0073f77ce" exit-reason="" on_node="mk-a02n01" call-id="32" rc-code="0" op-status="0" interval="0" last-rc-change="1597616559" last-run="1597616559" exec-time="617" queue-time="0" op-digest="3d4af69481cb01c8c8f0f8af95940b99" op-secure-params=" password passwd " op-secure-digest="fd2959d25b0a20f6d1bc630f7565fd78"/>
</lrm_resource>
<lrm_resource id="apc_snmp_node2_mk-pdu02" type="fence_apc_snmp" class="stonith">
<lrm_rsc_op id="apc_snmp_node2_mk-pdu02_last_0" operation_key="apc_snmp_node2_mk-pdu02_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="6:0:7:fd70072a-cb46-4085-89db-f7c0073f77ce" transition-magic="0:7;6:0:7:fd70072a-cb46-4085-89db-f7c0073f77ce" exit-reason="" on_node="mk-a02n01" call-id="25" rc-code="7" op-status="0" interval="0" last-rc-change="1597616557" last-run="1597616557" exec-time="0" queue-time="0" op-digest="7787bf20740a07e14145707988b18000" op-secure-params=" password passwd " op-secure-digest="11d1e757682ff46234d9816e06534953"/>
</lrm_resource>
<lrm_resource id="srv07-el6" type="server" class="ocf" provider="alteeve">
<lrm_rsc_op id="srv07-el6_last_0" operation_key="srv07-el6_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="7:0:7:fd70072a-cb46-4085-89db-f7c0073f77ce" transition-magic="0:7;7:0:7:fd70072a-cb46-4085-89db-f7c0073f77ce" exit-reason="" on_node="mk-a02n01" call-id="29" rc-code="7" op-status="0" interval="0" last-rc-change="1597616559" last-run="1597616559" exec-time="541" queue-time="0" op-digest="41dcb3443c331f2fe7ae92962905159f"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
</status>
</cib>
';
$anvil->Cluster->parse_cib({
debug => 2,
#cib => $xml,
});
print "Cluster is in maintenance mode? [".$anvil->data->{cib}{parsed}{data}{cluster}{'maintenance-mode'}."]\n";
