* Created Cluster->parse_crm_mon and updated Cluster->parse_cib() to determine what state a server is in and which host has a server.

* Added support in anvil.conf to disable scan agents with 'scancore::<agent_name>::disable', and added handling this to agents. Also allowed for '--force' to override this setting.
* Updated ScanCore->agent_startup() to allow for empty scan agent table lists.

Signed-off-by: Digimer <digimer@alteeve.ca>
main
Digimer 4 years ago
parent 4dfe0cb5a0
commit e240a32a19
  1. 2
      Anvil/Tools.pm
  2. 293
      Anvil/Tools/Cluster.pm
  3. 44
      Anvil/Tools/ScanCore.pm
  4. 12
      scancore-agents/scan-cluster/scan-cluster
  5. 15
      scancore-agents/scan-hardware/scan-hardware
  6. 101
      scancore-agents/scan-server/scan-server
  7. 1
      scancore-agents/scan-server/scan-server.sql
  8. 30
      scancore-agents/scan-server/scan-server.xml
  9. 12
      share/words.xml
  10. 309
      tools/test.pl

@ -1138,6 +1138,8 @@ sub _set_paths
createrepo_c => "/usr/bin/createrepo_c",
createuser => "/usr/bin/createuser",
crm_error => "/usr/sbin/crm_error",
crm_resource => "/usr/sbin/crm_resource",
crm_mon => "/usr/sbin/crm_mon",
dmidecode => "/usr/sbin/dmidecode",
dnf => "/usr/bin/dnf",
drbdadm => "/usr/sbin/drbdadm",

@ -1046,7 +1046,9 @@ sub parse_cib
}});
}
# Hosted server information
# Hosted server information... We can only get basic information out of the CIB, so we'll use crm_mon
# for details. We don't just rely on 'crm_mon' however, as servers that aren't running will not (yet)
# show there.
foreach my $id (sort {$a cmp $b} keys %{$anvil->data->{cib}{parsed}{cib}{status}{node_state}})
{
my $node_name = $anvil->data->{cib}{parsed}{configuration}{nodes}{$id}{uname};
@ -1074,132 +1076,197 @@ sub parse_cib
# This will be updated below if the server is running.
if (not exists $anvil->data->{cib}{parsed}{data}{server}{$lrm_resource_id})
{
$anvil->data->{cib}{parsed}{data}{server}{$lrm_resource_id}{status} = "off";
$anvil->data->{cib}{parsed}{data}{server}{$lrm_resource_id}{last_operation} = "unknown";
$anvil->data->{cib}{parsed}{data}{server}{$lrm_resource_id}{last_operation_rc_code} = "-1";
$anvil->data->{cib}{parsed}{data}{server}{$lrm_resource_id}{host} = "";
$anvil->data->{cib}{parsed}{data}{server}{$lrm_resource_id}{last_monitor_rc_code} = "-1";
$anvil->data->{cib}{parsed}{data}{server}{$lrm_resource_id}{last_failure_operation} = "";
$anvil->data->{cib}{parsed}{data}{server}{$lrm_resource_id}{last_failure_return_code} = "-1";
$anvil->data->{cib}{parsed}{data}{server}{$lrm_resource_id}{status} = "off";
$anvil->data->{cib}{parsed}{data}{server}{$lrm_resource_id}{host_name} = "";
$anvil->data->{cib}{parsed}{data}{server}{$lrm_resource_id}{host_id} = "";
$anvil->data->{cib}{parsed}{data}{server}{$lrm_resource_id}{active} = "";
$anvil->data->{cib}{parsed}{data}{server}{$lrm_resource_id}{blocked} = "";
$anvil->data->{cib}{parsed}{data}{server}{$lrm_resource_id}{failed} = "";
$anvil->data->{cib}{parsed}{data}{server}{$lrm_resource_id}{managed} = "";
$anvil->data->{cib}{parsed}{data}{server}{$lrm_resource_id}{orphaned} = "";
$anvil->data->{cib}{parsed}{data}{server}{$lrm_resource_id}{role} = "";
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => {
"cib::parsed::data::server::${lrm_resource_id}::status" => $anvil->data->{cib}{parsed}{data}{server}{$lrm_resource_id}{status},
"cib::parsed::data::server::${lrm_resource_id}::host" => $anvil->data->{cib}{parsed}{data}{server}{$lrm_resource_id}{host},
"cib::parsed::data::server::${lrm_resource_id}::last_monitor_rc_code" => $anvil->data->{cib}{parsed}{data}{server}{$lrm_resource_id}{last_monitor_rc_code},
"cib::parsed::data::server::${lrm_resource_id}::last_operation" => $anvil->data->{cib}{parsed}{data}{server}{$lrm_resource_id}{last_operation},
"cib::parsed::data::server::${lrm_resource_id}::last_operation_rc_code" => $anvil->data->{cib}{parsed}{data}{server}{$lrm_resource_id}{last_operation_rc_code},
"cib::parsed::data::server::${lrm_resource_id}::last_failure_operation" => $anvil->data->{cib}{parsed}{data}{server}{$lrm_resource_id}{last_operation},
"cib::parsed::data::server::${lrm_resource_id}::last_failure_return_code" => $anvil->data->{cib}{parsed}{data}{server}{$lrm_resource_id}{last_failure_return_code},
"cib::parsed::data::server::${lrm_resource_id}::status" => $anvil->data->{cib}{parsed}{data}{server}{$lrm_resource_id}{status},
"cib::parsed::data::server::${lrm_resource_id}::host_name" => $anvil->data->{cib}{parsed}{data}{server}{$lrm_resource_id}{host_name},
"cib::parsed::data::server::${lrm_resource_id}::host_id" => $anvil->data->{cib}{parsed}{data}{server}{$lrm_resource_id}{host_id},
"cib::parsed::data::server::${lrm_resource_id}::active" => $anvil->data->{cib}{parsed}{data}{server}{$lrm_resource_id}{active},
"cib::parsed::data::server::${lrm_resource_id}::blocked" => $anvil->data->{cib}{parsed}{data}{server}{$lrm_resource_id}{blocked},
"cib::parsed::data::server::${lrm_resource_id}::failed" => $anvil->data->{cib}{parsed}{data}{server}{$lrm_resource_id}{failed},
"cib::parsed::data::server::${lrm_resource_id}::managed" => $anvil->data->{cib}{parsed}{data}{server}{$lrm_resource_id}{managed},
"cib::parsed::data::server::${lrm_resource_id}::orphaned" => $anvil->data->{cib}{parsed}{data}{server}{$lrm_resource_id}{orphaned},
"cib::parsed::data::server::${lrm_resource_id}::role" => $anvil->data->{cib}{parsed}{data}{server}{$lrm_resource_id}{role},
}});
}
# If there are two LRM resource operation IDs, then the server is
# running on this node. Generally (always?) there will be a
# '$lrm_rsc_op_id' called '<server>_last_0'. If there is a second one
# with '_monitor' in it, the server is running locally (we always have
# a monitor operation defined).
if (($lrm_resource_operations_count > 1) && ($lrm_rsc_op_id !~ /_last_/))
{
# The server is (should be) running.
# - return code is from the RA's last status check.
# - exit-reason is the STDERR of the RA
# -
my $last_return_code = $anvil->data->{cib}{parsed}{cib}{status}{node_state}{$id}{lrm_id}{$lrm_id}{lrm_resource}{$lrm_resource_id}{lrm_rsc_op_id}{$lrm_rsc_op_id}{'rc-code'};
my $status = "unknown";
if ($last_return_code eq "0")
{
$status = "running";
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { status => $status }});
}
elsif ($last_return_code eq "7")
{
$status = "stopped";
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { status => $status }});
}
else
{
$status = "error_condition - rc: ".$last_return_code;
# Log all variables in case there is anything useful.
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 1, list => { status => $status }});
foreach my $variable (sort {$a cmp $b} keys %{$anvil->data->{cib}{parsed}{cib}{status}{node_state}{$id}{lrm_id}{$lrm_id}{lrm_resource}{$lrm_resource_id}{lrm_rsc_op_id}{$lrm_rsc_op_id}})
{
my $value = $anvil->data->{cib}{parsed}{cib}{status}{node_state}{$id}{lrm_id}{$lrm_id}{lrm_resource}{$lrm_resource_id}{lrm_rsc_op_id}{$lrm_rsc_op_id}{$variable};
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 1, list => {
"cib::parsed::cib::status::node_state::${id}::lrm_id::${lrm_id}::lrm_resource::${lrm_resource_id}::lrm_rsc_op_id::${lrm_rsc_op_id}::${variable}" => $anvil->data->{cib}{parsed}{cib}{status}{node_state}{$id}{lrm_id}{$lrm_id}{lrm_resource}{$lrm_resource_id}{lrm_rsc_op_id}{$lrm_rsc_op_id}{$variable},
}});
}
}
$anvil->data->{cib}{parsed}{data}{server}{$lrm_resource_id}{status} = $status;
$anvil->data->{cib}{parsed}{data}{server}{$lrm_resource_id}{host} = $anvil->data->{cib}{parsed}{cib}{status}{node_state}{$id}{lrm_id}{$lrm_id}{lrm_resource}{$lrm_resource_id}{lrm_rsc_op_id}{$lrm_rsc_op_id}{'on_node'};
$anvil->data->{cib}{parsed}{data}{server}{$lrm_resource_id}{last_monitor_rc_code} = $anvil->data->{cib}{parsed}{cib}{status}{node_state}{$id}{lrm_id}{$lrm_id}{lrm_resource}{$lrm_resource_id}{lrm_rsc_op_id}{$lrm_rsc_op_id}{'rc-code'};
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => {
"cib::parsed::data::server::${lrm_resource_id}::status" => $anvil->data->{cib}{parsed}{data}{server}{$lrm_resource_id}{status},
"cib::parsed::data::server::${lrm_resource_id}::host" => $anvil->data->{cib}{parsed}{data}{server}{$lrm_resource_id}{host},
"cib::parsed::data::server::${lrm_resource_id}::last_monitor_rc_code" => $anvil->data->{cib}{parsed}{data}{server}{$lrm_resource_id}{last_monitor_rc_code},
}});
}
elsif ($lrm_rsc_op_id =~ /_last_failure_/)
{
$anvil->data->{cib}{parsed}{data}{server}{$lrm_resource_id}{last_failure_operation} = $operation;
$anvil->data->{cib}{parsed}{data}{server}{$lrm_resource_id}{last_failure_return_code} = $anvil->data->{cib}{parsed}{cib}{status}{node_state}{$id}{lrm_id}{$lrm_id}{lrm_resource}{$lrm_resource_id}{lrm_rsc_op_id}{$lrm_rsc_op_id}{'rc-code'};
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => {
"cib::parsed::data::server::${lrm_resource_id}::last_failure_operation" => $anvil->data->{cib}{parsed}{data}{server}{$lrm_resource_id}{last_failure_operation},
"cib::parsed::data::server::${lrm_resource_id}::last_failure_return_code" => $anvil->data->{cib}{parsed}{data}{server}{$lrm_resource_id}{last_failure_return_code},
}});
}
else
{
# This isn't a monitor operation, so it will contain the most
# recent data on the server.
if ($anvil->data->{cib}{parsed}{data}{server}{$lrm_resource_id}{last_operation} eq "unknown")
{
$anvil->data->{cib}{parsed}{data}{server}{$lrm_resource_id}{last_operation} = $operation;
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => {
"cib::parsed::data::server::${lrm_resource_id}::last_operation" => $anvil->data->{cib}{parsed}{data}{server}{$lrm_resource_id}{last_operation},
}});
}
if ($anvil->data->{cib}{parsed}{data}{server}{$lrm_resource_id}{last_operation_rc_code} eq "-1")
{
$anvil->data->{cib}{parsed}{data}{server}{$lrm_resource_id}{last_operation_rc_code} = $anvil->data->{cib}{parsed}{cib}{status}{node_state}{$id}{lrm_id}{$lrm_id}{lrm_resource}{$lrm_resource_id}{lrm_rsc_op_id}{$lrm_rsc_op_id}{'rc-code'};
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => {
"cib::parsed::data::server::${lrm_resource_id}::last_operation_rc_code" => $anvil->data->{cib}{parsed}{data}{server}{$lrm_resource_id}{last_operation_rc_code},
}});
}
}
print "Node: [".$node_name."] (".$id."), lrm_id: [".$lrm_id."], lrm_resource_id: [".$lrm_resource_id."] (type: [".$type."], class: [".$class."]), lrm_rsc_op_id: [".$lrm_rsc_op_id."] (".$lrm_resource_operations_count.")\n";
foreach my $variable (sort {$a cmp $b} keys %{$anvil->data->{cib}{parsed}{cib}{status}{node_state}{$id}{lrm_id}{$lrm_id}{lrm_resource}{$lrm_resource_id}{lrm_rsc_op_id}{$lrm_rsc_op_id}})
{
my $value = $anvil->data->{cib}{parsed}{cib}{status}{node_state}{$id}{lrm_id}{$lrm_id}{lrm_resource}{$lrm_resource_id}{lrm_rsc_op_id}{$lrm_rsc_op_id}{$variable};
print "- Variable: [".$variable."], value: [".$value."]\n";
}
}
}
}
}
# Now call 'crm_mon --output-as=xml' to determine which resources are running where. As of the time
# of writing this (late 2020), stopped resources are not displayed. So the principal purpose of this
# call is to determine which resources are running, and where they are running.
$anvil->Cluster->parse_crm_mon({debug => $debug});
foreach my $server (sort {$a cmp $b} keys %{$anvil->data->{crm_mon}{parsed}{'pacemaker-result'}{resources}{resource}})
{
my $host_name = $anvil->data->{crm_mon}{parsed}{'pacemaker-result'}{resources}{resource}{$server}{host}{node_name};
my $host_id = $anvil->data->{crm_mon}{parsed}{'pacemaker-result'}{resources}{resource}{$server}{host}{node_id};
my $role = $anvil->data->{crm_mon}{parsed}{'pacemaker-result'}{resources}{resource}{$server}{variables}{role};
my $active = $anvil->data->{crm_mon}{parsed}{'pacemaker-result'}{resources}{resource}{$server}{variables}{active} eq "true" ? 1 : 0;
my $blocked = $anvil->data->{crm_mon}{parsed}{'pacemaker-result'}{resources}{resource}{$server}{variables}{blocked} eq "true" ? 1 : 0;
my $failed = $anvil->data->{crm_mon}{parsed}{'pacemaker-result'}{resources}{resource}{$server}{variables}{failed} eq "true" ? 1 : 0;
my $managed = $anvil->data->{crm_mon}{parsed}{'pacemaker-result'}{resources}{resource}{$server}{variables}{managed} eq "true" ? 1 : 0;
my $orphaned = $anvil->data->{crm_mon}{parsed}{'pacemaker-result'}{resources}{resource}{$server}{variables}{orphaned} eq "true" ? 1 : 0;
my $status = lc($role);
if ((lc($role) eq "started") or (lc($role) eq "starting"))
{
$status = "on";
}
=cut
2020/09/24 18:14:42:Cluster.pm:1154; Variables:
|- server: ..... [srv07-el6]
|- host_name: .. [mk-a02n02] <- Old host
|- status: ..... [migrating]
|- role: ....... [Migrating]
\- active: ..... [1]
=cut
$anvil->data->{cib}{parsed}{data}{server}{$server}{status} = $status;
$anvil->data->{cib}{parsed}{data}{server}{$server}{host_name} = $host_name;
$anvil->data->{cib}{parsed}{data}{server}{$server}{host_id} = $host_id;
$anvil->data->{cib}{parsed}{data}{server}{$server}{role} = $role;
$anvil->data->{cib}{parsed}{data}{server}{$server}{active} = $active;
$anvil->data->{cib}{parsed}{data}{server}{$server}{blocked} = $blocked;
$anvil->data->{cib}{parsed}{data}{server}{$server}{failed} = $failed;
$anvil->data->{cib}{parsed}{data}{server}{$server}{managed} = $managed;
$anvil->data->{cib}{parsed}{data}{server}{$server}{orphaned} = $orphaned;
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => {
"cib::parsed::data::server::${server}::status" => $anvil->data->{cib}{parsed}{data}{server}{$server}{status},
"cib::parsed::data::server::${server}::host_name" => $anvil->data->{cib}{parsed}{data}{server}{$server}{host_name},
"cib::parsed::data::server::${server}::host_id" => $anvil->data->{cib}{parsed}{data}{server}{$server}{host_id},
"cib::parsed::data::server::${server}::role" => $anvil->data->{cib}{parsed}{data}{server}{$server}{role},
"cib::parsed::data::server::${server}::active" => $anvil->data->{cib}{parsed}{data}{server}{$server}{active},
"cib::parsed::data::server::${server}::blocked" => $anvil->data->{cib}{parsed}{data}{server}{$server}{blocked},
"cib::parsed::data::server::${server}::failed" => $anvil->data->{cib}{parsed}{data}{server}{$server}{failed},
"cib::parsed::data::server::${server}::managed" => $anvil->data->{cib}{parsed}{data}{server}{$server}{managed},
"cib::parsed::data::server::${server}::orphaned" => $anvil->data->{cib}{parsed}{data}{server}{$server}{orphaned},
}});
}
# Debug code.
foreach my $server (sort {$a cmp $b} keys %{$anvil->data->{cib}{parsed}{data}{server}})
{
my $last_operation = $anvil->data->{cib}{parsed}{data}{server}{$server}{last_operation};
my $last_operation_rc_code = $anvil->data->{cib}{parsed}{data}{server}{$server}{last_operation_rc_code};
my $status = $anvil->data->{cib}{parsed}{data}{server}{$server}{status};
my $host = $anvil->data->{cib}{parsed}{data}{server}{$server}{host};
my $last_monitor_rc_code = $anvil->data->{cib}{parsed}{data}{server}{$server}{last_monitor_rc_code};
my $last_failure_operation = $anvil->data->{cib}{parsed}{data}{server}{$server}{last_failure_operation};
my $last_failure_return_code = $anvil->data->{cib}{parsed}{data}{server}{$server}{last_failure_return_code};
my $status = $anvil->data->{cib}{parsed}{data}{server}{$server}{status};
my $host_name = $anvil->data->{cib}{parsed}{data}{server}{$server}{host_name};
my $role = $anvil->data->{cib}{parsed}{data}{server}{$server}{role};
my $active = $anvil->data->{cib}{parsed}{data}{server}{$server}{active};
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => {
's1:server' => $server,
's2:status' => $status,
's2:host_name' => $host_name,
's4:role' => $role,
's5:active' => $active,
}});
}
return($problem);
}
=head2 parse_crm_mon
This reads in the XML output of C<< crm_mon >> and parses it. On success, it returns C<< 0 >>. On failure (ie: pcsd isn't running), returns C<< 1 >>.
B<< Note >>: At this time, this method only pulls out the host for running servers. More data may be parsed out at a future time.
Parameters;
=head3 xml (optional)
B<< Note >>: Generally this should not be used.
By default, the C<< crm_mon --output-as=xml >> is read directly. However, this parameter can be used to pass in raw XML instead. If this is set, C<< crm_mon >> is B<< NOT >> invoked.
=cut
sub parse_crm_mon
{
	my $self      = shift;
	my $parameter = shift;
	my $anvil     = $self->parent;
	my $debug     = defined $parameter->{debug} ? $parameter->{debug} : 3;
	$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => $debug, key => "log_0125", variables => { method => "Cluster->parse_crm_mon()" }});
	
	# If the caller passed in raw XML, we parse that instead of invoking 'crm_mon'. This is
	# mainly for testing; normally 'xml' is left empty.
	my $xml = defined $parameter->{xml} ? $parameter->{xml} : "";
	$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { 
		xml => $xml, 
	}});
	
	# Returned to the caller; '0' means we read and parsed the XML, '1' means the call or the
	# parse failed.
	my $problem      = 1;
	my $crm_mon_data = "";
	my $return_code  = 0;
	if ($xml)
	{
		$crm_mon_data = $xml;
		$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { crm_mon_data => $crm_mon_data }});
	}
	else
	{
		my $shell_call = $anvil->data->{path}{exe}{crm_mon}." --output-as=xml";
		$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { shell_call => $shell_call }});
		($crm_mon_data, $return_code) = $anvil->System->call({debug => 3, shell_call => $shell_call});
		# NOTE: Only variables declared in this method are logged here. (A previous revision
		#       logged variables from parse_cib() that don't exist in this scope, which fails
		#       to compile under 'use strict'.)
		$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { 
			crm_mon_data => $crm_mon_data, 
			return_code  => $return_code, 
		}});
	}
	if ($return_code)
	{
		# Failed to read the CIB; 'crm_mon' exits non-zero when the cluster isn't running.
		$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => $debug, key => "warning_0062"});
	}
	else
	{
		# Localize '$@' so a failed eval here doesn't clobber the caller's error state.
		local $@;
		my $dom = eval { XML::LibXML->load_xml(string => $crm_mon_data); };
		if ($@)
		{
			# The XML didn't parse; log what we read and the parser's error.
			$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => $debug, key => "warning_0063", variables => { 
				xml   => $crm_mon_data,
				error => $@,
			}});
		}
		else
		{
			# Successful parse!
			$problem = 0;
			$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { problem => $problem }});
			foreach my $resource ($dom->findnodes('/pacemaker-result/resources/resource'))
			{
				# We only care about servers managed by our resource agent.
				next if $resource->{resource_agent} ne "ocf::alteeve:server";
				my $id = $resource->{id};
				$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { id => $id }});
				
				# Store every attribute on the <resource> element (role, active, blocked,
				# failed, managed, orphaned, etc) under '...::variables::<name>'.
				foreach my $variable (sort {$a cmp $b} keys %{$resource})
				{
					next if $variable eq "id";
					$anvil->data->{crm_mon}{parsed}{'pacemaker-result'}{resources}{resource}{$id}{variables}{$variable} = $resource->{$variable};
					$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { 
						"crm_mon::parsed::pacemaker-result::resources::resource::${id}::variables::${variable}" => $anvil->data->{crm_mon}{parsed}{'pacemaker-result'}{resources}{resource}{$id}{variables}{$variable}, 
					}});
				}
				
				# A '<node>' child is only present for running resources; it tells us which
				# host the server is on.
				foreach my $node ($resource->findnodes('./node'))
				{
					my $node_id   = $node->{id};
					my $node_name = $node->{name};
					$anvil->data->{crm_mon}{parsed}{'pacemaker-result'}{resources}{resource}{$id}{host}{node_name} = $node_name;
					$anvil->data->{crm_mon}{parsed}{'pacemaker-result'}{resources}{resource}{$id}{host}{node_id}   = $node_id;
					$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { 
						"crm_mon::parsed::pacemaker-result::resources::resource::${id}::host::node_name" => $anvil->data->{crm_mon}{parsed}{'pacemaker-result'}{resources}{resource}{$id}{host}{node_name}, 
						"crm_mon::parsed::pacemaker-result::resources::resource::${id}::host::node_id"   => $anvil->data->{crm_mon}{parsed}{'pacemaker-result'}{resources}{resource}{$id}{host}{node_id}, 
					}});
				}
			}
		}
	}
	return($problem);
}

@ -126,33 +126,37 @@ sub agent_startup
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 0, priority => "err", key => "log_0020", variables => { method => "ScanCore->agent_startup()", parameter => "agent" }});
return("!!error!!");
}
if ((not $tables) or (ref($tables) ne "ARRAY") or (@{$tables} == 0))
if ((not $tables) or (ref($tables) ne "ARRAY"))
{
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 0, priority => "err", key => "log_0020", variables => { method => "ScanCore->agent_startup()", parameter => "tables" }});
return("!!error!!");
}
# Append our tables
foreach my $table (@{$tables})
# It's possible that some agents don't have a database (or use core database tables only)
if (@{$tables} > 0)
{
push @{$anvil->data->{sys}{database}{check_tables}}, $table;
# Append our tables
foreach my $table (@{$tables})
{
push @{$anvil->data->{sys}{database}{check_tables}}, $table;
}
# Connect to DBs.
$anvil->Database->connect({debug => $debug});
$anvil->Log->entry({source => $agent, line => __LINE__, level => $debug, secure => 0, key => "log_0132"});
if (not $anvil->data->{sys}{database}{connections})
{
# No databases, exit.
$anvil->Log->entry({source => $agent, line => __LINE__, 'print' => 1, level => 0, secure => 0, key => "error_0003"});
return(1);
}
# Make sure our schema is loaded.
$anvil->Database->check_agent_data({
debug => $debug,
agent => $agent,
});
}
# Connect to DBs.
$anvil->Database->connect({debug => $debug});
$anvil->Log->entry({source => $agent, line => __LINE__, level => $debug, secure => 0, key => "log_0132"});
if (not $anvil->data->{sys}{database}{connections})
{
# No databases, exit.
$anvil->Log->entry({source => $agent, line => __LINE__, 'print' => 1, level => 0, secure => 0, key => "error_0003"});
return(1);
}
# Make sure our schema is loaded.
$anvil->Database->check_agent_data({
debug => $debug,
agent => $agent,
});
# Read in our word strings.
my $words_file = $anvil->data->{path}{directories}{scan_agents}."/".$agent."/".$agent.".xml";

@ -34,11 +34,21 @@ my $anvil = Anvil::Tools->new({log_level => 2, log_secure => 1});
$anvil->Log->level({set => 2});
$anvil->Log->secure({set => 1});
$anvil->data->{scancore}{'scan-cluster'}{disable} = 0;
$anvil->data->{switches}{force} = 0;
$anvil->Storage->read_config();
# Read switches
$anvil->Get->switches;
# If we're disabled and '--force' wasn't used, exit.
if (($anvil->data->{scancore}{'scan-cluster'}{disable}) && (not $anvil->data->{switches}{force}))
{
# Exit.
$anvil->nice_exit({exit_code => 0});
}
# These are the tables used by this agent. The order matters as it controls the order in which the
# tables are created and sync'ed. For purges, this array is walked backwards.
$anvil->data->{scancore}{'scan-cluster'}{tables} = ["scan_cluster"];
@ -141,4 +151,4 @@ sub collect_data
return(0);
}
}

@ -32,11 +32,13 @@ $anvil->Log->level({set => 2});
$anvil->Log->secure({set => 1});
# These are the thresholds for when to alert that swap is running out.
$anvil->data->{scancore}{'scan-hardware'}{swap}{high_threshold} = 75;
$anvil->data->{scancore}{'scan-hardware'}{swap}{clear_threshold} = 25;
$anvil->data->{scancore}{'scan-hardware'}{ram}{high_threshold} = 1073741824; # 1 GiB
$anvil->data->{scancore}{'scan-hardware'}{disable} = 0;
$anvil->data->{scancore}{'scan-hardware'}{ram}{clear_threshold} = 134217728; # 128 MiB
$anvil->data->{scancore}{'scan-hardware'}{ram}{high_threshold} = 1073741824; # 1 GiB
$anvil->data->{scancore}{'scan-hardware'}{score}{less_ram} = 5;
$anvil->data->{scancore}{'scan-hardware'}{swap}{clear_threshold} = 25;
$anvil->data->{scancore}{'scan-hardware'}{swap}{high_threshold} = 75;
$anvil->data->{switches}{force} = 0;
$anvil->Storage->read_config();
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 1, key => "log_0115", variables => { program => $THIS_FILE }});
@ -44,6 +46,13 @@ $anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 1, key => "
# Read switches
$anvil->Get->switches;
# If we're disabled and '--force' wasn't used, exit.
if (($anvil->data->{scancore}{'scan-hardware'}{disable}) && (not $anvil->data->{switches}{force}))
{
# Exit.
$anvil->nice_exit({exit_code => 0});
}
# These are the tables used by this agent. The order matters as it controls the order in which the
# tables are created and sync'ed. For purges, this array is walked backwards.
$anvil->data->{scancore}{'scan-hardware'}{tables} = ["scan_hardware", "scan_hardware_ram_modules"];

@ -0,0 +1,101 @@
#!/usr/bin/perl
#
# This scans the nodes and DR host for VMs
#
# NOTE: The data stored here is not bound to a given host. As such, only hosted VMs are processed.
#
# Examples;
#
# Exit codes;
# 0 = Normal exit.
# 1 = Startup failure (no DB, bad file read, etc)
#
# TODO:
# -
#
use strict;
use warnings;
use Anvil::Tools;
use Data::Dumper;
# Disable buffering
$| = 1;
# Derive this script's file name and its directory from '$0'.
my $THIS_FILE = ($0 =~ /^.*\/(.*)$/)[0];
my $running_directory = ($0 =~ /^(.*?)\/$THIS_FILE$/)[0];
if (($running_directory =~ /^\./) && ($ENV{PWD}))
{
# Expand a relative './...' path using the shell's PWD so later path use is absolute.
$running_directory =~ s/^\./$ENV{PWD}/;
}
my $anvil = Anvil::Tools->new({log_level => 2, log_secure => 1});
$anvil->Log->level({set => 2});
$anvil->Log->secure({set => 1});
# Defaults; 'scancore::scan-server::disable' can be overridden in anvil.conf, and '--force'
# (read below via Get->switches) overrides the disable flag.
$anvil->data->{scancore}{'scan-server'}{disable} = 0;
$anvil->data->{switches}{force} = 0;
$anvil->Storage->read_config();
# Read switches
$anvil->Get->switches;
# If we're disabled and '--force' wasn't used, exit.
if (($anvil->data->{scancore}{'scan-server'}{disable}) && (not $anvil->data->{switches}{force}))
{
# Exit.
$anvil->nice_exit({exit_code => 0});
}
# This scan agent only uses core tables (server and definitions).
# NOTE(review): '[""]' is a one-element list holding an empty string, not an empty list. If
# ScanCore->agent_startup() treats any non-empty list as "has tables", this pushes an empty
# table name onto 'sys::database::check_tables' -- confirm whether '[]' was intended here.
$anvil->data->{scancore}{'scan-server'}{tables} = [""];
# Handle start-up tasks
my $problem = $anvil->ScanCore->agent_startup({
debug => 3,
agent => $THIS_FILE,
tables => $anvil->data->{scancore}{'scan-server'}{tables},
});
if ($problem)
{
# Startup failed (no database, bad words file read, etc); exit code '1' per the header.
$anvil->nice_exit({exit_code => 1});
}
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 1, key => "scan_server_log_0001", variables => { program => $THIS_FILE }});
# There are no tables for this agent, so '--purge' is useless here.
# Before we do anything, are we a node or a DR host?
my $host_type = $anvil->Get->host_type;
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => { host_type => $host_type }});
if ($host_type eq "striker")
{
# Striker dashboards don't host servers, so there is nothing for this agent to do; exit quietly.
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 1, key => "scan_server_log_0002", variables => { host_type => $host_type }});
$anvil->nice_exit({exit_code => 0});
}
# Read the data.
collect_data($anvil);
# Read last scan
$anvil->nice_exit({exit_code => 0});
#############################################################################################################
# Functions #
#############################################################################################################
# This reads in all the data we can find on the local system
sub collect_data
{
my ($anvil) = @_;
# NOTE(review): stub -- no data is collected yet (see the TODO at the top of the file).
return(0);
}

@ -0,0 +1 @@
-- There are no tables needed by scan-server, it only uses core tables.

@ -0,0 +1,30 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Company: Alteeve's Niche, Inc.
License: GPL v2+
Author: Madison Kelly <mkelly@alteeve.ca>
NOTE: All string keys MUST be prefixed with the agent name! ie: 'scan_server_log_0001'.
-->
<words>
<meta version="3.0.0" languages="en_CA,jp"/>
<!-- Canadian English -->
<language name="en_CA" long_name="Canadian English" description="ScanCore scan agent that monitors hosted servers (virtual machines) on nodes and DR hosts.">
<!-- Alert entries -->
<key name="scan_server_alert_0001"></key>
<!-- Log entries -->
<key name="scan_server_log_0001">Starting: [#!variable!program!#].</key>
<key name="scan_server_log_0002">This host is a: [#!variable!host_type!#], this agent is only useful on nodes and DR hosts. Exiting.</key>
<!-- Message entries (usually meant to be alerts) -->
<key name="scan_server_message_0001">We're node 2, and node 1 is running as well. Exiting as only one node needs to run this agent.</key>
<!-- Units -->
<key name="scan_server_unit_0001"></key>
</language>
</words>

@ -1840,6 +1840,18 @@ The error was:
<key name="warning_0059">[ Warning ] - The server: [#!variable!server!#] was asked to be booted on: [#!variable!requested_node!#], but it is is already running on: [#!variable!current_host!#].</key>
<key name="warning_0060">[ Warning ] - The server: [#!variable!server!#] was asked to be shutdown, but it's in an unexpected state: [#!variable!state!#] on the host: [#!variable!current_host!#]. Aborting.</key>
<key name="warning_0061">[ Warning ] - The server: [#!variable!server!#] was asked to be migrated to: [#!variable!requested_node!#], but the server is off. Aborting.</key>
<key name="warning_0062">[ Warning ] - Failed to read the 'crm_mon' output. Is the cluster started?</key>
<key name="warning_0063">[ Warning ] - Failed to parse the XML output from 'crm_mon'. The XML read was:
========
#!variable!xml!#
========
The error was:
========
#!variable!error!#
========
</key>
</language>
<!-- 日本語 -->

@ -29,292 +29,29 @@ print "Connecting to the database(s);\n";
$anvil->Database->connect({debug => 3});
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, 'print' => 1, level => 2, secure => 0, key => "log_0132"});
$anvil->Cluster->shutdown_server({
debug => 2,
server => "srv07-el6",
});
$anvil->Cluster->shutdown_server({
debug => 2,
server => "srv01-sql",
});
exit;
# $anvil->Cluster->shutdown_server({
# debug => 2,
# server => "srv07-el6",
# });
# $anvil->Cluster->shutdown_server({
# debug => 2,
# server => "srv01-sql",
# });
# exit;
my $cib = '<cib crm_feature_set="3.3.0" validate-with="pacemaker-3.2" epoch="453" num_updates="8" admin_epoch="0" cib-last-written="Thu Sep 24 01:26:31 2020" update-origin="mk-a02n01" update-client="cibadmin" update-user="root" have-quorum="1" dc-uuid="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
<nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.0.3-5.el8_2.1-4b1f869f0f"/>
<nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
<nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="mk-anvil-02"/>
<nvpair id="cib-bootstrap-options-stonith-max-attempts" name="stonith-max-attempts" value="INFINITY"/>
<nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"/>
<nvpair id="cib-bootstrap-options-maintenance-mode" name="maintenance-mode" value="false"/>
<nvpair id="cib-bootstrap-options-last-lrm-refresh" name="last-lrm-refresh" value="1600924958"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="1" uname="mk-a02n01"/>
<node id="2" uname="mk-a02n02"/>
</nodes>
<resources>
<primitive class="stonith" id="ipmilan_node1" type="fence_ipmilan">
<instance_attributes id="ipmilan_node1-instance_attributes">
<nvpair id="ipmilan_node1-instance_attributes-ipaddr" name="ipaddr" value="10.201.13.1"/>
<nvpair id="ipmilan_node1-instance_attributes-password" name="password" value="another secret p"/>
<nvpair id="ipmilan_node1-instance_attributes-pcmk_host_list" name="pcmk_host_list" value="mk-a02n01"/>
<nvpair id="ipmilan_node1-instance_attributes-username" name="username" value="admin"/>
</instance_attributes>
<operations>
<op id="ipmilan_node1-monitor-interval-60" interval="60" name="monitor"/>
</operations>
</primitive>
<primitive class="stonith" id="apc_snmp_node1_mk-pdu01" type="fence_apc_snmp">
<instance_attributes id="apc_snmp_node1_mk-pdu01-instance_attributes">
<nvpair id="apc_snmp_node1_mk-pdu01-instance_attributes-ip" name="ip" value="10.201.2.3"/>
<nvpair id="apc_snmp_node1_mk-pdu01-instance_attributes-pcmk_host_list" name="pcmk_host_list" value="mk-a02n01"/>
<nvpair id="apc_snmp_node1_mk-pdu01-instance_attributes-pcmk_off_action" name="pcmk_off_action" value="reboot"/>
<nvpair id="apc_snmp_node1_mk-pdu01-instance_attributes-port" name="port" value="3"/>
</instance_attributes>
<operations>
<op id="apc_snmp_node1_mk-pdu01-monitor-interval-60" interval="60" name="monitor"/>
</operations>
</primitive>
<primitive class="stonith" id="apc_snmp_node1_mk-pdu02" type="fence_apc_snmp">
<instance_attributes id="apc_snmp_node1_mk-pdu02-instance_attributes">
<nvpair id="apc_snmp_node1_mk-pdu02-instance_attributes-ip" name="ip" value="10.201.2.4"/>
<nvpair id="apc_snmp_node1_mk-pdu02-instance_attributes-pcmk_host_list" name="pcmk_host_list" value="mk-a02n01"/>
<nvpair id="apc_snmp_node1_mk-pdu02-instance_attributes-pcmk_off_action" name="pcmk_off_action" value="reboot"/>
<nvpair id="apc_snmp_node1_mk-pdu02-instance_attributes-port" name="port" value="3"/>
</instance_attributes>
<operations>
<op id="apc_snmp_node1_mk-pdu02-monitor-interval-60" interval="60" name="monitor"/>
</operations>
</primitive>
<primitive class="stonith" id="ipmilan_node2" type="fence_ipmilan">
<instance_attributes id="ipmilan_node2-instance_attributes">
<nvpair id="ipmilan_node2-instance_attributes-ipaddr" name="ipaddr" value="10.201.13.2"/>
<nvpair id="ipmilan_node2-instance_attributes-password" name="password" value="another secret p"/>
<nvpair id="ipmilan_node2-instance_attributes-pcmk_host_list" name="pcmk_host_list" value="mk-a02n02"/>
<nvpair id="ipmilan_node2-instance_attributes-username" name="username" value="admin"/>
</instance_attributes>
<operations>
<op id="ipmilan_node2-monitor-interval-60" interval="60" name="monitor"/>
</operations>
</primitive>
<primitive class="stonith" id="apc_snmp_node2_mk-pdu01" type="fence_apc_snmp">
<instance_attributes id="apc_snmp_node2_mk-pdu01-instance_attributes">
<nvpair id="apc_snmp_node2_mk-pdu01-instance_attributes-ip" name="ip" value="10.201.2.3"/>
<nvpair id="apc_snmp_node2_mk-pdu01-instance_attributes-pcmk_host_list" name="pcmk_host_list" value="mk-a02n02"/>
<nvpair id="apc_snmp_node2_mk-pdu01-instance_attributes-pcmk_off_action" name="pcmk_off_action" value="reboot"/>
<nvpair id="apc_snmp_node2_mk-pdu01-instance_attributes-port" name="port" value="4"/>
</instance_attributes>
<operations>
<op id="apc_snmp_node2_mk-pdu01-monitor-interval-60" interval="60" name="monitor"/>
</operations>
</primitive>
<primitive class="stonith" id="apc_snmp_node2_mk-pdu02" type="fence_apc_snmp">
<instance_attributes id="apc_snmp_node2_mk-pdu02-instance_attributes">
<nvpair id="apc_snmp_node2_mk-pdu02-instance_attributes-ip" name="ip" value="10.201.2.4"/>
<nvpair id="apc_snmp_node2_mk-pdu02-instance_attributes-pcmk_host_list" name="pcmk_host_list" value="mk-a02n02"/>
<nvpair id="apc_snmp_node2_mk-pdu02-instance_attributes-pcmk_off_action" name="pcmk_off_action" value="reboot"/>
<nvpair id="apc_snmp_node2_mk-pdu02-instance_attributes-port" name="port" value="4"/>
</instance_attributes>
<operations>
<op id="apc_snmp_node2_mk-pdu02-monitor-interval-60" interval="60" name="monitor"/>
</operations>
</primitive>
<primitive class="ocf" id="srv07-el6" provider="alteeve" type="server">
<instance_attributes id="srv07-el6-instance_attributes">
<nvpair id="srv07-el6-instance_attributes-name" name="name" value="srv07-el6"/>
</instance_attributes>
<meta_attributes id="srv07-el6-meta_attributes">
<nvpair id="srv07-el6-meta_attributes-allow-migrate" name="allow-migrate" value="true"/>
<nvpair id="srv07-el6-meta_attributes-migrate_to" name="migrate_to" value="INFINITY"/>
<nvpair id="srv07-el6-meta_attributes-stop" name="stop" value="INFINITY"/>
</meta_attributes>
<operations>
<op id="srv07-el6-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="600"/>
<op id="srv07-el6-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="INFINITY"/>
<op id="srv07-el6-monitor-interval-60" interval="60" name="monitor" on-fail="block"/>
<op id="srv07-el6-notify-interval-0s" interval="0s" name="notify" timeout="20"/>
<op id="srv07-el6-start-interval-0s" interval="0s" name="start" timeout="30"/>
<op id="srv07-el6-stop-interval-0s" interval="0s" name="stop" timeout="INFINITY"/>
</operations>
</primitive>
<primitive class="ocf" id="srv01-sql" provider="alteeve" type="server">
<instance_attributes id="srv01-sql-instance_attributes">
<nvpair id="srv01-sql-instance_attributes-name" name="name" value="srv01-sql"/>
</instance_attributes>
<meta_attributes id="srv01-sql-meta_attributes">
<nvpair id="srv01-sql-meta_attributes-allow-migrate" name="allow-migrate" value="true"/>
<nvpair id="srv01-sql-meta_attributes-migrate_to" name="migrate_to" value="INFINITY"/>
<nvpair id="srv01-sql-meta_attributes-stop" name="stop" value="INFINITY"/>
</meta_attributes>
<operations>
<op id="srv01-sql-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="600"/>
<op id="srv01-sql-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="INFINITY"/>
<op id="srv01-sql-monitor-interval-60" interval="60" name="monitor" on-fail="block"/>
<op id="srv01-sql-notify-interval-0s" interval="0s" name="notify" timeout="20"/>
<op id="srv01-sql-start-interval-0s" interval="0s" name="start" timeout="30"/>
<op id="srv01-sql-stop-interval-0s" interval="0s" name="stop" timeout="INFINITY"/>
</operations>
</primitive>
<primitive class="ocf" id="srv02-lab1" provider="alteeve" type="server">
<instance_attributes id="srv02-lab1-instance_attributes">
<nvpair id="srv02-lab1-instance_attributes-name" name="name" value="srv02-lab1"/>
</instance_attributes>
<meta_attributes id="srv02-lab1-meta_attributes">
<nvpair id="srv02-lab1-meta_attributes-allow-migrate" name="allow-migrate" value="true"/>
<nvpair id="srv02-lab1-meta_attributes-migrate_to" name="migrate_to" value="INFINITY"/>
<nvpair id="srv02-lab1-meta_attributes-stop" name="stop" value="INFINITY"/>
<nvpair id="srv02-lab1-meta_attributes-target-role" name="target-role" value="Stopped"/>
</meta_attributes>
<operations>
<op id="srv02-lab1-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="600"/>
<op id="srv02-lab1-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="INFINITY"/>
<op id="srv02-lab1-monitor-interval-60" interval="60" name="monitor" on-fail="block"/>
<op id="srv02-lab1-notify-interval-0s" interval="0s" name="notify" timeout="20"/>
<op id="srv02-lab1-start-interval-0s" interval="0s" name="start" timeout="30"/>
<op id="srv02-lab1-stop-interval-0s" interval="0s" name="stop" timeout="INFINITY"/>
</operations>
</primitive>
<primitive class="ocf" id="srv08-m2-psql" provider="alteeve" type="server">
<instance_attributes id="srv08-m2-psql-instance_attributes">
<nvpair id="srv08-m2-psql-instance_attributes-name" name="name" value="srv08-m2-psql"/>
</instance_attributes>
<meta_attributes id="srv08-m2-psql-meta_attributes">
<nvpair id="srv08-m2-psql-meta_attributes-allow-migrate" name="allow-migrate" value="true"/>
<nvpair id="srv08-m2-psql-meta_attributes-migrate_to" name="migrate_to" value="INFINITY"/>
<nvpair id="srv08-m2-psql-meta_attributes-stop" name="stop" value="INFINITY"/>
<nvpair id="srv08-m2-psql-meta_attributes-target-role" name="target-role" value="Stopped"/>
</meta_attributes>
<operations>
<op id="srv08-m2-psql-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="600"/>
<op id="srv08-m2-psql-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="INFINITY"/>
<op id="srv08-m2-psql-monitor-interval-60" interval="60" name="monitor" on-fail="block"/>
<op id="srv08-m2-psql-notify-interval-0s" interval="0s" name="notify" timeout="20"/>
<op id="srv08-m2-psql-start-interval-0s" interval="0s" name="start" timeout="30"/>
<op id="srv08-m2-psql-stop-interval-0s" interval="0s" name="stop" timeout="INFINITY"/>
</operations>
</primitive>
</resources>
<constraints>
<rsc_location id="location-srv08-m2-psql-mk-a02n01-200" node="mk-a02n01" rsc="srv08-m2-psql" score="200"/>
<rsc_location id="location-srv08-m2-psql-mk-a02n02-100" node="mk-a02n02" rsc="srv08-m2-psql" score="100"/>
<rsc_location id="location-srv01-sql-mk-a02n02-100" node="mk-a02n02" rsc="srv01-sql" score="100"/>
<rsc_location id="location-srv01-sql-mk-a02n01-200" node="mk-a02n01" rsc="srv01-sql" score="200"/>
<rsc_location id="location-srv02-lab1-mk-a02n02-100" node="mk-a02n02" rsc="srv02-lab1" score="100"/>
<rsc_location id="location-srv02-lab1-mk-a02n01-200" node="mk-a02n01" rsc="srv02-lab1" score="200"/>
<rsc_location id="location-srv07-el6-mk-a02n01-200" node="mk-a02n01" rsc="srv07-el6" score="200"/>
<rsc_location id="location-srv07-el6-mk-a02n02-100" node="mk-a02n02" rsc="srv07-el6" score="100"/>
</constraints>
<fencing-topology>
<fencing-level devices="ipmilan_node1" id="fl-mk-a02n01-1" index="1" target="mk-a02n01"/>
<fencing-level devices="apc_snmp_node1_mk-pdu01,apc_snmp_node1_mk-pdu02" id="fl-mk-a02n01-2" index="2" target="mk-a02n01"/>
<fencing-level devices="ipmilan_node2" id="fl-mk-a02n02-1" index="1" target="mk-a02n02"/>
<fencing-level devices="apc_snmp_node2_mk-pdu01,apc_snmp_node2_mk-pdu02" id="fl-mk-a02n02-2" index="2" target="mk-a02n02"/>
</fencing-topology>
</configuration>
<status>
<node_state id="1" uname="mk-a02n01" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
<lrm id="1">
<lrm_resources>
<lrm_resource id="ipmilan_node1" type="fence_ipmilan" class="stonith">
<lrm_rsc_op id="ipmilan_node1_last_0" operation_key="ipmilan_node1_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="21:0:0:829209fd-35f2-4626-a9cd-f8a50a62871e" transition-magic="0:0;21:0:0:829209fd-35f2-4626-a9cd-f8a50a62871e" exit-reason="" on_node="mk-a02n01" call-id="42" rc-code="0" op-status="0" interval="0" last-rc-change="1600870208" last-run="1600870208" exec-time="115" queue-time="0" op-digest="230c3c46a7f39ff7a5ff7f1b8aa9f17d" op-secure-params=" password passwd " op-secure-digest="a8bb97c4c1cae8f90e445a0ce85ecc19"/>
<lrm_rsc_op id="ipmilan_node1_monitor_60000" operation_key="ipmilan_node1_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="22:0:0:829209fd-35f2-4626-a9cd-f8a50a62871e" transition-magic="0:0;22:0:0:829209fd-35f2-4626-a9cd-f8a50a62871e" exit-reason="" on_node="mk-a02n01" call-id="45" rc-code="0" op-status="0" interval="60000" last-rc-change="1600870208" exec-time="90" queue-time="0" op-digest="7064441a5f8ccc94d13cc9a1433de0a5" op-secure-params=" password passwd " op-secure-digest="a8bb97c4c1cae8f90e445a0ce85ecc19"/>
</lrm_resource>
<lrm_resource id="apc_snmp_node1_mk-pdu01" type="fence_apc_snmp" class="stonith">
<lrm_rsc_op id="apc_snmp_node1_mk-pdu01_last_0" operation_key="apc_snmp_node1_mk-pdu01_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="2:0:7:829209fd-35f2-4626-a9cd-f8a50a62871e" transition-magic="0:7;2:0:7:829209fd-35f2-4626-a9cd-f8a50a62871e" exit-reason="" on_node="mk-a02n01" call-id="9" rc-code="7" op-status="0" interval="0" last-rc-change="1600870207" last-run="1600870207" exec-time="0" queue-time="0" op-digest="6b6191eeb61cd595ab0a26ec9762f8aa" op-secure-params=" password passwd " op-secure-digest="1dc851b0efa605b4ec3f03e3a3ba62f7"/>
</lrm_resource>
<lrm_resource id="apc_snmp_node1_mk-pdu02" type="fence_apc_snmp" class="stonith">
<lrm_rsc_op id="apc_snmp_node1_mk-pdu02_last_0" operation_key="apc_snmp_node1_mk-pdu02_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="25:0:0:829209fd-35f2-4626-a9cd-f8a50a62871e" transition-magic="0:0;25:0:0:829209fd-35f2-4626-a9cd-f8a50a62871e" exit-reason="" on_node="mk-a02n01" call-id="43" rc-code="0" op-status="0" interval="0" last-rc-change="1600870208" last-run="1600870208" exec-time="907" queue-time="0" op-digest="f4b11aca778aa58d81b7fa096bfe3fb4" op-secure-params=" password passwd " op-secure-digest="78517effd4af72191ac2c0b9d8567fcd"/>
<lrm_rsc_op id="apc_snmp_node1_mk-pdu02_monitor_60000" operation_key="apc_snmp_node1_mk-pdu02_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="26:0:0:829209fd-35f2-4626-a9cd-f8a50a62871e" transition-magic="0:0;26:0:0:829209fd-35f2-4626-a9cd-f8a50a62871e" exit-reason="" on_node="mk-a02n01" call-id="47" rc-code="0" op-status="0" interval="60000" last-rc-change="1600870209" exec-time="1175" queue-time="0" op-digest="da20bfed231d75a3b22f97eb06bb445f" op-secure-params=" password passwd " op-secure-digest="78517effd4af72191ac2c0b9d8567fcd"/>
</lrm_resource>
<lrm_resource id="ipmilan_node2" type="fence_ipmilan" class="stonith">
<lrm_rsc_op id="ipmilan_node2_last_0" operation_key="ipmilan_node2_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="4:0:7:829209fd-35f2-4626-a9cd-f8a50a62871e" transition-magic="0:7;4:0:7:829209fd-35f2-4626-a9cd-f8a50a62871e" exit-reason="" on_node="mk-a02n01" call-id="17" rc-code="7" op-status="0" interval="0" last-rc-change="1600870207" last-run="1600870207" exec-time="0" queue-time="0" op-digest="e759a456df902485096d4a48725ed81c" op-secure-params=" password passwd " op-secure-digest="47989163387c397e63fa3acdbec0d274"/>
</lrm_resource>
<lrm_resource id="apc_snmp_node2_mk-pdu01" type="fence_apc_snmp" class="stonith">
<lrm_rsc_op id="apc_snmp_node2_mk-pdu01_last_0" operation_key="apc_snmp_node2_mk-pdu01_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="29:0:0:829209fd-35f2-4626-a9cd-f8a50a62871e" transition-magic="0:0;29:0:0:829209fd-35f2-4626-a9cd-f8a50a62871e" exit-reason="" on_node="mk-a02n01" call-id="44" rc-code="0" op-status="0" interval="0" last-rc-change="1600870208" last-run="1600870208" exec-time="874" queue-time="0" op-digest="3d4af69481cb01c8c8f0f8af95940b99" op-secure-params=" password passwd " op-secure-digest="fd2959d25b0a20f6d1bc630f7565fd78"/>
<lrm_rsc_op id="apc_snmp_node2_mk-pdu01_monitor_60000" operation_key="apc_snmp_node2_mk-pdu01_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="30:0:0:829209fd-35f2-4626-a9cd-f8a50a62871e" transition-magic="0:0;30:0:0:829209fd-35f2-4626-a9cd-f8a50a62871e" exit-reason="" on_node="mk-a02n01" call-id="46" rc-code="0" op-status="0" interval="60000" last-rc-change="1600870209" exec-time="789" queue-time="0" op-digest="5b8d168b9627dad87e1ba2edace17f1e" op-secure-params=" password passwd " op-secure-digest="fd2959d25b0a20f6d1bc630f7565fd78"/>
</lrm_resource>
<lrm_resource id="apc_snmp_node2_mk-pdu02" type="fence_apc_snmp" class="stonith">
<lrm_rsc_op id="apc_snmp_node2_mk-pdu02_last_0" operation_key="apc_snmp_node2_mk-pdu02_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="6:0:7:829209fd-35f2-4626-a9cd-f8a50a62871e" transition-magic="0:7;6:0:7:829209fd-35f2-4626-a9cd-f8a50a62871e" exit-reason="" on_node="mk-a02n01" call-id="25" rc-code="7" op-status="0" interval="0" last-rc-change="1600870207" last-run="1600870207" exec-time="0" queue-time="0" op-digest="7787bf20740a07e14145707988b18000" op-secure-params=" password passwd " op-secure-digest="11d1e757682ff46234d9816e06534953"/>
</lrm_resource>
<lrm_resource id="srv07-el6" type="server" class="ocf" provider="alteeve">
<lrm_rsc_op id="srv07-el6_last_0" operation_key="srv07-el6_migrate_from_0" operation="migrate_from" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="25:85:0:829209fd-35f2-4626-a9cd-f8a50a62871e" transition-magic="0:0;25:85:0:829209fd-35f2-4626-a9cd-f8a50a62871e" exit-reason="" on_node="mk-a02n01" call-id="75" rc-code="0" op-status="0" interval="0" last-rc-change="1600925198" last-run="1600925198" exec-time="551" queue-time="0" op-digest="41dcb3443c331f2fe7ae92962905159f" migrate_source="mk-a02n02" migrate_target="mk-a02n01"/>
<lrm_rsc_op id="srv07-el6_monitor_60000" operation_key="srv07-el6_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="23:85:0:829209fd-35f2-4626-a9cd-f8a50a62871e" transition-magic="0:0;23:85:0:829209fd-35f2-4626-a9cd-f8a50a62871e" exit-reason="" on_node="mk-a02n01" call-id="76" rc-code="0" op-status="0" interval="60000" last-rc-change="1600925201" exec-time="541" queue-time="0" op-digest="65d0f0c9227f2593835f5de6c9cb9d0e"/>
</lrm_resource>
<lrm_resource id="srv08-m2-psql" type="server" class="ocf" provider="alteeve">
<lrm_rsc_op id="srv08-m2-psql_last_0" operation_key="srv08-m2-psql_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="10:0:7:829209fd-35f2-4626-a9cd-f8a50a62871e" transition-magic="0:7;10:0:7:829209fd-35f2-4626-a9cd-f8a50a62871e" exit-reason="" on_node="mk-a02n01" call-id="41" rc-code="7" op-status="0" interval="0" last-rc-change="1600870208" last-run="1600870208" exec-time="593" queue-time="0" op-digest="79b65e1a3736d1835da977ef2dee200d"/>
</lrm_resource>
<lrm_resource id="srv01-sql" type="server" class="ocf" provider="alteeve">
<lrm_rsc_op id="srv01-sql_last_0" operation_key="srv01-sql_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="7:78:7:829209fd-35f2-4626-a9cd-f8a50a62871e" transition-magic="0:0;7:78:7:829209fd-35f2-4626-a9cd-f8a50a62871e" exit-reason="" on_node="mk-a02n01" call-id="64" rc-code="0" op-status="0" interval="0" last-rc-change="1600924959" last-run="1600924959" exec-time="547" queue-time="0" op-digest="7acff34e45470837bd51c6d670b9878b"/>
<lrm_rsc_op id="srv01-sql_last_failure_0" operation_key="srv01-sql_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="7:78:7:829209fd-35f2-4626-a9cd-f8a50a62871e" transition-magic="0:0;7:78:7:829209fd-35f2-4626-a9cd-f8a50a62871e" exit-reason="" on_node="mk-a02n01" call-id="64" rc-code="0" op-status="0" interval="0" last-rc-change="1600924959" last-run="1600924959" exec-time="547" queue-time="0" op-digest="7acff34e45470837bd51c6d670b9878b"/>
<lrm_rsc_op id="srv01-sql_monitor_60000" operation_key="srv01-sql_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="24:79:0:829209fd-35f2-4626-a9cd-f8a50a62871e" transition-magic="0:0;24:79:0:829209fd-35f2-4626-a9cd-f8a50a62871e" exit-reason="" on_node="mk-a02n01" call-id="69" rc-code="0" op-status="0" interval="60000" last-rc-change="1600924960" exec-time="564" queue-time="0" op-digest="0434e67501e3e7af47a547723c35b411"/>
</lrm_resource>
<lrm_resource id="srv02-lab1" type="server" class="ocf" provider="alteeve">
<lrm_rsc_op id="srv02-lab1_last_0" operation_key="srv02-lab1_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="8:78:7:829209fd-35f2-4626-a9cd-f8a50a62871e" transition-magic="0:7;8:78:7:829209fd-35f2-4626-a9cd-f8a50a62871e" exit-reason="" on_node="mk-a02n01" call-id="68" rc-code="7" op-status="0" interval="0" last-rc-change="1600924959" last-run="1600924959" exec-time="546" queue-time="0" op-digest="c7a4471d0df53d7aab5392a1ba7d67e1"/>
</lrm_resource>
</lrm_resources>
</lrm>
<transient_attributes id="1">
<instance_attributes id="status-1"/>
</transient_attributes>
</node_state>
<node_state id="2" uname="mk-a02n02" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
<lrm id="2">
<lrm_resources>
<lrm_resource id="ipmilan_node1" type="fence_ipmilan" class="stonith">
<lrm_rsc_op id="ipmilan_node1_last_0" operation_key="ipmilan_node1_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="11:0:7:829209fd-35f2-4626-a9cd-f8a50a62871e" transition-magic="0:7;11:0:7:829209fd-35f2-4626-a9cd-f8a50a62871e" exit-reason="" on_node="mk-a02n02" call-id="5" rc-code="7" op-status="0" interval="0" last-rc-change="1600870206" last-run="1600870206" exec-time="2" queue-time="0" op-digest="230c3c46a7f39ff7a5ff7f1b8aa9f17d" op-secure-params=" password passwd " op-secure-digest="a8bb97c4c1cae8f90e445a0ce85ecc19"/>
</lrm_resource>
<lrm_resource id="apc_snmp_node1_mk-pdu01" type="fence_apc_snmp" class="stonith">
<lrm_rsc_op id="apc_snmp_node1_mk-pdu01_last_0" operation_key="apc_snmp_node1_mk-pdu01_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="23:0:0:829209fd-35f2-4626-a9cd-f8a50a62871e" transition-magic="0:0;23:0:0:829209fd-35f2-4626-a9cd-f8a50a62871e" exit-reason="" on_node="mk-a02n02" call-id="42" rc-code="0" op-status="0" interval="0" last-rc-change="1600870208" last-run="1600870208" exec-time="849" queue-time="1" op-digest="6b6191eeb61cd595ab0a26ec9762f8aa" op-secure-params=" password passwd " op-secure-digest="1dc851b0efa605b4ec3f03e3a3ba62f7"/>
<lrm_rsc_op id="apc_snmp_node1_mk-pdu01_monitor_60000" operation_key="apc_snmp_node1_mk-pdu01_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="24:0:0:829209fd-35f2-4626-a9cd-f8a50a62871e" transition-magic="0:0;24:0:0:829209fd-35f2-4626-a9cd-f8a50a62871e" exit-reason="" on_node="mk-a02n02" call-id="46" rc-code="0" op-status="0" interval="60000" last-rc-change="1600870209" exec-time="755" queue-time="0" op-digest="9dd197b1c8871a78c74a32b26949998d" op-secure-params=" password passwd " op-secure-digest="1dc851b0efa605b4ec3f03e3a3ba62f7"/>
</lrm_resource>
<lrm_resource id="apc_snmp_node1_mk-pdu02" type="fence_apc_snmp" class="stonith">
<lrm_rsc_op id="apc_snmp_node1_mk-pdu02_last_0" operation_key="apc_snmp_node1_mk-pdu02_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="13:0:7:829209fd-35f2-4626-a9cd-f8a50a62871e" transition-magic="0:7;13:0:7:829209fd-35f2-4626-a9cd-f8a50a62871e" exit-reason="" on_node="mk-a02n02" call-id="13" rc-code="7" op-status="0" interval="0" last-rc-change="1600870207" last-run="1600870207" exec-time="0" queue-time="0" op-digest="f4b11aca778aa58d81b7fa096bfe3fb4" op-secure-params=" password passwd " op-secure-digest="78517effd4af72191ac2c0b9d8567fcd"/>
</lrm_resource>
<lrm_resource id="ipmilan_node2" type="fence_ipmilan" class="stonith">
<lrm_rsc_op id="ipmilan_node2_last_0" operation_key="ipmilan_node2_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="27:0:0:829209fd-35f2-4626-a9cd-f8a50a62871e" transition-magic="0:0;27:0:0:829209fd-35f2-4626-a9cd-f8a50a62871e" exit-reason="" on_node="mk-a02n02" call-id="43" rc-code="0" op-status="0" interval="0" last-rc-change="1600870208" last-run="1600870208" exec-time="106" queue-time="0" op-digest="e759a456df902485096d4a48725ed81c" op-secure-params=" password passwd " op-secure-digest="47989163387c397e63fa3acdbec0d274"/>
<lrm_rsc_op id="ipmilan_node2_monitor_60000" operation_key="ipmilan_node2_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="28:0:0:829209fd-35f2-4626-a9cd-f8a50a62871e" transition-magic="0:0;28:0:0:829209fd-35f2-4626-a9cd-f8a50a62871e" exit-reason="" on_node="mk-a02n02" call-id="45" rc-code="0" op-status="0" interval="60000" last-rc-change="1600870208" exec-time="87" queue-time="0" op-digest="467ef5117cbb737e5c6fc23b58809791" op-secure-params=" password passwd " op-secure-digest="47989163387c397e63fa3acdbec0d274"/>
</lrm_resource>
<lrm_resource id="apc_snmp_node2_mk-pdu01" type="fence_apc_snmp" class="stonith">
<lrm_rsc_op id="apc_snmp_node2_mk-pdu01_last_0" operation_key="apc_snmp_node2_mk-pdu01_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="15:0:7:829209fd-35f2-4626-a9cd-f8a50a62871e" transition-magic="0:7;15:0:7:829209fd-35f2-4626-a9cd-f8a50a62871e" exit-reason="" on_node="mk-a02n02" call-id="21" rc-code="7" op-status="0" interval="0" last-rc-change="1600870207" last-run="1600870207" exec-time="0" queue-time="0" op-digest="3d4af69481cb01c8c8f0f8af95940b99" op-secure-params=" password passwd " op-secure-digest="fd2959d25b0a20f6d1bc630f7565fd78"/>
</lrm_resource>
<lrm_resource id="apc_snmp_node2_mk-pdu02" type="fence_apc_snmp" class="stonith">
<lrm_rsc_op id="apc_snmp_node2_mk-pdu02_last_0" operation_key="apc_snmp_node2_mk-pdu02_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="31:0:0:829209fd-35f2-4626-a9cd-f8a50a62871e" transition-magic="0:0;31:0:0:829209fd-35f2-4626-a9cd-f8a50a62871e" exit-reason="" on_node="mk-a02n02" call-id="44" rc-code="0" op-status="0" interval="0" last-rc-change="1600870208" last-run="1600870208" exec-time="872" queue-time="0" op-digest="7787bf20740a07e14145707988b18000" op-secure-params=" password passwd " op-secure-digest="11d1e757682ff46234d9816e06534953"/>
<lrm_rsc_op id="apc_snmp_node2_mk-pdu02_monitor_60000" operation_key="apc_snmp_node2_mk-pdu02_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="32:0:0:829209fd-35f2-4626-a9cd-f8a50a62871e" transition-magic="0:0;32:0:0:829209fd-35f2-4626-a9cd-f8a50a62871e" exit-reason="" on_node="mk-a02n02" call-id="47" rc-code="0" op-status="0" interval="60000" last-rc-change="1600870209" exec-time="759" queue-time="0" op-digest="910a16919098d7bca091e972cf8844f5" op-secure-params=" password passwd " op-secure-digest="11d1e757682ff46234d9816e06534953"/>
</lrm_resource>
<lrm_resource id="srv01-sql" type="server" class="ocf" provider="alteeve">
<lrm_rsc_op id="srv01-sql_last_0" operation_key="srv01-sql_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="18:0:7:829209fd-35f2-4626-a9cd-f8a50a62871e" transition-magic="0:7;18:0:7:829209fd-35f2-4626-a9cd-f8a50a62871e" exit-reason="" on_node="mk-a02n02" call-id="33" rc-code="7" op-status="0" interval="0" last-rc-change="1600870208" last-run="1600870208" exec-time="564" queue-time="0" op-digest="7acff34e45470837bd51c6d670b9878b"/>
</lrm_resource>
<lrm_resource id="srv02-lab1" type="server" class="ocf" provider="alteeve">
<lrm_rsc_op id="srv02-lab1_last_0" operation_key="srv02-lab1_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="19:0:7:829209fd-35f2-4626-a9cd-f8a50a62871e" transition-magic="0:7;19:0:7:829209fd-35f2-4626-a9cd-f8a50a62871e" exit-reason="" on_node="mk-a02n02" call-id="37" rc-code="7" op-status="0" interval="0" last-rc-change="1600870208" last-run="1600870208" exec-time="558" queue-time="0" op-digest="c7a4471d0df53d7aab5392a1ba7d67e1"/>
</lrm_resource>
<lrm_resource id="srv08-m2-psql" type="server" class="ocf" provider="alteeve">
<lrm_rsc_op id="srv08-m2-psql_last_0" operation_key="srv08-m2-psql_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="20:0:7:829209fd-35f2-4626-a9cd-f8a50a62871e" transition-magic="0:7;20:0:7:829209fd-35f2-4626-a9cd-f8a50a62871e" exit-reason="" on_node="mk-a02n02" call-id="41" rc-code="7" op-status="0" interval="0" last-rc-change="1600870208" last-run="1600870208" exec-time="562" queue-time="0" op-digest="79b65e1a3736d1835da977ef2dee200d"/>
</lrm_resource>
<lrm_resource id="srv07-el6" type="server" class="ocf" provider="alteeve">
<lrm_rsc_op id="srv07-el6_last_0" operation_key="srv07-el6_stop_0" operation="stop" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="21:85:0:829209fd-35f2-4626-a9cd-f8a50a62871e" transition-magic="0:0;21:85:0:829209fd-35f2-4626-a9cd-f8a50a62871e" exit-reason="" on_node="mk-a02n02" call-id="64" rc-code="0" op-status="0" interval="0" last-rc-change="1600925199" last-run="1600925199" exec-time="1881" queue-time="0" op-digest="41dcb3443c331f2fe7ae92962905159f" migrate_source="mk-a02n02" migrate_target="mk-a02n01"/>
<lrm_rsc_op id="srv07-el6_last_failure_0" operation_key="srv07-el6_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="9:78:7:829209fd-35f2-4626-a9cd-f8a50a62871e" transition-magic="0:0;9:78:7:829209fd-35f2-4626-a9cd-f8a50a62871e" exit-reason="" on_node="mk-a02n02" call-id="55" rc-code="0" op-status="0" interval="0" last-rc-change="1600924959" last-run="1600924959" exec-time="552" queue-time="0" op-digest="41dcb3443c331f2fe7ae92962905159f"/>
<lrm_rsc_op id="srv07-el6_monitor_60000" operation_key="srv07-el6_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="23:83:0:829209fd-35f2-4626-a9cd-f8a50a62871e" transition-magic="0:0;23:83:0:829209fd-35f2-4626-a9cd-f8a50a62871e" exit-reason="" on_node="mk-a02n02" call-id="61" rc-code="0" op-status="0" interval="60000" last-rc-change="1600925173" exec-time="539" queue-time="0" op-digest="65d0f0c9227f2593835f5de6c9cb9d0e"/>
</lrm_resource>
</lrm_resources>
</lrm>
<transient_attributes id="2">
<instance_attributes id="status-2"/>
</transient_attributes>
</node_state>
</status>
</cib>
';
my $not_in_cluster = $anvil->Cluster->parse_cib({debug => 2, cib => $cib});
if ($not_in_cluster)
if (0)
{
print "This node isn't in the cluster.\n";
}
else
{
print "CIB parsed.\n";
my $xml = '';
my $problem = $anvil->Cluster->parse_crm_mon({debug => 2, xml => $xml});
if ($problem)
{
print "Problem reading or parsing the 'crm_mon' XML.\n";
}
else
{
print "crm_mon parsed.\n";
}
}
my $problem = $anvil->Cluster->parse_cib({debug => 2});
print "Problem: [".$problem."]\n";

Loading…
Cancel
Save