* Created the Cluster->boot_server, ->shutdown_server and ->migrate_server methods that handle booting, migrating and shutting down servers. Also created the private method ->_set_server_constraint, which is used by migrate and boot to set resource constraints that control where a server boots or migrates to.

* Did more work on parsing server data out of the CIB. There is still an issue with determining which node currently hosts a resource, however.
* Renamed Server->boot to ->boot_virsh, ->shutdown to ->shutdown_virsh and ->migrate to ->migrate_virsh to clarify that these methods work on the raw virsh calls, outside of pacemaker (indeed, they are what the pacemaker RA uses to do what pacemaker asks).
* Got more work done on the scan-cluster SA.
* Created the empty files for the pending scan-server SA.

Signed-off-by: Digimer <digimer@alteeve.ca>
Branch: main
Author: Digimer
Parent: 0f7267eae1
Commit: 4dfe0cb5a0
Changed files:
  755  Anvil/Tools/Cluster.pm
   40  Anvil/Tools/Server.pm
   12  ocf/alteeve/server
   29  scancore-agents/scan-cluster/scan-cluster
  453  scancore-agents/scan-cluster/scan-cluster.sql
    2  scancore-agents/scan-cluster/scan-cluster.xml
    0  scancore-agents/scan-server/scan-server
    0  scancore-agents/scan-server/scan-server.sql
    0  scancore-agents/scan-server/scan-server.xml
   26  share/words.xml
  282  tools/test.pl

@@ -14,11 +14,15 @@ our $VERSION = "3.0.0";
my $THIS_FILE = "Cluster.pm";
### Methods;
# boot_server
# check_node_status
# get_peers
# migrate_server
# parse_cib
# shutdown_server
# start_cluster
# which_node
# _set_server_constraint
=pod
@@ -78,6 +82,162 @@ sub parent
# Public methods #
#############################################################################################################
=head2 boot_server
This uses pacemaker to boot a server.
If there is a problem, C<< !!error!! >> is returned.
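A minimal call might look like this (the server name here is just illustrative):
 my $problem = $anvil->Cluster->boot_server({server => "srv01-sql"});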
Parameters;
=head3 server (required)
This is the name of the server to boot.
=head3 node (optional)
If set, a resource constraint is placed so that the server prefers one node over the other before it boots.
B<< Note >>: This method relies on pacemaker to boot the server. If, for some reason, pacemaker decides the server cannot be booted on the preferred node, it may boot on the other node. As such, this parameter does not guarantee that the server will boot on the target node!
=head3 wait (optional, default '1')
This controls whether the method waits for the server to boot before returning. By default, it will go into a loop and check every 2 seconds to see if the server is running yet. Once it's found to be running, the method returns. If this is set to C<< 0 >>, the method will return as soon as the request to boot the server is issued.
=cut
sub boot_server
{
my $self = shift;
my $parameter = shift;
my $anvil = $self->parent;
my $debug = defined $parameter->{debug} ? $parameter->{debug} : 3;
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => $debug, key => "log_0125", variables => { method => "Cluster->boot_server()" }});
my $node = defined $parameter->{node} ? $parameter->{node} : "";
my $server = defined $parameter->{server} ? $parameter->{server} : "";
my $wait = defined $parameter->{'wait'} ? $parameter->{'wait'} : 1;
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => {
node => $node,
server => $server,
'wait' => $wait,
}});
if (not $server)
{
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 0, priority => "err", key => "log_0020", variables => { method => "Cluster->boot_server()", parameter => "server" }});
return("!!error!!");
}
my $host_type = $anvil->Get->host_type({debug => $debug});
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { host_type => $host_type }});
if ($host_type ne "node")
{
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 0, priority => "err", key => "error_0146", variables => { server => $server }});
return("!!error!!");
}
my $problem = $anvil->Cluster->parse_cib({debug => $debug});
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { problem => $problem }});
if ($problem)
{
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 0, priority => "err", key => "error_0145", variables => { server => $server }});
return('!!error!!');
}
# Is this node fully in the cluster?
if (not $anvil->data->{cib}{parsed}{'local'}{ready})
{
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 0, priority => "err", key => "error_0147", variables => { server => $server }});
return('!!error!!');
}
# Is the server one we know of?
if (not exists $anvil->data->{cib}{parsed}{data}{server}{$server})
{
# The server isn't in the pacemaker config.
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 0, priority => "err", key => "error_0149", variables => { server => $server }});
return('!!error!!');
}
# Is the server already running? If so, do nothing.
my $status = $anvil->data->{cib}{parsed}{data}{server}{$server}{status};
my $host = $anvil->data->{cib}{parsed}{data}{server}{$server}{host};
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => {
status => $status,
host => $host,
}});
if ($status eq "running")
{
# Nothing to do.
if ((not $node) or ($host eq $node))
{
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 2, key => "log_0548", variables => { server => $server }});
return(0);
}
else
{
# It's running, but on the other node.
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 1, key => "warning_0059", variables => {
server => $server,
requested_node => $node,
current_host => $host,
}});
return(0);
}
}
if ($node)
{
$anvil->Cluster->_set_server_constraint({
server => $server,
preferred_node => $node,
});
}
# Now boot the server.
my ($output, $return_code) = $anvil->System->call({debug => 3, shell_call => $anvil->data->{path}{exe}{pcs}." resource enable ".$server});
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => {
output => $output,
return_code => $return_code,
}});
if (not $wait)
{
# We're done.
return(0);
}
# Wait now for the server to start.
my $waiting = 1;
while($waiting)
{
$anvil->Cluster->parse_cib({debug => $debug});
my $status = $anvil->data->{cib}{parsed}{data}{server}{$server}{status};
my $host = $anvil->data->{cib}{parsed}{data}{server}{$server}{host};
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => {
status => $status,
host => $host,
}});
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 2, key => "log_0552", variables => { server => $server }});
if ($host eq "running")
{
# It's up.
$waiting = 0;
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 1, key => "log_0553", variables => { server => $server }});
}
else
{
# Wait a bit and check again.
sleep 2;
}
}
return(0);
}
=head2 check_node_status
This takes a node name (generally the short host name) and, using data from a prior C<< parse_cib >> call (made before calling this method), checks the node's ready state. If the node is ready, C<< 1 >> is returned. If not, C<< 0 >> is returned. If there is a problem, C<< !!error!! >> is returned.
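For example, a check of a node (the node name is illustrative) might look like:
 my $ready = $anvil->Cluster->check_node_status({node_name => "mk-a02n01"});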
@@ -104,7 +264,7 @@ sub check_node_status
if (not $node_name)
{
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 0, priority => "err", key => "log_0020", variables => { method => "Database->get_host_from_uuid()", parameter => "host_uuid" }});
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 0, priority => "err", key => "log_0020", variables => { method => "Cluster->check_node_status()", parameter => "node_name" }});
return("!!error!!");
}
@@ -248,6 +408,181 @@ sub get_peers
return($peer);
}
=head2 migrate_server
This manipulates pacemaker's location constraints to trigger a pacemaker-controlled migration of a server.
This method works by confirming that the server is running and is not already on the target C<< node >>. If the server does indeed need to be migrated, a location constraint is set to give preference to the target node. Optionally, this method can wait until the migration is complete.
B<< Note >>: This method does not make the actual C<< virsh >> call! To perform a migration B<< OUTSIDE >> pacemaker, use C<< Server->migrate_virsh() >>.
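A migration request might look like this (the server and node names are illustrative):
 my $problem = $anvil->Cluster->migrate_server({server => "srv01-sql", node => "mk-a02n02"});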
Parameters;
=head3 server (required)
This is the server to migrate.
=head3 node (required)
This is the name of the node to move the server to.
=head3 wait (optional, default '1')
This controls whether the method waits for the migration to complete before returning. By default, it will go into a loop and check every 2 seconds to see if the server is running on the target node yet. Once it is, the method returns. If this is set to C<< 0 >>, the method will return as soon as the migration has been requested.
=cut
sub migrate_server
{
my $self = shift;
my $parameter = shift;
my $anvil = $self->parent;
my $debug = defined $parameter->{debug} ? $parameter->{debug} : 3;
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => $debug, key => "log_0125", variables => { method => "Cluster->migrate_server()" }});
my $server = defined $parameter->{server} ? $parameter->{server} : "";
my $node = defined $parameter->{node} ? $parameter->{node} : "";
my $wait = defined $parameter->{'wait'} ? $parameter->{'wait'} : 1;
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => {
server => $server,
node => $node,
'wait' => $wait,
}});
if (not $server)
{
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 0, priority => "err", key => "log_0020", variables => { method => "Cluster->migrate_server()", parameter => "server" }});
return("!!error!!");
}
my $host_type = $anvil->Get->host_type({debug => $debug});
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { host_type => $host_type }});
if ($host_type ne "node")
{
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 0, priority => "err", key => "error_0154", variables => { server => $server }});
return("!!error!!");
}
my $problem = $anvil->Cluster->parse_cib({debug => $debug});
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { problem => $problem }});
if ($problem)
{
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 0, priority => "err", key => "error_0155", variables => { server => $server }});
return('!!error!!');
}
# Are both nodes fully in the cluster?
if (not $anvil->data->{cib}{parsed}{'local'}{ready})
{
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 0, priority => "err", key => "error_0156", variables => { server => $server }});
return('!!error!!');
}
if (not $anvil->data->{cib}{parsed}{peer}{ready})
{
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 0, priority => "err", key => "error_0157", variables => { server => $server }});
return('!!error!!');
}
# Is the server one we know of?
if (not exists $anvil->data->{cib}{parsed}{data}{server}{$server})
{
# The server isn't in the pacemaker config.
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 0, priority => "err", key => "error_0158", variables => { server => $server }});
return('!!error!!');
}
# Is the server already running? If so, where?
my $status = $anvil->data->{cib}{parsed}{data}{server}{$server}{status};
my $host = $anvil->data->{cib}{parsed}{data}{server}{$server}{host};
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => {
status => $status,
host => $host,
}});
if ($status eq "off")
{
# It's not running on either node, nothing to do.
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 1, key => "warning_0061", variables => {
server => $server,
requested_node => $node,
}});
return(0);
}
elsif (($status eq "running") && ($host eq $node))
{
# Already running on the target.
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 2, key => "log_0549", variables => {
server => $server,
requested_node => $node,
}});
return(0);
}
elsif ($status ne "running")
{
# The server is in an unknown state, abort.
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 1, key => "warning_0060", variables => {
server => $server,
current_host => $host,
current_state => $status,
}});
return('!!error!!');
}
# TODO: Record that the server is migrating
# change the constraint to trigger the move.
if ($node)
{
$anvil->Cluster->_set_server_constraint({
server => $server,
preferred_node => $node,
});
}
if (not $wait)
{
# We'll leave it to the scan-server scan agent to clear the migration flag from the database.
return(0);
}
# Wait now for the server to migrate.
my $waiting = 1;
while($waiting)
{
$anvil->Cluster->parse_cib({debug => $debug});
my $status = $anvil->data->{cib}{parsed}{data}{server}{$server}{status};
my $host = $anvil->data->{cib}{parsed}{data}{server}{$server}{host};
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => {
status => $status,
host => $host,
}});
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 2, key => "log_0550", variables => {
server => $server,
requested_node => $node,
}});
if (($host eq "running") && ($host eq $node))
{
# It's done.
$waiting = 0;
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 1, key => "log_0551", variables => {
server => $server,
requested_node => $node,
}});
}
else
{
# Wait a bit and check again.
sleep 2;
}
}
return(0);
}
=head2 parse_cib
This reads in the CIB XML and parses it. On success, it returns C<< 0 >>. On failure (ie: pcsd isn't running), returns C<< 1 >>.
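A typical call, followed by reading the parsed data back out of the C<< cib::parsed >> hash (the server name is illustrative), might look like:
 my $problem = $anvil->Cluster->parse_cib();
 my $status  = $anvil->data->{cib}{parsed}{data}{server}{"srv01-sql"}{status};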
@@ -565,7 +900,17 @@ sub parse_cib
}});
# Is this me or the peer?
if (($node_name ne $anvil->Get->host_name) && ($node_name ne $anvil->Get->short_host_name))
if (($node_name eq $anvil->Get->host_name) or ($node_name eq $anvil->Get->short_host_name))
{
# Me.
$anvil->data->{cib}{parsed}{'local'}{ready} = $ready;
$anvil->data->{cib}{parsed}{'local'}{name} = $node_name;
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => {
"cib::parsed::local::ready" => $anvil->data->{cib}{parsed}{'local'}{ready},
"cib::parsed::local::name" => $anvil->data->{cib}{parsed}{'local'}{name},
}});
}
else
{
# It's our peer.
$anvil->data->{cib}{parsed}{peer}{ready} = $ready;
@@ -701,10 +1046,300 @@ sub parse_cib
}});
}
# Hosted server information
foreach my $id (sort {$a cmp $b} keys %{$anvil->data->{cib}{parsed}{cib}{status}{node_state}})
{
my $node_name = $anvil->data->{cib}{parsed}{configuration}{nodes}{$id}{uname};
foreach my $lrm_id (sort {$a cmp $b} keys %{$anvil->data->{cib}{parsed}{cib}{status}{node_state}{$id}{lrm_id}})
{
foreach my $lrm_resource_id (sort {$a cmp $b} keys %{$anvil->data->{cib}{parsed}{cib}{status}{node_state}{$id}{lrm_id}{$lrm_id}{lrm_resource}})
{
my $lrm_resource_operations_count = keys %{$anvil->data->{cib}{parsed}{cib}{status}{node_state}{$id}{lrm_id}{$lrm_id}{lrm_resource}{$lrm_resource_id}{lrm_rsc_op_id}};
foreach my $lrm_rsc_op_id (sort {$a cmp $b} keys %{$anvil->data->{cib}{parsed}{cib}{status}{node_state}{$id}{lrm_id}{$lrm_id}{lrm_resource}{$lrm_resource_id}{lrm_rsc_op_id}})
{
my $type = $anvil->data->{cib}{parsed}{cib}{status}{node_state}{$id}{lrm_id}{$lrm_id}{lrm_resource}{$lrm_resource_id}{type};
my $class = $anvil->data->{cib}{parsed}{cib}{status}{node_state}{$id}{lrm_id}{$lrm_id}{lrm_resource}{$lrm_resource_id}{class};
my $operation = $anvil->data->{cib}{parsed}{cib}{status}{node_state}{$id}{lrm_id}{$lrm_id}{lrm_resource}{$lrm_resource_id}{lrm_rsc_op_id}{$lrm_rsc_op_id}{operation};
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => {
lrm_resource_operations_count => $lrm_resource_operations_count,
type => $type,
class => $class,
operation => $operation,
lrm_rsc_op_id => $lrm_rsc_op_id,
}});
# Skip unless it's a server.
next if $type ne "server";
# This will be updated below if the server is running.
if (not exists $anvil->data->{cib}{parsed}{data}{server}{$lrm_resource_id})
{
$anvil->data->{cib}{parsed}{data}{server}{$lrm_resource_id}{status} = "off";
$anvil->data->{cib}{parsed}{data}{server}{$lrm_resource_id}{last_operation} = "unknown";
$anvil->data->{cib}{parsed}{data}{server}{$lrm_resource_id}{last_operation_rc_code} = "-1";
$anvil->data->{cib}{parsed}{data}{server}{$lrm_resource_id}{host} = "";
$anvil->data->{cib}{parsed}{data}{server}{$lrm_resource_id}{last_monitor_rc_code} = "-1";
$anvil->data->{cib}{parsed}{data}{server}{$lrm_resource_id}{last_failure_operation} = "";
$anvil->data->{cib}{parsed}{data}{server}{$lrm_resource_id}{last_failure_return_code} = "-1";
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => {
"cib::parsed::data::server::${lrm_resource_id}::status" => $anvil->data->{cib}{parsed}{data}{server}{$lrm_resource_id}{status},
"cib::parsed::data::server::${lrm_resource_id}::host" => $anvil->data->{cib}{parsed}{data}{server}{$lrm_resource_id}{host},
"cib::parsed::data::server::${lrm_resource_id}::last_monitor_rc_code" => $anvil->data->{cib}{parsed}{data}{server}{$lrm_resource_id}{last_monitor_rc_code},
"cib::parsed::data::server::${lrm_resource_id}::last_operation" => $anvil->data->{cib}{parsed}{data}{server}{$lrm_resource_id}{last_operation},
"cib::parsed::data::server::${lrm_resource_id}::last_operation_rc_code" => $anvil->data->{cib}{parsed}{data}{server}{$lrm_resource_id}{last_operation_rc_code},
"cib::parsed::data::server::${lrm_resource_id}::last_failure_operation" => $anvil->data->{cib}{parsed}{data}{server}{$lrm_resource_id}{last_operation},
"cib::parsed::data::server::${lrm_resource_id}::last_failure_return_code" => $anvil->data->{cib}{parsed}{data}{server}{$lrm_resource_id}{last_failure_return_code},
}});
}
# If there are two LRM resource operation IDs, then the server is
# running on this node. Generally (always?) there will be a
# '$lrm_rsc_op_id' called '<server>_last_0'. If there is a second one
# with '_monitor' in it, the server is running locally (we always have
# a monitor operation defined).
if (($lrm_resource_operations_count > 1) && ($lrm_rsc_op_id !~ /_last_/))
{
# The server is (should be) running.
# - return code is from the RA's last status check.
# - exit-reason is the STDERR of the RA.
my $last_return_code = $anvil->data->{cib}{parsed}{cib}{status}{node_state}{$id}{lrm_id}{$lrm_id}{lrm_resource}{$lrm_resource_id}{lrm_rsc_op_id}{$lrm_rsc_op_id}{'rc-code'};
my $status = "unknown";
if ($last_return_code eq "0")
{
$status = "running";
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { status => $status }});
}
elsif ($last_return_code eq "7")
{
$status = "stopped";
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { status => $status }});
}
else
{
$status = "error_condition - rc: ".$last_return_code;
# Log all variables in case there is anything useful.
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 1, list => { status => $status }});
foreach my $variable (sort {$a cmp $b} keys %{$anvil->data->{cib}{parsed}{cib}{status}{node_state}{$id}{lrm_id}{$lrm_id}{lrm_resource}{$lrm_resource_id}{lrm_rsc_op_id}{$lrm_rsc_op_id}})
{
my $value = $anvil->data->{cib}{parsed}{cib}{status}{node_state}{$id}{lrm_id}{$lrm_id}{lrm_resource}{$lrm_resource_id}{lrm_rsc_op_id}{$lrm_rsc_op_id}{$variable};
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 1, list => {
"cib::parsed::cib::status::node_state::${id}::lrm_id::${lrm_id}::lrm_resource::${lrm_resource_id}::lrm_rsc_op_id::${lrm_rsc_op_id}::${variable}" => $anvil->data->{cib}{parsed}{cib}{status}{node_state}{$id}{lrm_id}{$lrm_id}{lrm_resource}{$lrm_resource_id}{lrm_rsc_op_id}{$lrm_rsc_op_id}{$variable},
}});
}
}
$anvil->data->{cib}{parsed}{data}{server}{$lrm_resource_id}{status} = $status;
$anvil->data->{cib}{parsed}{data}{server}{$lrm_resource_id}{host} = $anvil->data->{cib}{parsed}{cib}{status}{node_state}{$id}{lrm_id}{$lrm_id}{lrm_resource}{$lrm_resource_id}{lrm_rsc_op_id}{$lrm_rsc_op_id}{'on_node'};
$anvil->data->{cib}{parsed}{data}{server}{$lrm_resource_id}{last_monitor_rc_code} = $anvil->data->{cib}{parsed}{cib}{status}{node_state}{$id}{lrm_id}{$lrm_id}{lrm_resource}{$lrm_resource_id}{lrm_rsc_op_id}{$lrm_rsc_op_id}{'rc-code'};
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => {
"cib::parsed::data::server::${lrm_resource_id}::status" => $anvil->data->{cib}{parsed}{data}{server}{$lrm_resource_id}{status},
"cib::parsed::data::server::${lrm_resource_id}::host" => $anvil->data->{cib}{parsed}{data}{server}{$lrm_resource_id}{host},
"cib::parsed::data::server::${lrm_resource_id}::last_monitor_rc_code" => $anvil->data->{cib}{parsed}{data}{server}{$lrm_resource_id}{last_monitor_rc_code},
}});
}
elsif ($lrm_rsc_op_id =~ /_last_failure_/)
{
$anvil->data->{cib}{parsed}{data}{server}{$lrm_resource_id}{last_failure_operation} = $operation;
$anvil->data->{cib}{parsed}{data}{server}{$lrm_resource_id}{last_failure_return_code} = $anvil->data->{cib}{parsed}{cib}{status}{node_state}{$id}{lrm_id}{$lrm_id}{lrm_resource}{$lrm_resource_id}{lrm_rsc_op_id}{$lrm_rsc_op_id}{'rc-code'};
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => {
"cib::parsed::data::server::${lrm_resource_id}::last_failure_operation" => $anvil->data->{cib}{parsed}{data}{server}{$lrm_resource_id}{last_failure_operation},
"cib::parsed::data::server::${lrm_resource_id}::last_failure_return_code" => $anvil->data->{cib}{parsed}{data}{server}{$lrm_resource_id}{last_failure_return_code},
}});
}
else
{
# This isn't a monitor operation, so it will contain the most
# recent data on the server.
if ($anvil->data->{cib}{parsed}{data}{server}{$lrm_resource_id}{last_operation} eq "unknown")
{
$anvil->data->{cib}{parsed}{data}{server}{$lrm_resource_id}{last_operation} = $operation;
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => {
"cib::parsed::data::server::${lrm_resource_id}::last_operation" => $anvil->data->{cib}{parsed}{data}{server}{$lrm_resource_id}{last_operation},
}});
}
if ($anvil->data->{cib}{parsed}{data}{server}{$lrm_resource_id}{last_operation_rc_code} eq "-1")
{
$anvil->data->{cib}{parsed}{data}{server}{$lrm_resource_id}{last_operation_rc_code} = $anvil->data->{cib}{parsed}{cib}{status}{node_state}{$id}{lrm_id}{$lrm_id}{lrm_resource}{$lrm_resource_id}{lrm_rsc_op_id}{$lrm_rsc_op_id}{'rc-code'};
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => {
"cib::parsed::data::server::${lrm_resource_id}::last_operation_rc_code" => $anvil->data->{cib}{parsed}{data}{server}{$lrm_resource_id}{last_operation_rc_code},
}});
}
}
print "Node: [".$node_name."] (".$id."), lrm_id: [".$lrm_id."], lrm_resource_id: [".$lrm_resource_id."] (type: [".$type."], class: [".$class."]), lrm_rsc_op_id: [".$lrm_rsc_op_id."] (".$lrm_resource_operations_count.")\n";
foreach my $variable (sort {$a cmp $b} keys %{$anvil->data->{cib}{parsed}{cib}{status}{node_state}{$id}{lrm_id}{$lrm_id}{lrm_resource}{$lrm_resource_id}{lrm_rsc_op_id}{$lrm_rsc_op_id}})
{
my $value = $anvil->data->{cib}{parsed}{cib}{status}{node_state}{$id}{lrm_id}{$lrm_id}{lrm_resource}{$lrm_resource_id}{lrm_rsc_op_id}{$lrm_rsc_op_id}{$variable};
print "- Variable: [".$variable."], value: [".$value."]\n";
}
}
}
}
}
# Debug code.
foreach my $server (sort {$a cmp $b} keys %{$anvil->data->{cib}{parsed}{data}{server}})
{
my $last_operation = $anvil->data->{cib}{parsed}{data}{server}{$server}{last_operation};
my $last_operation_rc_code = $anvil->data->{cib}{parsed}{data}{server}{$server}{last_operation_rc_code};
my $status = $anvil->data->{cib}{parsed}{data}{server}{$server}{status};
my $host = $anvil->data->{cib}{parsed}{data}{server}{$server}{host};
my $last_monitor_rc_code = $anvil->data->{cib}{parsed}{data}{server}{$server}{last_monitor_rc_code};
my $last_failure_operation = $anvil->data->{cib}{parsed}{data}{server}{$server}{last_failure_operation};
my $last_failure_return_code = $anvil->data->{cib}{parsed}{data}{server}{$server}{last_failure_return_code};
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => {
's1:server' => $server,
's2:host' => $host,
's3:status' => $status,
's4:last_monitor_rc_code' => $last_monitor_rc_code,
's5:last_operation' => $last_operation,
's6:last_operation_rc_code' => $last_operation_rc_code,
's7:last_failure_operation' => $last_failure_operation,
's8:last_failure_return_code' => $last_failure_return_code,
}});
}
return($problem);
}
=head2 shutdown_server
This shuts down a server that is running on the Anvil! system.
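A call might look like this (the server name is illustrative):
 my $problem = $anvil->Cluster->shutdown_server({server => "srv01-sql"});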
Parameters;
=head3 server (required)
This is the name of the server to shut down.
=head3 wait (optional, default '1')
This controls whether the method waits for the server to shut down before returning. By default, it will go into a loop and check every 2 seconds to see if the server is still running. Once it's found to be off, the method returns. If this is set to C<< 0 >>, the method will return as soon as the request to shut down the server is issued.
=cut
sub shutdown_server
{
my $self = shift;
my $parameter = shift;
my $anvil = $self->parent;
my $debug = defined $parameter->{debug} ? $parameter->{debug} : 3;
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => $debug, key => "log_0125", variables => { method => "Cluster->shutdown_server()" }});
my $server = defined $parameter->{server} ? $parameter->{server} : "";
my $wait = defined $parameter->{'wait'} ? $parameter->{'wait'} : 1;
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => {
server => $server,
'wait' => $wait,
}});
if (not $server)
{
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 0, priority => "err", key => "log_0020", variables => { method => "Cluster->shutdown_server()", parameter => "server" }});
return("!!error!!");
}
my $host_type = $anvil->Get->host_type({debug => $debug});
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { host_type => $host_type }});
if ($host_type ne "node")
{
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 0, priority => "err", key => "error_0150", variables => { server => $server }});
return("!!error!!");
}
my $problem = $anvil->Cluster->parse_cib({debug => $debug});
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { problem => $problem }});
if ($problem)
{
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 0, priority => "err", key => "error_0151", variables => { server => $server }});
return('!!error!!');
}
# Is this node fully in the cluster?
if (not $anvil->data->{cib}{parsed}{'local'}{ready})
{
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 0, priority => "err", key => "error_0152", variables => { server => $server }});
return('!!error!!');
}
# Is the server one we know of?
if (not exists $anvil->data->{cib}{parsed}{data}{server}{$server})
{
# The server isn't in the pacemaker config.
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 0, priority => "err", key => "error_0153", variables => { server => $server }});
return('!!error!!');
}
# Is the server already off? If so, do nothing.
my $status = $anvil->data->{cib}{parsed}{data}{server}{$server}{status};
my $host = $anvil->data->{cib}{parsed}{data}{server}{$server}{host};
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => {
status => $status,
host => $host,
}});
if ($status eq "off")
{
# Already off.
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 2, key => "log_0548", variables => { server => $server }});
return(0);
}
elsif ($status ne "running")
{
# It's in an unknown state, abort.
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 1, key => "warning_0060", variables => {
server => $server,
current_host => $host,
current_state => $status,
}});
return('!!error!!');
}
# Now shut down the server.
my ($output, $return_code) = $anvil->System->call({debug => 3, shell_call => $anvil->data->{path}{exe}{pcs}." resource disable ".$server});
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => {
output => $output,
return_code => $return_code,
}});
if (not $wait)
{
# We're done.
return(0);
}
# Wait now for the server to stop.
my $waiting = 1;
while($waiting)
{
$anvil->Cluster->parse_cib({debug => $debug});
my $status = $anvil->data->{cib}{parsed}{data}{server}{$server}{status};
my $host = $anvil->data->{cib}{parsed}{data}{server}{$server}{host};
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => {
status => $status,
host => $host,
}});
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 2, key => "log_0554", variables => { server => $server }});
if ($host eq "running")
{
# Wait a bit and check again.
sleep 2;
}
else
{
# It's down.
$waiting = 0;
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 2, key => "log_0555", variables => { server => $server }});
}
}
return(0);
}
=head2 start_cluster
This will join the local node to the pacemaker cluster. Optionally, it can try to start the cluster on both nodes if C<< all >> is set.
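A call asking the cluster to start on both nodes might look like:
 $anvil->Cluster->start_cluster({all => 1});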
@@ -818,25 +1453,24 @@ sub which_node
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => {
anvil_name => $anvil_name,
node1_host_uuid => $node1_host_uuid
node2_host_uuid => $node2_host_uuid
node1_host_uuid => $node1_host_uuid,
node2_host_uuid => $node2_host_uuid,
}});
if ($node_uuid eq $node1_host_uuid)
{
$node_id = "node1";
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { node_id => $node_id }});
$node_is = "node1";
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { node_is => $node_is }});
last;
}
elsif ($node_uuid eq $node2_host_uuid)
{
$node_id = "node2";
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { node_id => $node_id }});
$node_is = "node2";
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { node_is => $node_is }});
last;
}
}
return($node_is);
}
@@ -850,3 +1484,106 @@ sub which_node
#############################################################################################################
# Private functions #
#############################################################################################################
=head2 _set_server_constraint
This is a private method used to set a preferential location constraint for a server. It takes a server name and a preferred host node. It checks to see if a location constraint exists and, if so, which node is preferred. If it is not the requested node, the constraint is updated. If no constraint exists, it is created.
Returns C<< !!error!! >> if there is a problem, C<< 0 >> otherwise.
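A call might look like this (the server and node names are illustrative):
 my $problem = $anvil->Cluster->_set_server_constraint({server => "srv01-sql", preferred_node => "mk-a02n01"});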
Parameters;
=head3 server (required)
This is the name of the server whose preferred host node priority is being set.
=head3 preferred_node (required)
This is the name of the node that the server will prefer to run on.
=cut
sub _set_server_constraint
{
my $self = shift;
my $parameter = shift;
my $anvil = $self->parent;
my $debug = defined $parameter->{debug} ? $parameter->{debug} : 3;
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => $debug, key => "log_0125", variables => { method => "Cluster->_set_server_constraint()" }});
my $preferred_node = defined $parameter->{preferred_node} ? $parameter->{preferred_node} : "";
my $server = defined $parameter->{server} ? $parameter->{server} : "";
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => {
server => $server,
preferred_node => $preferred_node,
}});
if (not $server)
{
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 0, priority => "err", key => "log_0020", variables => { method => "Cluster->_set_server_constraint()", parameter => "server" }});
return("!!error!!");
}
if (not $preferred_node)
{
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 0, priority => "err", key => "log_0020", variables => { method => "Cluster->_set_server_constraint()", parameter => "preferred_node" }});
return("!!error!!");
}
if (not exists $anvil->data->{cib}{parsed}{data}{cluster}{name})
{
my $problem = $anvil->Cluster->parse_cib({debug => $debug});
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { problem => $problem }});
if ($problem)
{
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 0, priority => "err", key => "error_0145", variables => { server => $server }});
}
}
# Is this node fully in the cluster?
if (not $anvil->data->{cib}{parsed}{'local'}{ready})
{
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 0, priority => "err", key => "error_0148", variables => {
server => $server,
node => $preferred_node,
}});
return('!!error!!');
}
my $peer_name = $anvil->data->{cib}{parsed}{peer}{name};
my $local_name = $anvil->data->{cib}{parsed}{'local'}{name};
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => {
peer_name => $peer_name,
local_name => $local_name,
}});
my $shell_call = "";
if ($preferred_node eq $peer_name)
{
$shell_call = $anvil->data->{path}{exe}{pcs}." constraint location ".$server." prefers ".$peer_name."=200 ".$local_name."=100";
}
elsif ($preferred_node eq $local_name)
{
$shell_call = $anvil->data->{path}{exe}{pcs}." constraint location ".$server." prefers ".$peer_name."=100 ".$local_name."=200";
}
else
{
# Invalid
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 0, priority => "err", key => "error_0144", variables => {
server => $server,
node => $preferred_node,
node1 => $local_name,
node2 => $peer_name,
}});
return("!!error!!");
}
# Change the location constraint
my ($output, $return_code) = $anvil->System->call({debug => 3, shell_call => $shell_call});
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => {
output => $output,
return_code => $return_code,
}});
return(0);
}

@@ -12,12 +12,12 @@ our $VERSION = "3.0.0";
my $THIS_FILE = "Server.pm";
### Methods;
# boot
# boot_virsh
# find
# get_status
# map_network
# migrate
# shutdown
# migrate_virsh
# shutdown_virsh
=pod
@@ -78,13 +78,13 @@ sub parent
# Public methods #
#############################################################################################################
=head2 boot
=head2 boot_virsh
This takes a server name and tries to boot it (using C<< virsh create /mnt/shared/definition/<server>.xml >>). It requires that any supporting systems already be started (ie: the DRBD resource is up).
If booted, C<< 1 >> is returned. Otherwise, C<< 0 >> is returned.
my ($booted) = $anvil->Server->boot({server => "test_server"});
my ($booted) = $anvil->Server->boot_virsh({server => "test_server"});
Parameters;
@@ -99,13 +99,13 @@ By default, the definition file used will be named C<< <server>.xml >> in the C<
This is the name of the server, as it appears in C<< virsh >>.
=cut
sub boot
sub boot_virsh
{
my $self = shift;
my $parameter = shift;
my $anvil = $self->parent;
my $debug = defined $parameter->{debug} ? $parameter->{debug} : 3;
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => $debug, key => "log_0125", variables => { method => "Server->boot()" }});
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => $debug, key => "log_0125", variables => { method => "Server->boot_virsh()" }});
my $server = defined $parameter->{server} ? $parameter->{server} : "";
my $definition = defined $parameter->{definition} ? $parameter->{definition} : "";
@@ -117,7 +117,7 @@ sub boot
if (not $server)
{
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 0, priority => "err", key => "log_0020", variables => { method => "Server->boot()", parameter => "server" }});
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 0, priority => "err", key => "log_0020", variables => { method => "Server->boot_virsh()", parameter => "server" }});
return(1);
}
if (not $definition)
@@ -595,11 +595,15 @@ Provision order:
return(0);
}
=head2 migrate
=head2 migrate_virsh
This will migrate (push or pull) a server from one node to another. If the migration was successful, C<< 1 >> is returned. Otherwise, C<< 0 >> is returned with a (hopefully) useful error being logged.
NOTE: It is assumed that sanity checks are completed before this method is called.
Generally speaking, this is B<< NOT >> the method you want to call.
B<< Warning >>: This method makes the raw C<< virsh >> call; it does not go through pacemaker. To migrate via pacemaker, use C<< Cluster->migrate_server >>.
B<< Note >>: It is assumed that sanity checks are completed before this method is called.
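A push migration might look like this (the server and target host names are illustrative; C<< target >> is the push-target parameter described below):
 my ($migrated) = $anvil->Server->migrate_virsh({server => "srv01-sql", target => "mk-a02n02"});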
Parameters;
@@ -618,13 +622,13 @@ If set, the server will be pulled.
This is the host name (or IP) of the host that the server will be pushed to, if C<< source >> is not set. When this is not passed, the local full host name is used as the default.
=cut
sub migrate
sub migrate_virsh
{
my $self = shift;
my $parameter = shift;
my $anvil = $self->parent;
my $debug = defined $parameter->{debug} ? $parameter->{debug} : 3;
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => $debug, key => "log_0125", variables => { method => "Server->migrate()" }});
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => $debug, key => "log_0125", variables => { method => "Server->migrate_virsh()" }});
my $server = defined $parameter->{server} ? $parameter->{server} : "";
my $source = defined $parameter->{source} ? $parameter->{source} : "";
@@ -638,7 +642,7 @@ sub migrate
if (not $server)
{
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 0, priority => "err", key => "log_0020", variables => { method => "Server->migrate()", parameter => "server" }});
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 0, priority => "err", key => "log_0020", variables => { method => "Server->migrate_virsh()", parameter => "server" }});
return($success);
}
@@ -745,13 +749,13 @@ sub migrate
return($success);
}
=head2 shutdown
=head2 shutdown_virsh
This takes a server name and tries to shut it down. If the server was found locally, the shut down is requested and this method will wait for the server to actually shut down before returning.
If shut down, C<< 1 >> is returned. If the server wasn't found or another problem occurs, C<< 0 >> is returned.
my ($shutdown) = $anvil->Server->shutdown({server => "test_server"});
my ($shutdown) = $anvil->Server->shutdown_virsh({server => "test_server"});
Parameters;
@@ -770,13 +774,13 @@ This is the name of the server (as it appears in C<< virsh >>) to shut down.
By default, this method will wait indefinitely for the server to shut down before returning. If this is set to a non-zero number, the method will wait that number of seconds for the server to shut down. If the server is still not off by then, C<< 0 >> is returned.
=cut
sub shutdown
sub shutdown_virsh
{
my $self = shift;
my $parameter = shift;
my $anvil = $self->parent;
my $debug = defined $parameter->{debug} ? $parameter->{debug} : 3;
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => $debug, key => "log_0125", variables => { method => "Server->shutdown()" }});
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => $debug, key => "log_0125", variables => { method => "Server->shutdown_virsh()" }});
my $server = defined $parameter->{server} ? $parameter->{server} : "";
my $force = defined $parameter->{force} ? $parameter->{force} : 0;
@@ -789,7 +793,7 @@ sub shutdown
if (not $server)
{
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 0, priority => "err", key => "log_0020", variables => { method => "Server->shutdown()", parameter => "server" }});
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 0, priority => "err", key => "log_0020", variables => { method => "Server->shutdown_virsh()", parameter => "server" }});
return($success);
}
if (($wait) && ($wait =~ /\D/))

@@ -3,7 +3,7 @@
# This is the resource agent used to manage servers on the Anvil! Intelligent Availability platform.
#
# License: GNU General Public License (GPL) v2+
# (c) 1997-2019 - Alteeve's Niche! Inc.
# (c) 1997-2020 - Alteeve's Niche! Inc.
#
# WARNING: This is a pretty purpose-specific resource agent. No effort was made to test this on an rgmanager
# cluster or on any configuration outside how the Anvil! m3 uses it. If you plan to adapt it to
@@ -665,7 +665,7 @@ sub start_server
start_drbd_resource($anvil);
# Still alive? Boot!
my ($success) = $anvil->Server->boot({debug => 3, server => $server});
my ($success) = $anvil->Server->boot_virsh({debug => 3, server => $server});
if ($success)
{
# Success!
@@ -955,7 +955,7 @@ sub stop_server
$anvil->Server->get_status({debug => 3, server => $server});
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 2, key => "log_0313", variables => { server => $server }});
my $success = $anvil->Server->shutdown({debug => 3, server => $server});
my $success = $anvil->Server->shutdown_virsh({debug => 3, server => $server});
if (not $success)
{
# Something went wrong. Details should be in the logs.
@@ -997,7 +997,7 @@ sub server_status
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 1, priority => "alert", key => "log_0331", variables => { timeout => $anvil->data->{environment}{OCF_RESKEY_CRM_meta_timeout} }});
}
# Is 'libvirtd' running? We'll wait up to halfthe timeout for it to start (in case it _just_ started)
# Is 'libvirtd' running? We'll wait up to half the timeout for it to start (in case it _just_ started)
# before timing out.
my $wait_until = time + ($anvil->data->{environment}{OCF_RESKEY_CRM_meta_timeout} / 2000); # Divide by 2000 to convert the timeout from milliseconds to seconds, then halve it.
my $look_for_pid = 0;
@@ -1358,7 +1358,7 @@ sub migrate_server
}
# If we're still alive, we're ready to migrate.
($migrated) = $anvil->Server->migrate({
($migrated) = $anvil->Server->migrate_virsh({
debug => 3,
server => $server,
source => $source,
@@ -1413,7 +1413,7 @@ sub migrate_server
validate_all($anvil);
# Call the pull migration.
($migrated) = $anvil->Server->migrate({
($migrated) = $anvil->Server->migrate_virsh({
debug => 3,
server => $server,
source => $source,

@@ -54,7 +54,7 @@ if ($problem)
$anvil->nice_exit({exit_code => 1});
}
$anvil->Log->entry({test => 1, source => $THIS_FILE, line => __LINE__, level => 1, key => "scan_cluster_log_0001", variables => { program => $THIS_FILE }});
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 1, key => "scan_cluster_log_0001", variables => { program => $THIS_FILE }});
if ($anvil->data->{switches}{purge})
{
# This can be called when doing bulk-database purges.
@@ -77,6 +77,7 @@ if ($host_type ne "node")
# Read the data.
collect_data($anvil);
# Read last scan
$anvil->nice_exit({exit_code => 0});
@@ -108,17 +109,35 @@ sub collect_data
### TODO: If we're node 2, or not in the cluster, only update our information in the
### 'scan_cluster_nodes' table. Node 1 will update everything else if it's 'ready' (else node 2
### will, if it's ready)
### will, if it's ready).
my $i_am = $anvil->Cluster->which_node({debug => 1});
my $my_node_name = $anvil->data->{cib}{parsed}{'local'}{name};
my $peer_node_name = $anvil->data->{cib}{parsed}{peer}{name};
my $peer_ready = $anvil->data->{cib}{parsed}{peer}{ready};
my $local_ready = $anvil->data->{cib}{parsed}{data}{node}{$my_node_name}{node_state}{ready};
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => {
i_am => $i_am,
my_node_name => $my_node_name,
peer_node_name => $peer_node_name,
peer_ready => $peer_ready,
local_ready => $local_ready,
}});
my $ready = $anvil->data->{cib}{parsed}{data}{node}{$node_name}{node_state}{ready};
### TODO: Change the logic so that when both nodes are in the cluster, the node with the lowest
### load does the scan (typically the node without VMs).
if (($i_am eq "node2") && ($peer_ready))
{
# We're not going to run.
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 2, key => "scan_cluster_message_0001"});
$anvil->nice_exit({exit_code => 0});
}
# If we're still alive, we're either node 1, or we're node 2 and node 1 is not ready. If we're not ready,
if ($stonith_max_attempts ne "INFINITY")
{
### TODO: Call pcs to update
}
# Pick up node data
return(0);

@@ -56,58 +56,449 @@ CREATE TRIGGER trigger_scan_cluster
CREATE TABLE scan_cluster_nodes (
scan_cluster_node_uuid uuid primary key,
scan_cluster_node_scan_cluster_uuid uuid not null, -- The parent scan_cluster_uuid.
scan_cluster_node_host_uuid uuid not null, -- This is the host UUID of the node.
scan_cluster_node_name text not null, -- This is the host name as reported by pacemaker. It _should_ match up to a host name in 'hosts'.
scan_cluster_node_pacemaker_id numeric not null, -- This is the internal pacemaker ID number of this node.
my $node_id = $anvil->data->{cib}{parsed}{data}{node}{$node_name}{id};
my $in_ccm = $anvil->data->{cib}{parsed}{cib}{node_state}{$node_id}{in_ccm} eq "true" ? 1 : 0; # 'true' or 'false' - Corosync member
my $crmd = $anvil->data->{cib}{parsed}{cib}{node_state}{$node_id}{crmd} eq "online" ? 1 : 0; # 'online' or 'offline' - In corosync process group
my $join = $anvil->data->{cib}{parsed}{cib}{node_state}{$node_id}{'join'} eq "member" ? 1 : 0; # 'member' or 'down' - Completed controller join process
modified_date timestamp with time zone not null,
FOREIGN KEY(scan_cluster_node_scan_cluster_uuid) REFERENCES scan_cluster(scan_cluster_uuid),
FOREIGN KEY(scan_cluster_node_host_uuid) REFERENCES hosts(host_uuid)
);
ALTER TABLE scan_cluster OWNER TO admin;
ALTER TABLE scan_cluster_nodes OWNER TO admin;
CREATE TABLE history.scan_cluster (
CREATE TABLE history.scan_cluster_nodes (
history_id bigserial,
scan_cluster_uuid uuid,
scan_cluster_host_uuid uuid,
scan_cluster_node_uuid uuid,
scan_cluster_node_scan_cluster_uuid uuid,
scan_cluster_node_host_uuid uuid,
scan_cluster_node_name text,
scan_cluster_stonith_enabled boolean,
scan_cluster_maintenance_mode boolean,
scan_cluster_node_pacemaker_id numeric,
modified_date timestamp with time zone not null
);
ALTER TABLE history.scan_cluster OWNER TO admin;
ALTER TABLE history.scan_cluster_nodes OWNER TO admin;
CREATE FUNCTION history_scan_cluster() RETURNS trigger
CREATE FUNCTION history_scan_cluster_nodes() RETURNS trigger
AS $$
DECLARE
history_scan_cluster RECORD;
history_scan_cluster_nodes RECORD;
BEGIN
SELECT INTO history_scan_cluster * FROM scan_cluster WHERE scan_cluster_uuid=new.scan_cluster_uuid;
INSERT INTO history.scan_cluster
(scan_cluster_uuid,
scan_cluster_host_uuid,
scan_cluster_name,
scan_cluster_stonith_enabled,
scan_cluster_maintenance_mode,
SELECT INTO history_scan_cluster_nodes * FROM scan_cluster_nodes WHERE scan_cluster_node_uuid=new.scan_cluster_node_uuid;
INSERT INTO history.scan_cluster_nodes
(scan_cluster_node_uuid,
scan_cluster_node_scan_cluster_uuid,
scan_cluster_node_host_uuid,
scan_cluster_node_name,
scan_cluster_node_pacemaker_id,
modified_date)
VALUES
(history_scan_cluster.scan_cluster_uuid,
history_scan_cluster.scan_cluster_host_uuid,
history_scan_cluster.scan_cluster_name,
history_scan_cluster.scan_cluster_stonith_enabled,
history_scan_cluster.scan_cluster_maintenance_mode,
history_scan_cluster.modified_date);
(history_scan_cluster_nodes.scan_cluster_node_uuid,
history_scan_cluster_nodes.scan_cluster_node_scan_cluster_uuid,
history_scan_cluster_nodes.scan_cluster_node_host_uuid,
history_scan_cluster_nodes.scan_cluster_node_name,
history_scan_cluster_nodes.scan_cluster_node_pacemaker_id,
history_scan_cluster_nodes.modified_date);
RETURN NULL;
END;
$$
LANGUAGE plpgsql;
ALTER FUNCTION history_scan_cluster() OWNER TO admin;
ALTER FUNCTION history_scan_cluster_nodes() OWNER TO admin;
CREATE TRIGGER trigger_scan_cluster_nodes
AFTER INSERT OR UPDATE ON scan_cluster_nodes
FOR EACH ROW EXECUTE PROCEDURE history_scan_cluster_nodes();
CREATE TRIGGER trigger_scan_cluster
AFTER INSERT OR UPDATE ON scan_cluster
FOR EACH ROW EXECUTE PROCEDURE history_scan_cluster();
CREATE TABLE scan_cluster_stoniths (
scan_cluster_stonith_uuid uuid primary key,
scan_cluster_stonith_scan_cluster_uuid uuid not null, -- The parent scan_cluster_uuid.
scan_cluster_stonith_host_uuid uuid not null, -- This is the host UUID of the node.
scan_cluster_stonith_name text not null, -- This is the 'stonith id'
scan_cluster_stonith_arguments text not null, -- This is the fence agent + collection of primitive variable=value pairs (the nvpairs)
scan_cluster_stonith_operations text not null, -- This is the collection of operation variable=value pairs (the nvpairs)
modified_date timestamp with time zone not null,
FOREIGN KEY(scan_cluster_stonith_scan_cluster_uuid) REFERENCES scan_cluster(scan_cluster_uuid),
FOREIGN KEY(scan_cluster_stonith_host_uuid) REFERENCES hosts(host_uuid)
);
ALTER TABLE scan_cluster_stoniths OWNER TO admin;
CREATE TABLE history.scan_cluster_stoniths (
history_id bigserial,
scan_cluster_stonith_uuid uuid,
scan_cluster_stonith_scan_cluster_uuid uuid,
scan_cluster_stonith_host_uuid uuid,
scan_cluster_stonith_name text,
scan_cluster_stonith_arguments text,
scan_cluster_stonith_operations text,
modified_date timestamp with time zone not null
);
ALTER TABLE history.scan_cluster_stoniths OWNER TO admin;
CREATE FUNCTION history_scan_cluster_stoniths() RETURNS trigger
AS $$
DECLARE
history_scan_cluster_stoniths RECORD;
BEGIN
SELECT INTO history_scan_cluster_stoniths * FROM scan_cluster_stoniths WHERE scan_cluster_stonith_uuid=new.scan_cluster_stonith_uuid;
INSERT INTO history.scan_cluster_stoniths
(scan_cluster_stonith_uuid,
scan_cluster_stonith_scan_cluster_uuid,
scan_cluster_stonith_host_uuid,
scan_cluster_stonith_name,
scan_cluster_stonith_arguments,
scan_cluster_stonith_operations,
modified_date)
VALUES
(history_scan_cluster_stoniths.scan_cluster_stonith_uuid,
history_scan_cluster_stoniths.scan_cluster_stonith_scan_cluster_uuid,
history_scan_cluster_stoniths.scan_cluster_stonith_host_uuid,
history_scan_cluster_stoniths.scan_cluster_stonith_name,
history_scan_cluster_stoniths.scan_cluster_stonith_arguments,
history_scan_cluster_stoniths.scan_cluster_stonith_operations,
history_scan_cluster_stoniths.modified_date);
RETURN NULL;
END;
$$
LANGUAGE plpgsql;
ALTER FUNCTION history_scan_cluster_stoniths() OWNER TO admin;
CREATE TRIGGER trigger_scan_cluster_stoniths
AFTER INSERT OR UPDATE ON scan_cluster_stoniths
FOR EACH ROW EXECUTE PROCEDURE history_scan_cluster_stoniths();
CREATE TABLE scan_cluster_servers (
scan_cluster_server_uuid uuid primary key,
scan_cluster_server_scan_cluster_uuid uuid not null, -- The parent scan_cluster_uuid.
scan_cluster_server_name text not null, -- This is the name of the server (ocf primitive id)
scan_cluster_server_state text not null, -- This is 'running', or the reason the server is off (off by user, etc).
scan_cluster_server_host_name text not null, -- This is the (cluster) name of the node hosting the server. Blank if the server is off.
scan_cluster_server_arguments text not null, -- This is the collection of primitive variable=value pairs (the nvpairs)
scan_cluster_server_operations text not null, -- This is the collection of operation variable=value pairs (the nvpairs)
scan_cluster_server_meta text not null, -- This is the collection of meta attribute variable=value pairs (the nvpairs)
modified_date timestamp with time zone not null,
FOREIGN KEY(scan_cluster_server_scan_cluster_uuid) REFERENCES scan_cluster(scan_cluster_uuid),
FOREIGN KEY(scan_cluster_host_uuid) REFERENCES hosts(host_uuid)
);
ALTER TABLE scan_cluster_servers OWNER TO admin;
CREATE TABLE history.scan_cluster_servers (
history_id bigserial,
scan_cluster_server_uuid uuid,
scan_cluster_server_scan_cluster_uuid uuid,
scan_cluster_server_name text,
scan_cluster_server_arguments text,
scan_cluster_server_operations text,
scan_cluster_server_meta text,
modified_date timestamp with time zone not null
);
ALTER TABLE history.scan_cluster_servers OWNER TO admin;
CREATE FUNCTION history_scan_cluster_servers() RETURNS trigger
AS $$
DECLARE
history_scan_cluster_servers RECORD;
BEGIN
SELECT INTO history_scan_cluster_servers * FROM scan_cluster_servers WHERE scan_cluster_server_uuid=new.scan_cluster_server_uuid;
INSERT INTO history.scan_cluster_servers
(scan_cluster_server_uuid,
scan_cluster_server_scan_cluster_uuid,
scan_cluster_server_name,
scan_cluster_server_arguments,
scan_cluster_server_operations,
scan_cluster_server_meta,
modified_date)
VALUES
(history_scan_cluster_servers.scan_cluster_server_uuid,
history_scan_cluster_servers.scan_cluster_server_scan_cluster_uuid,
history_scan_cluster_servers.scan_cluster_server_name,
history_scan_cluster_servers.scan_cluster_server_arguments,
history_scan_cluster_servers.scan_cluster_server_operations,
history_scan_cluster_servers.scan_cluster_server_meta,
history_scan_cluster_servers.modified_date);
RETURN NULL;
END;
$$
LANGUAGE plpgsql;
ALTER FUNCTION history_scan_cluster_servers() OWNER TO admin;
CREATE TRIGGER trigger_scan_cluster_servers
AFTER INSERT OR UPDATE ON scan_cluster_servers
FOR EACH ROW EXECUTE PROCEDURE history_scan_cluster_servers();
-- Example CIB
# pcs resource
* srv07-el6 (ocf::alteeve:server): Stopped (disabled)
* srv01-sql (ocf::alteeve:server): Started mk-a02n01
* srv02-lab1 (ocf::alteeve:server): Started mk-a02n01
* srv08-m2-psql (ocf::alteeve:server): Stopped (disabled)
<cib crm_feature_set="3.3.0" validate-with="pacemaker-3.2" epoch="418" num_updates="4" admin_epoch="0" cib-last-written="Mon Sep 21 13:30:38 2020" update-origin="mk-a02n01" update-client="cibadmin" update-user="root" have-quorum="1" dc-uuid="2">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
<nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.0.3-5.el8_2.1-4b1f869f0f"/>
<nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
<nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="mk-anvil-02"/>
<nvpair id="cib-bootstrap-options-stonith-max-attempts" name="stonith-max-attempts" value="INFINITY"/>
<nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"/>
<nvpair id="cib-bootstrap-options-maintenance-mode" name="maintenance-mode" value="false"/>
<nvpair id="cib-bootstrap-options-last-lrm-refresh" name="last-lrm-refresh" value="1597956504"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="1" uname="mk-a02n01"/>
<node id="2" uname="mk-a02n02"/>
</nodes>
<resources>
<primitive class="stonith" id="ipmilan_node1" type="fence_ipmilan">
<instance_attributes id="ipmilan_node1-instance_attributes">
<nvpair id="ipmilan_node1-instance_attributes-ipaddr" name="ipaddr" value="10.201.13.1"/>
<nvpair id="ipmilan_node1-instance_attributes-password" name="password" value="another secret p"/>
<nvpair id="ipmilan_node1-instance_attributes-pcmk_host_list" name="pcmk_host_list" value="mk-a02n01"/>
<nvpair id="ipmilan_node1-instance_attributes-username" name="username" value="admin"/>
</instance_attributes>
<operations>
<op id="ipmilan_node1-monitor-interval-60" interval="60" name="monitor"/>
</operations>
</primitive>
<primitive class="stonith" id="apc_snmp_node1_mk-pdu01" type="fence_apc_snmp">
<instance_attributes id="apc_snmp_node1_mk-pdu01-instance_attributes">
<nvpair id="apc_snmp_node1_mk-pdu01-instance_attributes-ip" name="ip" value="10.201.2.3"/>
<nvpair id="apc_snmp_node1_mk-pdu01-instance_attributes-pcmk_host_list" name="pcmk_host_list" value="mk-a02n01"/>
<nvpair id="apc_snmp_node1_mk-pdu01-instance_attributes-pcmk_off_action" name="pcmk_off_action" value="reboot"/>
<nvpair id="apc_snmp_node1_mk-pdu01-instance_attributes-port" name="port" value="3"/>
</instance_attributes>
<operations>
<op id="apc_snmp_node1_mk-pdu01-monitor-interval-60" interval="60" name="monitor"/>
</operations>
</primitive>
<primitive class="stonith" id="apc_snmp_node1_mk-pdu02" type="fence_apc_snmp">
<instance_attributes id="apc_snmp_node1_mk-pdu02-instance_attributes">
<nvpair id="apc_snmp_node1_mk-pdu02-instance_attributes-ip" name="ip" value="10.201.2.4"/>
<nvpair id="apc_snmp_node1_mk-pdu02-instance_attributes-pcmk_host_list" name="pcmk_host_list" value="mk-a02n01"/>
<nvpair id="apc_snmp_node1_mk-pdu02-instance_attributes-pcmk_off_action" name="pcmk_off_action" value="reboot"/>
<nvpair id="apc_snmp_node1_mk-pdu02-instance_attributes-port" name="port" value="3"/>
</instance_attributes>
<operations>
<op id="apc_snmp_node1_mk-pdu02-monitor-interval-60" interval="60" name="monitor"/>
</operations>
</primitive>
<primitive class="stonith" id="ipmilan_node2" type="fence_ipmilan">
<instance_attributes id="ipmilan_node2-instance_attributes">
<nvpair id="ipmilan_node2-instance_attributes-ipaddr" name="ipaddr" value="10.201.13.2"/>
<nvpair id="ipmilan_node2-instance_attributes-password" name="password" value="another secret p"/>
<nvpair id="ipmilan_node2-instance_attributes-pcmk_host_list" name="pcmk_host_list" value="mk-a02n02"/>
<nvpair id="ipmilan_node2-instance_attributes-username" name="username" value="admin"/>
</instance_attributes>
<operations>
<op id="ipmilan_node2-monitor-interval-60" interval="60" name="monitor"/>
</operations>
</primitive>
<primitive class="stonith" id="apc_snmp_node2_mk-pdu01" type="fence_apc_snmp">
<instance_attributes id="apc_snmp_node2_mk-pdu01-instance_attributes">
<nvpair id="apc_snmp_node2_mk-pdu01-instance_attributes-ip" name="ip" value="10.201.2.3"/>
<nvpair id="apc_snmp_node2_mk-pdu01-instance_attributes-pcmk_host_list" name="pcmk_host_list" value="mk-a02n02"/>
<nvpair id="apc_snmp_node2_mk-pdu01-instance_attributes-pcmk_off_action" name="pcmk_off_action" value="reboot"/>
<nvpair id="apc_snmp_node2_mk-pdu01-instance_attributes-port" name="port" value="4"/>
</instance_attributes>
<operations>
<op id="apc_snmp_node2_mk-pdu01-monitor-interval-60" interval="60" name="monitor"/>
</operations>
</primitive>
<primitive class="stonith" id="apc_snmp_node2_mk-pdu02" type="fence_apc_snmp">
<instance_attributes id="apc_snmp_node2_mk-pdu02-instance_attributes">
<nvpair id="apc_snmp_node2_mk-pdu02-instance_attributes-ip" name="ip" value="10.201.2.4"/>
<nvpair id="apc_snmp_node2_mk-pdu02-instance_attributes-pcmk_host_list" name="pcmk_host_list" value="mk-a02n02"/>
<nvpair id="apc_snmp_node2_mk-pdu02-instance_attributes-pcmk_off_action" name="pcmk_off_action" value="reboot"/>
<nvpair id="apc_snmp_node2_mk-pdu02-instance_attributes-port" name="port" value="4"/>
</instance_attributes>
<operations>
<op id="apc_snmp_node2_mk-pdu02-monitor-interval-60" interval="60" name="monitor"/>
</operations>
</primitive>
<primitive class="ocf" id="srv07-el6" provider="alteeve" type="server">
<instance_attributes id="srv07-el6-instance_attributes">
<nvpair id="srv07-el6-instance_attributes-name" name="name" value="srv07-el6"/>
</instance_attributes>
<meta_attributes id="srv07-el6-meta_attributes">
<nvpair id="srv07-el6-meta_attributes-allow-migrate" name="allow-migrate" value="true"/>
<nvpair id="srv07-el6-meta_attributes-migrate_to" name="migrate_to" value="INFINITY"/>
<nvpair id="srv07-el6-meta_attributes-stop" name="stop" value="INFINITY"/>
<nvpair id="srv07-el6-meta_attributes-target-role" name="target-role" value="Stopped"/>
</meta_attributes>
<operations>
<op id="srv07-el6-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="600"/>
<op id="srv07-el6-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="INFINITY"/>
<op id="srv07-el6-monitor-interval-60" interval="60" name="monitor" on-fail="block"/>
<op id="srv07-el6-notify-interval-0s" interval="0s" name="notify" timeout="20"/>
<op id="srv07-el6-start-interval-0s" interval="0s" name="start" timeout="30"/>
<op id="srv07-el6-stop-interval-0s" interval="0s" name="stop" timeout="INFINITY"/>
</operations>
</primitive>
<primitive class="ocf" id="srv01-sql" provider="alteeve" type="server">
<instance_attributes id="srv01-sql-instance_attributes">
<nvpair id="srv01-sql-instance_attributes-name" name="name" value="srv01-sql"/>
</instance_attributes>
<meta_attributes id="srv01-sql-meta_attributes">
<nvpair id="srv01-sql-meta_attributes-allow-migrate" name="allow-migrate" value="true"/>
<nvpair id="srv01-sql-meta_attributes-migrate_to" name="migrate_to" value="INFINITY"/>
<nvpair id="srv01-sql-meta_attributes-stop" name="stop" value="INFINITY"/>
</meta_attributes>
<operations>
<op id="srv01-sql-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="600"/>
<op id="srv01-sql-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="INFINITY"/>
<op id="srv01-sql-monitor-interval-60" interval="60" name="monitor" on-fail="block"/>
<op id="srv01-sql-notify-interval-0s" interval="0s" name="notify" timeout="20"/>
<op id="srv01-sql-start-interval-0s" interval="0s" name="start" timeout="30"/>
<op id="srv01-sql-stop-interval-0s" interval="0s" name="stop" timeout="INFINITY"/>
</operations>
</primitive>
<primitive class="ocf" id="srv02-lab1" provider="alteeve" type="server">
<instance_attributes id="srv02-lab1-instance_attributes">
<nvpair id="srv02-lab1-instance_attributes-name" name="name" value="srv02-lab1"/>
</instance_attributes>
<meta_attributes id="srv02-lab1-meta_attributes">
<nvpair id="srv02-lab1-meta_attributes-allow-migrate" name="allow-migrate" value="true"/>
<nvpair id="srv02-lab1-meta_attributes-migrate_to" name="migrate_to" value="INFINITY"/>
<nvpair id="srv02-lab1-meta_attributes-stop" name="stop" value="INFINITY"/>
</meta_attributes>
<operations>
<op id="srv02-lab1-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="600"/>
<op id="srv02-lab1-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="INFINITY"/>
<op id="srv02-lab1-monitor-interval-60" interval="60" name="monitor" on-fail="block"/>
<op id="srv02-lab1-notify-interval-0s" interval="0s" name="notify" timeout="20"/>
<op id="srv02-lab1-start-interval-0s" interval="0s" name="start" timeout="30"/>
<op id="srv02-lab1-stop-interval-0s" interval="0s" name="stop" timeout="INFINITY"/>
</operations>
</primitive>
<primitive class="ocf" id="srv08-m2-psql" provider="alteeve" type="server">
<instance_attributes id="srv08-m2-psql-instance_attributes">
<nvpair id="srv08-m2-psql-instance_attributes-name" name="name" value="srv08-m2-psql"/>
</instance_attributes>
<meta_attributes id="srv08-m2-psql-meta_attributes">
<nvpair id="srv08-m2-psql-meta_attributes-allow-migrate" name="allow-migrate" value="true"/>
<nvpair id="srv08-m2-psql-meta_attributes-migrate_to" name="migrate_to" value="INFINITY"/>
<nvpair id="srv08-m2-psql-meta_attributes-stop" name="stop" value="INFINITY"/>
<nvpair id="srv08-m2-psql-meta_attributes-target-role" name="target-role" value="Stopped"/>
</meta_attributes>
<operations>
<op id="srv08-m2-psql-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="600"/>
<op id="srv08-m2-psql-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="INFINITY"/>
<op id="srv08-m2-psql-monitor-interval-60" interval="60" name="monitor" on-fail="block"/>
<op id="srv08-m2-psql-notify-interval-0s" interval="0s" name="notify" timeout="20"/>
<op id="srv08-m2-psql-start-interval-0s" interval="0s" name="start" timeout="30"/>
<op id="srv08-m2-psql-stop-interval-0s" interval="0s" name="stop" timeout="INFINITY"/>
</operations>
</primitive>
</resources>
<constraints>
<rsc_location id="location-srv07-el6-mk-a02n01-200" node="mk-a02n01" rsc="srv07-el6" score="200"/>
<rsc_location id="location-srv07-el6-mk-a02n02-100" node="mk-a02n02" rsc="srv07-el6" score="100"/>
<rsc_location id="location-srv01-sql-mk-a02n01-200" node="mk-a02n01" rsc="srv01-sql" score="200"/>
<rsc_location id="location-srv01-sql-mk-a02n02-100" node="mk-a02n02" rsc="srv01-sql" score="100"/>
<rsc_location id="location-srv02-lab1-mk-a02n01-200" node="mk-a02n01" rsc="srv02-lab1" score="200"/>
<rsc_location id="location-srv02-lab1-mk-a02n02-100" node="mk-a02n02" rsc="srv02-lab1" score="100"/>
<rsc_location id="location-srv08-m2-psql-mk-a02n01-200" node="mk-a02n01" rsc="srv08-m2-psql" score="200"/>
<rsc_location id="location-srv08-m2-psql-mk-a02n02-100" node="mk-a02n02" rsc="srv08-m2-psql" score="100"/>
</constraints>
<fencing-topology>
<fencing-level devices="ipmilan_node1" id="fl-mk-a02n01-1" index="1" target="mk-a02n01"/>
<fencing-level devices="apc_snmp_node1_mk-pdu01,apc_snmp_node1_mk-pdu02" id="fl-mk-a02n01-2" index="2" target="mk-a02n01"/>
<fencing-level devices="ipmilan_node2" id="fl-mk-a02n02-1" index="1" target="mk-a02n02"/>
<fencing-level devices="apc_snmp_node2_mk-pdu01,apc_snmp_node2_mk-pdu02" id="fl-mk-a02n02-2" index="2" target="mk-a02n02"/>
</fencing-topology>
</configuration>
<status>
<node_state id="2" uname="mk-a02n02" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
<lrm id="2">
<lrm_resources>
<lrm_resource id="ipmilan_node1" type="fence_ipmilan" class="stonith">
<lrm_rsc_op id="ipmilan_node1_last_0" operation_key="ipmilan_node1_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="11:0:7:2c429916-850e-4468-abb7-8f95e44fdf8e" transition-magic="0:7;11:0:7:2c429916-850e-4468-abb7-8f95e44fdf8e" exit-reason="" on_node="mk-a02n02" call-id="5" rc-code="7" op-status="0" interval="0" last-rc-change="1600708714" last-run="1600708714" exec-time="1" queue-time="1" op-digest="230c3c46a7f39ff7a5ff7f1b8aa9f17d" op-secure-params=" password passwd " op-secure-digest="a8bb97c4c1cae8f90e445a0ce85ecc19"/>
</lrm_resource>
<lrm_resource id="apc_snmp_node1_mk-pdu01" type="fence_apc_snmp" class="stonith">
<lrm_rsc_op id="apc_snmp_node1_mk-pdu01_last_0" operation_key="apc_snmp_node1_mk-pdu01_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="23:0:0:2c429916-850e-4468-abb7-8f95e44fdf8e" transition-magic="0:0;23:0:0:2c429916-850e-4468-abb7-8f95e44fdf8e" exit-reason="" on_node="mk-a02n02" call-id="42" rc-code="0" op-status="0" interval="0" last-rc-change="1600708715" last-run="1600708715" exec-time="623" queue-time="0" op-digest="6b6191eeb61cd595ab0a26ec9762f8aa" op-secure-params=" password passwd " op-secure-digest="1dc851b0efa605b4ec3f03e3a3ba62f7"/>
<lrm_rsc_op id="apc_snmp_node1_mk-pdu01_monitor_60000" operation_key="apc_snmp_node1_mk-pdu01_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="24:0:0:2c429916-850e-4468-abb7-8f95e44fdf8e" transition-magic="0:0;24:0:0:2c429916-850e-4468-abb7-8f95e44fdf8e" exit-reason="" on_node="mk-a02n02" call-id="47" rc-code="0" op-status="0" interval="60000" last-rc-change="1600708715" exec-time="556" queue-time="0" op-digest="9dd197b1c8871a78c74a32b26949998d" op-secure-params=" password passwd " op-secure-digest="1dc851b0efa605b4ec3f03e3a3ba62f7"/>
</lrm_resource>
<lrm_resource id="apc_snmp_node1_mk-pdu02" type="fence_apc_snmp" class="stonith">
<lrm_rsc_op id="apc_snmp_node1_mk-pdu02_last_0" operation_key="apc_snmp_node1_mk-pdu02_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="13:0:7:2c429916-850e-4468-abb7-8f95e44fdf8e" transition-magic="0:7;13:0:7:2c429916-850e-4468-abb7-8f95e44fdf8e" exit-reason="" on_node="mk-a02n02" call-id="13" rc-code="7" op-status="0" interval="0" last-rc-change="1600708714" last-run="1600708714" exec-time="0" queue-time="0" op-digest="f4b11aca778aa58d81b7fa096bfe3fb4" op-secure-params=" password passwd " op-secure-digest="78517effd4af72191ac2c0b9d8567fcd"/>
</lrm_resource>
<lrm_resource id="ipmilan_node2" type="fence_ipmilan" class="stonith">
<lrm_rsc_op id="ipmilan_node2_last_0" operation_key="ipmilan_node2_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="27:0:0:2c429916-850e-4468-abb7-8f95e44fdf8e" transition-magic="0:0;27:0:0:2c429916-850e-4468-abb7-8f95e44fdf8e" exit-reason="" on_node="mk-a02n02" call-id="43" rc-code="0" op-status="0" interval="0" last-rc-change="1600708715" last-run="1600708715" exec-time="100" queue-time="0" op-digest="e759a456df902485096d4a48725ed81c" op-secure-params=" password passwd " op-secure-digest="47989163387c397e63fa3acdbec0d274"/>
<lrm_rsc_op id="ipmilan_node2_monitor_60000" operation_key="ipmilan_node2_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="28:0:0:2c429916-850e-4468-abb7-8f95e44fdf8e" transition-magic="0:0;28:0:0:2c429916-850e-4468-abb7-8f95e44fdf8e" exit-reason="" on_node="mk-a02n02" call-id="45" rc-code="0" op-status="0" interval="60000" last-rc-change="1600708715" exec-time="86" queue-time="0" op-digest="467ef5117cbb737e5c6fc23b58809791" op-secure-params=" password passwd " op-secure-digest="47989163387c397e63fa3acdbec0d274"/>
</lrm_resource>
<lrm_resource id="apc_snmp_node2_mk-pdu01" type="fence_apc_snmp" class="stonith">
<lrm_rsc_op id="apc_snmp_node2_mk-pdu01_last_0" operation_key="apc_snmp_node2_mk-pdu01_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="15:0:7:2c429916-850e-4468-abb7-8f95e44fdf8e" transition-magic="0:7;15:0:7:2c429916-850e-4468-abb7-8f95e44fdf8e" exit-reason="" on_node="mk-a02n02" call-id="21" rc-code="7" op-status="0" interval="0" last-rc-change="1600708714" last-run="1600708714" exec-time="0" queue-time="0" op-digest="3d4af69481cb01c8c8f0f8af95940b99" op-secure-params=" password passwd " op-secure-digest="fd2959d25b0a20f6d1bc630f7565fd78"/>
</lrm_resource>
<lrm_resource id="apc_snmp_node2_mk-pdu02" type="fence_apc_snmp" class="stonith">
<lrm_rsc_op id="apc_snmp_node2_mk-pdu02_last_0" operation_key="apc_snmp_node2_mk-pdu02_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="31:0:0:2c429916-850e-4468-abb7-8f95e44fdf8e" transition-magic="0:0;31:0:0:2c429916-850e-4468-abb7-8f95e44fdf8e" exit-reason="" on_node="mk-a02n02" call-id="44" rc-code="0" op-status="0" interval="0" last-rc-change="1600708715" last-run="1600708715" exec-time="603" queue-time="0" op-digest="7787bf20740a07e14145707988b18000" op-secure-params=" password passwd " op-secure-digest="11d1e757682ff46234d9816e06534953"/>
<lrm_rsc_op id="apc_snmp_node2_mk-pdu02_monitor_60000" operation_key="apc_snmp_node2_mk-pdu02_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="32:0:0:2c429916-850e-4468-abb7-8f95e44fdf8e" transition-magic="0:0;32:0:0:2c429916-850e-4468-abb7-8f95e44fdf8e" exit-reason="" on_node="mk-a02n02" call-id="46" rc-code="0" op-status="0" interval="60000" last-rc-change="1600708715" exec-time="555" queue-time="0" op-digest="910a16919098d7bca091e972cf8844f5" op-secure-params=" password passwd " op-secure-digest="11d1e757682ff46234d9816e06534953"/>
</lrm_resource>
<lrm_resource id="srv07-el6" type="server" class="ocf" provider="alteeve">
<lrm_rsc_op id="srv07-el6_last_0" operation_key="srv07-el6_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="17:0:7:2c429916-850e-4468-abb7-8f95e44fdf8e" transition-magic="0:7;17:0:7:2c429916-850e-4468-abb7-8f95e44fdf8e" exit-reason="" on_node="mk-a02n02" call-id="29" rc-code="7" op-status="0" interval="0" last-rc-change="1600708715" last-run="1600708715" exec-time="605" queue-time="0" op-digest="41dcb3443c331f2fe7ae92962905159f"/>
</lrm_resource>
<lrm_resource id="srv01-sql" type="server" class="ocf" provider="alteeve">
<lrm_rsc_op id="srv01-sql_last_0" operation_key="srv01-sql_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="18:0:7:2c429916-850e-4468-abb7-8f95e44fdf8e" transition-magic="0:7;18:0:7:2c429916-850e-4468-abb7-8f95e44fdf8e" exit-reason="" on_node="mk-a02n02" call-id="33" rc-code="7" op-status="0" interval="0" last-rc-change="1600708715" last-run="1600708715" exec-time="604" queue-time="0" op-digest="7acff34e45470837bd51c6d670b9878b"/>
</lrm_resource>
<lrm_resource id="srv02-lab1" type="server" class="ocf" provider="alteeve">
<lrm_rsc_op id="srv02-lab1_last_0" operation_key="srv02-lab1_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="19:0:7:2c429916-850e-4468-abb7-8f95e44fdf8e" transition-magic="0:7;19:0:7:2c429916-850e-4468-abb7-8f95e44fdf8e" exit-reason="" on_node="mk-a02n02" call-id="37" rc-code="7" op-status="0" interval="0" last-rc-change="1600708715" last-run="1600708715" exec-time="603" queue-time="0" op-digest="c7a4471d0df53d7aab5392a1ba7d67e1"/>
</lrm_resource>
<lrm_resource id="srv08-m2-psql" type="server" class="ocf" provider="alteeve">
<lrm_rsc_op id="srv08-m2-psql_last_0" operation_key="srv08-m2-psql_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="20:0:7:2c429916-850e-4468-abb7-8f95e44fdf8e" transition-magic="0:7;20:0:7:2c429916-850e-4468-abb7-8f95e44fdf8e" exit-reason="" on_node="mk-a02n02" call-id="41" rc-code="7" op-status="0" interval="0" last-rc-change="1600708715" last-run="1600708715" exec-time="602" queue-time="0" op-digest="79b65e1a3736d1835da977ef2dee200d"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
<node_state id="1" uname="mk-a02n01" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
<lrm id="1">
<lrm_resources>
<lrm_resource id="ipmilan_node1" type="fence_ipmilan" class="stonith">
<lrm_rsc_op id="ipmilan_node1_last_0" operation_key="ipmilan_node1_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="21:0:0:2c429916-850e-4468-abb7-8f95e44fdf8e" transition-magic="0:0;21:0:0:2c429916-850e-4468-abb7-8f95e44fdf8e" exit-reason="" on_node="mk-a02n01" call-id="42" rc-code="0" op-status="0" interval="0" last-rc-change="1600708716" last-run="1600708716" exec-time="172" queue-time="0" op-digest="230c3c46a7f39ff7a5ff7f1b8aa9f17d" op-secure-params=" password passwd " op-secure-digest="a8bb97c4c1cae8f90e445a0ce85ecc19"/>
<lrm_rsc_op id="ipmilan_node1_monitor_60000" operation_key="ipmilan_node1_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="22:0:0:2c429916-850e-4468-abb7-8f95e44fdf8e" transition-magic="0:0;22:0:0:2c429916-850e-4468-abb7-8f95e44fdf8e" exit-reason="" on_node="mk-a02n01" call-id="45" rc-code="0" op-status="0" interval="60000" last-rc-change="1600708716" exec-time="90" queue-time="0" op-digest="7064441a5f8ccc94d13cc9a1433de0a5" op-secure-params=" password passwd " op-secure-digest="a8bb97c4c1cae8f90e445a0ce85ecc19"/>
</lrm_resource>
<lrm_resource id="apc_snmp_node1_mk-pdu01" type="fence_apc_snmp" class="stonith">
<lrm_rsc_op id="apc_snmp_node1_mk-pdu01_last_0" operation_key="apc_snmp_node1_mk-pdu01_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="2:0:7:2c429916-850e-4468-abb7-8f95e44fdf8e" transition-magic="0:7;2:0:7:2c429916-850e-4468-abb7-8f95e44fdf8e" exit-reason="" on_node="mk-a02n01" call-id="9" rc-code="7" op-status="0" interval="0" last-rc-change="1600708714" last-run="1600708714" exec-time="0" queue-time="0" op-digest="6b6191eeb61cd595ab0a26ec9762f8aa" op-secure-params=" password passwd " op-secure-digest="1dc851b0efa605b4ec3f03e3a3ba62f7"/>
</lrm_resource>
<lrm_resource id="apc_snmp_node1_mk-pdu02" type="fence_apc_snmp" class="stonith">
<lrm_rsc_op id="apc_snmp_node1_mk-pdu02_last_0" operation_key="apc_snmp_node1_mk-pdu02_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="25:0:0:2c429916-850e-4468-abb7-8f95e44fdf8e" transition-magic="0:0;25:0:0:2c429916-850e-4468-abb7-8f95e44fdf8e" exit-reason="" on_node="mk-a02n01" call-id="43" rc-code="0" op-status="0" interval="0" last-rc-change="1600708716" last-run="1600708716" exec-time="666" queue-time="0" op-digest="f4b11aca778aa58d81b7fa096bfe3fb4" op-secure-params=" password passwd " op-secure-digest="78517effd4af72191ac2c0b9d8567fcd"/>
<lrm_rsc_op id="apc_snmp_node1_mk-pdu02_monitor_60000" operation_key="apc_snmp_node1_mk-pdu02_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="26:0:0:2c429916-850e-4468-abb7-8f95e44fdf8e" transition-magic="0:0;26:0:0:2c429916-850e-4468-abb7-8f95e44fdf8e" exit-reason="" on_node="mk-a02n01" call-id="46" rc-code="0" op-status="0" interval="60000" last-rc-change="1600708717" exec-time="574" queue-time="1" op-digest="da20bfed231d75a3b22f97eb06bb445f" op-secure-params=" password passwd " op-secure-digest="78517effd4af72191ac2c0b9d8567fcd"/>
</lrm_resource>
<lrm_resource id="ipmilan_node2" type="fence_ipmilan" class="stonith">
<lrm_rsc_op id="ipmilan_node2_last_0" operation_key="ipmilan_node2_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="4:0:7:2c429916-850e-4468-abb7-8f95e44fdf8e" transition-magic="0:7;4:0:7:2c429916-850e-4468-abb7-8f95e44fdf8e" exit-reason="" on_node="mk-a02n01" call-id="17" rc-code="7" op-status="0" interval="0" last-rc-change="1600708714" last-run="1600708714" exec-time="0" queue-time="0" op-digest="e759a456df902485096d4a48725ed81c" op-secure-params=" password passwd " op-secure-digest="47989163387c397e63fa3acdbec0d274"/>
</lrm_resource>
<lrm_resource id="apc_snmp_node2_mk-pdu01" type="fence_apc_snmp" class="stonith">
<lrm_rsc_op id="apc_snmp_node2_mk-pdu01_last_0" operation_key="apc_snmp_node2_mk-pdu01_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="29:0:0:2c429916-850e-4468-abb7-8f95e44fdf8e" transition-magic="0:0;29:0:0:2c429916-850e-4468-abb7-8f95e44fdf8e" exit-reason="" on_node="mk-a02n01" call-id="44" rc-code="0" op-status="0" interval="0" last-rc-change="1600708716" last-run="1600708716" exec-time="675" queue-time="0" op-digest="3d4af69481cb01c8c8f0f8af95940b99" op-secure-params=" password passwd " op-secure-digest="fd2959d25b0a20f6d1bc630f7565fd78"/>
<lrm_rsc_op id="apc_snmp_node2_mk-pdu01_monitor_60000" operation_key="apc_snmp_node2_mk-pdu01_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="30:0:0:2c429916-850e-4468-abb7-8f95e44fdf8e" transition-magic="0:0;30:0:0:2c429916-850e-4468-abb7-8f95e44fdf8e" exit-reason="" on_node="mk-a02n01" call-id="47" rc-code="0" op-status="0" interval="60000" last-rc-change="1600708717" exec-time="565" queue-time="0" op-digest="5b8d168b9627dad87e1ba2edace17f1e" op-secure-params=" password passwd " op-secure-digest="fd2959d25b0a20f6d1bc630f7565fd78"/>
</lrm_resource>
<lrm_resource id="apc_snmp_node2_mk-pdu02" type="fence_apc_snmp" class="stonith">
<lrm_rsc_op id="apc_snmp_node2_mk-pdu02_last_0" operation_key="apc_snmp_node2_mk-pdu02_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="6:0:7:2c429916-850e-4468-abb7-8f95e44fdf8e" transition-magic="0:7;6:0:7:2c429916-850e-4468-abb7-8f95e44fdf8e" exit-reason="" on_node="mk-a02n01" call-id="25" rc-code="7" op-status="0" interval="0" last-rc-change="1600708714" last-run="1600708714" exec-time="0" queue-time="0" op-digest="7787bf20740a07e14145707988b18000" op-secure-params=" password passwd " op-secure-digest="11d1e757682ff46234d9816e06534953"/>
</lrm_resource>
<lrm_resource id="srv07-el6" type="server" class="ocf" provider="alteeve">
<lrm_rsc_op id="srv07-el6_last_0" operation_key="srv07-el6_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="7:0:7:2c429916-850e-4468-abb7-8f95e44fdf8e" transition-magic="0:7;7:0:7:2c429916-850e-4468-abb7-8f95e44fdf8e" exit-reason="" on_node="mk-a02n01" call-id="29" rc-code="7" op-status="0" interval="0" last-rc-change="1600708716" last-run="1600708716" exec-time="598" queue-time="0" op-digest="41dcb3443c331f2fe7ae92962905159f"/>
</lrm_resource>
<lrm_resource id="srv01-sql" type="server" class="ocf" provider="alteeve">
<lrm_rsc_op id="srv01-sql_last_0" operation_key="srv01-sql_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="19:1:0:2c429916-850e-4468-abb7-8f95e44fdf8e" transition-magic="0:0;19:1:0:2c429916-850e-4468-abb7-8f95e44fdf8e" exit-reason="" on_node="mk-a02n01" call-id="48" rc-code="0" op-status="0" interval="0" last-rc-change="1600709387" last-run="1600709387" exec-time="13119" queue-time="0" op-digest="7acff34e45470837bd51c6d670b9878b"/>
<lrm_rsc_op id="srv01-sql_monitor_60000" operation_key="srv01-sql_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="20:1:0:2c429916-850e-4468-abb7-8f95e44fdf8e" transition-magic="0:0;20:1:0:2c429916-850e-4468-abb7-8f95e44fdf8e" exit-reason="" on_node="mk-a02n01" call-id="49" rc-code="0" op-status="0" interval="60000" last-rc-change="1600709400" exec-time="546" queue-time="0" op-digest="0434e67501e3e7af47a547723c35b411"/>
</lrm_resource>
<lrm_resource id="srv02-lab1" type="server" class="ocf" provider="alteeve">
<lrm_rsc_op id="srv02-lab1_last_0" operation_key="srv02-lab1_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="22:2:0:2c429916-850e-4468-abb7-8f95e44fdf8e" transition-magic="0:0;22:2:0:2c429916-850e-4468-abb7-8f95e44fdf8e" exit-reason="" on_node="mk-a02n01" call-id="50" rc-code="0" op-status="0" interval="0" last-rc-change="1600709438" last-run="1600709438" exec-time="12668" queue-time="0" op-digest="c7a4471d0df53d7aab5392a1ba7d67e1"/>
<lrm_rsc_op id="srv02-lab1_monitor_60000" operation_key="srv02-lab1_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="23:2:0:2c429916-850e-4468-abb7-8f95e44fdf8e" transition-magic="0:0;23:2:0:2c429916-850e-4468-abb7-8f95e44fdf8e" exit-reason="" on_node="mk-a02n01" call-id="51" rc-code="0" op-status="0" interval="60000" last-rc-change="1600709451" exec-time="549" queue-time="0" op-digest="435d654a0384ef5a77a7517d682950ce"/>
</lrm_resource>
<lrm_resource id="srv08-m2-psql" type="server" class="ocf" provider="alteeve">
<lrm_rsc_op id="srv08-m2-psql_last_0" operation_key="srv08-m2-psql_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="10:0:7:2c429916-850e-4468-abb7-8f95e44fdf8e" transition-magic="0:7;10:0:7:2c429916-850e-4468-abb7-8f95e44fdf8e" exit-reason="" on_node="mk-a02n01" call-id="41" rc-code="7" op-status="0" interval="0" last-rc-change="1600708716" last-run="1600708716" exec-time="596" queue-time="0" op-digest="79b65e1a3736d1835da977ef2dee200d"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
</status>
</cib>
--

@ -21,7 +21,7 @@ NOTE: All string keys MUST be prefixed with the agent name! ie: 'scan_cluster_lo
<key name="scan_cluster_log_0002">This host is a: [#!variable!host_type!#], this agent is only useful on nodes. Exiting.</key>
<!-- Message entries (usually meant to be alerts) -->
<key name="scan_cluster_message_0001"></key>
<key name="scan_cluster_message_0001">We're node 2, and node 1 is running as well. Exiting as only one node needs to run this agent.</key>
<!-- Units -->
<key name="scan_cluster_unit_0001"></key>

@ -216,6 +216,21 @@ The error was:
<key name="error_0141">There appears to be no mail server in the database with the UUID: [#!variable!uuid!#].</key>
<key name="error_0142">There alert level: [#!variable!alert_level!#] is invalid. Valid values are '1' / 'critical', '2' / 'warning, '3' / 'notice', and '4' / 'info'.</key>
<key name="error_0143">Failed to write the email alert file: [#!variable!file!#]! Unable to process the alert. Check the logs above for possible reasons for the error.</key>
<key name="error_0144">I was asked to change the preferred host node of the server: [#!variable!server!#] to: [#!variable!node!#], but that doesn't match the name of either node in the cluster. The node names are: [#!variable!node1!#] and [#!variable!node2!#].</key>
<key name="error_0145">Unable to boot the server: [#!variable!server!#] as the cluster isn't running or there was a problem parsing the cluster CIB.</key>
<key name="error_0146">Unable to boot the server: [#!variable!server!#] as this host is not a node.</key>
<key name="error_0147">Unable to boot the server: [#!variable!server!#] as this node is not (yet) a full member of the cluster.</key>
<key name="error_0148">Unable to set the preferred host of the server: [#!variable!server!#] to: [#!variable!node!#] as this node is not (yet) a full member of the cluster.</key>
<key name="error_0149">Unable to boot the server: [#!variable!server!#] as this server was not found in the cluster information base (CIB).</key>
<key name="error_0150">Unable to shut down the server: [#!variable!server!#] as this host is not a node.</key>
<key name="error_0151">Unable to shut down the server: [#!variable!server!#] as the cluster isn't running or there was a problem parsing the cluster CIB.</key>
<key name="error_0152">Unable to shut down the server: [#!variable!server!#] as this node is not (yet) a full member of the cluster.</key>
<key name="error_0153">Unable to shut down the server: [#!variable!server!#] as this server was not found in the cluster information base (CIB).</key>
<key name="error_0154">Unable to migrate the server: [#!variable!server!#] as this host is not a node.</key>
<key name="error_0155">Unable to migrate the server: [#!variable!server!#] as the cluster isn't running or there was a problem parsing the cluster CIB.</key>
<key name="error_0156">Unable to migrate the server: [#!variable!server!#] as this node is not (yet) a full member of the cluster.</key>
<key name="error_0157">Unable to migrate the server: [#!variable!server!#] as the peer node is not (yet) a full member of the cluster.</key>
<key name="error_0158">Unable to migrate the server: [#!variable!server!#] as this server was not found in the cluster information base (CIB).</key>
<!-- Table headers -->
<key name="header_0001">Current Network Interfaces and States</key>
@ -1032,6 +1047,14 @@ The file: [#!variable!file!#] needs to be updated. The difference is:
<key name="log_0545">The table: [#!variable!table!#] does NOT exists in the database on the host: [#!variable!host!#]. Will load the schema file: [#!variable!file!#] now.</key>
<key name="log_0546">The passed in 'temperature_state' value: [#!variable!temperature_state!#] is invalid. The value must be 'ok', 'warning' or 'critical'.</key>
<key name="log_0547">The passed in 'temperature_is' value: [#!variable!temperature_is!#] is invalid. The value must be 'nominal', 'warning' or 'critical'.</key>
<key name="log_0548">The server: [#!variable!server!#] is already running, no need to boot it.</key>
<key name="log_0549">The server: [#!variable!server!#] is already running on the target node: [#!variable!requested_node!#], migration not needed.</key>
<key name="log_0550">Waiting for the server: [#!variable!server!#] to finish migrating to the node: [#!variable!requested_node!#]...</key>
<key name="log_0551">The migration of the server: [#!variable!server!#] to the node: [#!variable!requested_node!#] is complete!</key>
<key name="log_0552">Waiting for the server: [#!variable!server!#] to boot...</key>
<key name="log_0553">The server: [#!variable!server!#] has booted!</key>
<key name="log_0554">Waiting for the server: [#!variable!server!#] to shut down...</key>
<key name="log_0555">The server: [#!variable!server!#] is now off.</key>
<!-- Messages for users (less technical than log entries), though sometimes used for logs, too. -->
<key name="message_0001">The host name: [#!variable!target!#] does not resolve to an IP address.</key>
@ -1814,6 +1837,9 @@ The error was:
<key name="warning_0056">[ Warning ] - The DR Host is set to the same machine as Node 2.</key>
<key name="warning_0057">[ Warning ] - The 'libvirtd' daemon is not running. Checking to see if the server is running by looking for its PID (server state won't be available). Please start 'libvirtd'!</key>
<key name="warning_0058">[ Warning ] - The server: [#!variable!server!#] is in a crashed state!</key>
<key name="warning_0059">[ Warning ] - The server: [#!variable!server!#] was asked to be booted on: [#!variable!requested_node!#], but it is is already running on: [#!variable!current_host!#].</key>
<key name="warning_0060">[ Warning ] - The server: [#!variable!server!#] was asked to be shutdown, but it's in an unexpected state: [#!variable!state!#] on the host: [#!variable!current_host!#]. Aborting.</key>
<key name="warning_0061">[ Warning ] - The server: [#!variable!server!#] was asked to be migrated to: [#!variable!requested_node!#], but the server is off. Aborting.</key>
</language>
<!-- 日本語 -->

@ -29,7 +29,287 @@ print "Connecting to the database(s);\n";
$anvil->Database->connect({debug => 3});
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, 'print' => 1, level => 2, secure => 0, key => "log_0132"});
my $not_in_cluster = $anvil->Cluster->parse_cib({debug => 2});
$anvil->Cluster->shutdown_server({
debug => 2,
server => "srv07-el6",
});
$anvil->Cluster->shutdown_server({
debug => 2,
server => "srv01-sql",
});
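# A hedged sketch of the matching boot and migrate calls (commented out, not
# exercised by this test yet). Parameter names mirror the shutdown_server()
# calls above; the 'node' key and the values passed are illustrative
# assumptions for this sketch.
#$anvil->Cluster->boot_server({
#	debug  => 2,
#	server => "srv07-el6",
#	node   => "mk-a02n01",
#});
#$anvil->Cluster->migrate_server({
#	debug  => 2,
#	server => "srv01-sql",
#	node   => "mk-a02n02",
#});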
exit;
my $cib = '<cib crm_feature_set="3.3.0" validate-with="pacemaker-3.2" epoch="453" num_updates="8" admin_epoch="0" cib-last-written="Thu Sep 24 01:26:31 2020" update-origin="mk-a02n01" update-client="cibadmin" update-user="root" have-quorum="1" dc-uuid="1">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
<nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.0.3-5.el8_2.1-4b1f869f0f"/>
<nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
<nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="mk-anvil-02"/>
<nvpair id="cib-bootstrap-options-stonith-max-attempts" name="stonith-max-attempts" value="INFINITY"/>
<nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"/>
<nvpair id="cib-bootstrap-options-maintenance-mode" name="maintenance-mode" value="false"/>
<nvpair id="cib-bootstrap-options-last-lrm-refresh" name="last-lrm-refresh" value="1600924958"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="1" uname="mk-a02n01"/>
<node id="2" uname="mk-a02n02"/>
</nodes>
<resources>
<primitive class="stonith" id="ipmilan_node1" type="fence_ipmilan">
<instance_attributes id="ipmilan_node1-instance_attributes">
<nvpair id="ipmilan_node1-instance_attributes-ipaddr" name="ipaddr" value="10.201.13.1"/>
<nvpair id="ipmilan_node1-instance_attributes-password" name="password" value="another secret p"/>
<nvpair id="ipmilan_node1-instance_attributes-pcmk_host_list" name="pcmk_host_list" value="mk-a02n01"/>
<nvpair id="ipmilan_node1-instance_attributes-username" name="username" value="admin"/>
</instance_attributes>
<operations>
<op id="ipmilan_node1-monitor-interval-60" interval="60" name="monitor"/>
</operations>
</primitive>
<primitive class="stonith" id="apc_snmp_node1_mk-pdu01" type="fence_apc_snmp">
<instance_attributes id="apc_snmp_node1_mk-pdu01-instance_attributes">
<nvpair id="apc_snmp_node1_mk-pdu01-instance_attributes-ip" name="ip" value="10.201.2.3"/>
<nvpair id="apc_snmp_node1_mk-pdu01-instance_attributes-pcmk_host_list" name="pcmk_host_list" value="mk-a02n01"/>
<nvpair id="apc_snmp_node1_mk-pdu01-instance_attributes-pcmk_off_action" name="pcmk_off_action" value="reboot"/>
<nvpair id="apc_snmp_node1_mk-pdu01-instance_attributes-port" name="port" value="3"/>
</instance_attributes>
<operations>
<op id="apc_snmp_node1_mk-pdu01-monitor-interval-60" interval="60" name="monitor"/>
</operations>
</primitive>
<primitive class="stonith" id="apc_snmp_node1_mk-pdu02" type="fence_apc_snmp">
<instance_attributes id="apc_snmp_node1_mk-pdu02-instance_attributes">
<nvpair id="apc_snmp_node1_mk-pdu02-instance_attributes-ip" name="ip" value="10.201.2.4"/>
<nvpair id="apc_snmp_node1_mk-pdu02-instance_attributes-pcmk_host_list" name="pcmk_host_list" value="mk-a02n01"/>
<nvpair id="apc_snmp_node1_mk-pdu02-instance_attributes-pcmk_off_action" name="pcmk_off_action" value="reboot"/>
<nvpair id="apc_snmp_node1_mk-pdu02-instance_attributes-port" name="port" value="3"/>
</instance_attributes>
<operations>
<op id="apc_snmp_node1_mk-pdu02-monitor-interval-60" interval="60" name="monitor"/>
</operations>
</primitive>
<primitive class="stonith" id="ipmilan_node2" type="fence_ipmilan">
<instance_attributes id="ipmilan_node2-instance_attributes">
<nvpair id="ipmilan_node2-instance_attributes-ipaddr" name="ipaddr" value="10.201.13.2"/>
<nvpair id="ipmilan_node2-instance_attributes-password" name="password" value="another secret p"/>
<nvpair id="ipmilan_node2-instance_attributes-pcmk_host_list" name="pcmk_host_list" value="mk-a02n02"/>
<nvpair id="ipmilan_node2-instance_attributes-username" name="username" value="admin"/>
</instance_attributes>
<operations>
<op id="ipmilan_node2-monitor-interval-60" interval="60" name="monitor"/>
</operations>
</primitive>
<primitive class="stonith" id="apc_snmp_node2_mk-pdu01" type="fence_apc_snmp">
<instance_attributes id="apc_snmp_node2_mk-pdu01-instance_attributes">
<nvpair id="apc_snmp_node2_mk-pdu01-instance_attributes-ip" name="ip" value="10.201.2.3"/>
<nvpair id="apc_snmp_node2_mk-pdu01-instance_attributes-pcmk_host_list" name="pcmk_host_list" value="mk-a02n02"/>
<nvpair id="apc_snmp_node2_mk-pdu01-instance_attributes-pcmk_off_action" name="pcmk_off_action" value="reboot"/>
<nvpair id="apc_snmp_node2_mk-pdu01-instance_attributes-port" name="port" value="4"/>
</instance_attributes>
<operations>
<op id="apc_snmp_node2_mk-pdu01-monitor-interval-60" interval="60" name="monitor"/>
</operations>
</primitive>
<primitive class="stonith" id="apc_snmp_node2_mk-pdu02" type="fence_apc_snmp">
<instance_attributes id="apc_snmp_node2_mk-pdu02-instance_attributes">
<nvpair id="apc_snmp_node2_mk-pdu02-instance_attributes-ip" name="ip" value="10.201.2.4"/>
<nvpair id="apc_snmp_node2_mk-pdu02-instance_attributes-pcmk_host_list" name="pcmk_host_list" value="mk-a02n02"/>
<nvpair id="apc_snmp_node2_mk-pdu02-instance_attributes-pcmk_off_action" name="pcmk_off_action" value="reboot"/>
<nvpair id="apc_snmp_node2_mk-pdu02-instance_attributes-port" name="port" value="4"/>
</instance_attributes>
<operations>
<op id="apc_snmp_node2_mk-pdu02-monitor-interval-60" interval="60" name="monitor"/>
</operations>
</primitive>
<primitive class="ocf" id="srv07-el6" provider="alteeve" type="server">
<instance_attributes id="srv07-el6-instance_attributes">
<nvpair id="srv07-el6-instance_attributes-name" name="name" value="srv07-el6"/>
</instance_attributes>
<meta_attributes id="srv07-el6-meta_attributes">
<nvpair id="srv07-el6-meta_attributes-allow-migrate" name="allow-migrate" value="true"/>
<nvpair id="srv07-el6-meta_attributes-migrate_to" name="migrate_to" value="INFINITY"/>
<nvpair id="srv07-el6-meta_attributes-stop" name="stop" value="INFINITY"/>
</meta_attributes>
<operations>
<op id="srv07-el6-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="600"/>
<op id="srv07-el6-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="INFINITY"/>
<op id="srv07-el6-monitor-interval-60" interval="60" name="monitor" on-fail="block"/>
<op id="srv07-el6-notify-interval-0s" interval="0s" name="notify" timeout="20"/>
<op id="srv07-el6-start-interval-0s" interval="0s" name="start" timeout="30"/>
<op id="srv07-el6-stop-interval-0s" interval="0s" name="stop" timeout="INFINITY"/>
</operations>
</primitive>
<primitive class="ocf" id="srv01-sql" provider="alteeve" type="server">
<instance_attributes id="srv01-sql-instance_attributes">
<nvpair id="srv01-sql-instance_attributes-name" name="name" value="srv01-sql"/>
</instance_attributes>
<meta_attributes id="srv01-sql-meta_attributes">
<nvpair id="srv01-sql-meta_attributes-allow-migrate" name="allow-migrate" value="true"/>
<nvpair id="srv01-sql-meta_attributes-migrate_to" name="migrate_to" value="INFINITY"/>
<nvpair id="srv01-sql-meta_attributes-stop" name="stop" value="INFINITY"/>
</meta_attributes>
<operations>
<op id="srv01-sql-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="600"/>
<op id="srv01-sql-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="INFINITY"/>
<op id="srv01-sql-monitor-interval-60" interval="60" name="monitor" on-fail="block"/>
<op id="srv01-sql-notify-interval-0s" interval="0s" name="notify" timeout="20"/>
<op id="srv01-sql-start-interval-0s" interval="0s" name="start" timeout="30"/>
<op id="srv01-sql-stop-interval-0s" interval="0s" name="stop" timeout="INFINITY"/>
</operations>
</primitive>
<primitive class="ocf" id="srv02-lab1" provider="alteeve" type="server">
<instance_attributes id="srv02-lab1-instance_attributes">
<nvpair id="srv02-lab1-instance_attributes-name" name="name" value="srv02-lab1"/>
</instance_attributes>
<meta_attributes id="srv02-lab1-meta_attributes">
<nvpair id="srv02-lab1-meta_attributes-allow-migrate" name="allow-migrate" value="true"/>
<nvpair id="srv02-lab1-meta_attributes-migrate_to" name="migrate_to" value="INFINITY"/>
<nvpair id="srv02-lab1-meta_attributes-stop" name="stop" value="INFINITY"/>
<nvpair id="srv02-lab1-meta_attributes-target-role" name="target-role" value="Stopped"/>
</meta_attributes>
<operations>
<op id="srv02-lab1-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="600"/>
<op id="srv02-lab1-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="INFINITY"/>
<op id="srv02-lab1-monitor-interval-60" interval="60" name="monitor" on-fail="block"/>
<op id="srv02-lab1-notify-interval-0s" interval="0s" name="notify" timeout="20"/>
<op id="srv02-lab1-start-interval-0s" interval="0s" name="start" timeout="30"/>
<op id="srv02-lab1-stop-interval-0s" interval="0s" name="stop" timeout="INFINITY"/>
</operations>
</primitive>
<primitive class="ocf" id="srv08-m2-psql" provider="alteeve" type="server">
<instance_attributes id="srv08-m2-psql-instance_attributes">
<nvpair id="srv08-m2-psql-instance_attributes-name" name="name" value="srv08-m2-psql"/>
</instance_attributes>
<meta_attributes id="srv08-m2-psql-meta_attributes">
<nvpair id="srv08-m2-psql-meta_attributes-allow-migrate" name="allow-migrate" value="true"/>
<nvpair id="srv08-m2-psql-meta_attributes-migrate_to" name="migrate_to" value="INFINITY"/>
<nvpair id="srv08-m2-psql-meta_attributes-stop" name="stop" value="INFINITY"/>
<nvpair id="srv08-m2-psql-meta_attributes-target-role" name="target-role" value="Stopped"/>
</meta_attributes>
<operations>
<op id="srv08-m2-psql-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="600"/>
<op id="srv08-m2-psql-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="INFINITY"/>
<op id="srv08-m2-psql-monitor-interval-60" interval="60" name="monitor" on-fail="block"/>
<op id="srv08-m2-psql-notify-interval-0s" interval="0s" name="notify" timeout="20"/>
<op id="srv08-m2-psql-start-interval-0s" interval="0s" name="start" timeout="30"/>
<op id="srv08-m2-psql-stop-interval-0s" interval="0s" name="stop" timeout="INFINITY"/>
</operations>
</primitive>
</resources>
<constraints>
<rsc_location id="location-srv08-m2-psql-mk-a02n01-200" node="mk-a02n01" rsc="srv08-m2-psql" score="200"/>
<rsc_location id="location-srv08-m2-psql-mk-a02n02-100" node="mk-a02n02" rsc="srv08-m2-psql" score="100"/>
<rsc_location id="location-srv01-sql-mk-a02n02-100" node="mk-a02n02" rsc="srv01-sql" score="100"/>
<rsc_location id="location-srv01-sql-mk-a02n01-200" node="mk-a02n01" rsc="srv01-sql" score="200"/>
<rsc_location id="location-srv02-lab1-mk-a02n02-100" node="mk-a02n02" rsc="srv02-lab1" score="100"/>
<rsc_location id="location-srv02-lab1-mk-a02n01-200" node="mk-a02n01" rsc="srv02-lab1" score="200"/>
<rsc_location id="location-srv07-el6-mk-a02n01-200" node="mk-a02n01" rsc="srv07-el6" score="200"/>
<rsc_location id="location-srv07-el6-mk-a02n02-100" node="mk-a02n02" rsc="srv07-el6" score="100"/>
</constraints>
<fencing-topology>
<fencing-level devices="ipmilan_node1" id="fl-mk-a02n01-1" index="1" target="mk-a02n01"/>
<fencing-level devices="apc_snmp_node1_mk-pdu01,apc_snmp_node1_mk-pdu02" id="fl-mk-a02n01-2" index="2" target="mk-a02n01"/>
<fencing-level devices="ipmilan_node2" id="fl-mk-a02n02-1" index="1" target="mk-a02n02"/>
<fencing-level devices="apc_snmp_node2_mk-pdu01,apc_snmp_node2_mk-pdu02" id="fl-mk-a02n02-2" index="2" target="mk-a02n02"/>
</fencing-topology>
</configuration>
<status>
<node_state id="1" uname="mk-a02n01" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
<lrm id="1">
<lrm_resources>
<lrm_resource id="ipmilan_node1" type="fence_ipmilan" class="stonith">
<lrm_rsc_op id="ipmilan_node1_last_0" operation_key="ipmilan_node1_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="21:0:0:829209fd-35f2-4626-a9cd-f8a50a62871e" transition-magic="0:0;21:0:0:829209fd-35f2-4626-a9cd-f8a50a62871e" exit-reason="" on_node="mk-a02n01" call-id="42" rc-code="0" op-status="0" interval="0" last-rc-change="1600870208" last-run="1600870208" exec-time="115" queue-time="0" op-digest="230c3c46a7f39ff7a5ff7f1b8aa9f17d" op-secure-params=" password passwd " op-secure-digest="a8bb97c4c1cae8f90e445a0ce85ecc19"/>
<lrm_rsc_op id="ipmilan_node1_monitor_60000" operation_key="ipmilan_node1_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="22:0:0:829209fd-35f2-4626-a9cd-f8a50a62871e" transition-magic="0:0;22:0:0:829209fd-35f2-4626-a9cd-f8a50a62871e" exit-reason="" on_node="mk-a02n01" call-id="45" rc-code="0" op-status="0" interval="60000" last-rc-change="1600870208" exec-time="90" queue-time="0" op-digest="7064441a5f8ccc94d13cc9a1433de0a5" op-secure-params=" password passwd " op-secure-digest="a8bb97c4c1cae8f90e445a0ce85ecc19"/>
</lrm_resource>
<lrm_resource id="apc_snmp_node1_mk-pdu01" type="fence_apc_snmp" class="stonith">
<lrm_rsc_op id="apc_snmp_node1_mk-pdu01_last_0" operation_key="apc_snmp_node1_mk-pdu01_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="2:0:7:829209fd-35f2-4626-a9cd-f8a50a62871e" transition-magic="0:7;2:0:7:829209fd-35f2-4626-a9cd-f8a50a62871e" exit-reason="" on_node="mk-a02n01" call-id="9" rc-code="7" op-status="0" interval="0" last-rc-change="1600870207" last-run="1600870207" exec-time="0" queue-time="0" op-digest="6b6191eeb61cd595ab0a26ec9762f8aa" op-secure-params=" password passwd " op-secure-digest="1dc851b0efa605b4ec3f03e3a3ba62f7"/>
</lrm_resource>
<lrm_resource id="apc_snmp_node1_mk-pdu02" type="fence_apc_snmp" class="stonith">
<lrm_rsc_op id="apc_snmp_node1_mk-pdu02_last_0" operation_key="apc_snmp_node1_mk-pdu02_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="25:0:0:829209fd-35f2-4626-a9cd-f8a50a62871e" transition-magic="0:0;25:0:0:829209fd-35f2-4626-a9cd-f8a50a62871e" exit-reason="" on_node="mk-a02n01" call-id="43" rc-code="0" op-status="0" interval="0" last-rc-change="1600870208" last-run="1600870208" exec-time="907" queue-time="0" op-digest="f4b11aca778aa58d81b7fa096bfe3fb4" op-secure-params=" password passwd " op-secure-digest="78517effd4af72191ac2c0b9d8567fcd"/>
<lrm_rsc_op id="apc_snmp_node1_mk-pdu02_monitor_60000" operation_key="apc_snmp_node1_mk-pdu02_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="26:0:0:829209fd-35f2-4626-a9cd-f8a50a62871e" transition-magic="0:0;26:0:0:829209fd-35f2-4626-a9cd-f8a50a62871e" exit-reason="" on_node="mk-a02n01" call-id="47" rc-code="0" op-status="0" interval="60000" last-rc-change="1600870209" exec-time="1175" queue-time="0" op-digest="da20bfed231d75a3b22f97eb06bb445f" op-secure-params=" password passwd " op-secure-digest="78517effd4af72191ac2c0b9d8567fcd"/>
</lrm_resource>
<lrm_resource id="ipmilan_node2" type="fence_ipmilan" class="stonith">
<lrm_rsc_op id="ipmilan_node2_last_0" operation_key="ipmilan_node2_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="4:0:7:829209fd-35f2-4626-a9cd-f8a50a62871e" transition-magic="0:7;4:0:7:829209fd-35f2-4626-a9cd-f8a50a62871e" exit-reason="" on_node="mk-a02n01" call-id="17" rc-code="7" op-status="0" interval="0" last-rc-change="1600870207" last-run="1600870207" exec-time="0" queue-time="0" op-digest="e759a456df902485096d4a48725ed81c" op-secure-params=" password passwd " op-secure-digest="47989163387c397e63fa3acdbec0d274"/>
</lrm_resource>
<lrm_resource id="apc_snmp_node2_mk-pdu01" type="fence_apc_snmp" class="stonith">
<lrm_rsc_op id="apc_snmp_node2_mk-pdu01_last_0" operation_key="apc_snmp_node2_mk-pdu01_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="29:0:0:829209fd-35f2-4626-a9cd-f8a50a62871e" transition-magic="0:0;29:0:0:829209fd-35f2-4626-a9cd-f8a50a62871e" exit-reason="" on_node="mk-a02n01" call-id="44" rc-code="0" op-status="0" interval="0" last-rc-change="1600870208" last-run="1600870208" exec-time="874" queue-time="0" op-digest="3d4af69481cb01c8c8f0f8af95940b99" op-secure-params=" password passwd " op-secure-digest="fd2959d25b0a20f6d1bc630f7565fd78"/>
<lrm_rsc_op id="apc_snmp_node2_mk-pdu01_monitor_60000" operation_key="apc_snmp_node2_mk-pdu01_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="30:0:0:829209fd-35f2-4626-a9cd-f8a50a62871e" transition-magic="0:0;30:0:0:829209fd-35f2-4626-a9cd-f8a50a62871e" exit-reason="" on_node="mk-a02n01" call-id="46" rc-code="0" op-status="0" interval="60000" last-rc-change="1600870209" exec-time="789" queue-time="0" op-digest="5b8d168b9627dad87e1ba2edace17f1e" op-secure-params=" password passwd " op-secure-digest="fd2959d25b0a20f6d1bc630f7565fd78"/>
</lrm_resource>
<lrm_resource id="apc_snmp_node2_mk-pdu02" type="fence_apc_snmp" class="stonith">
<lrm_rsc_op id="apc_snmp_node2_mk-pdu02_last_0" operation_key="apc_snmp_node2_mk-pdu02_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="6:0:7:829209fd-35f2-4626-a9cd-f8a50a62871e" transition-magic="0:7;6:0:7:829209fd-35f2-4626-a9cd-f8a50a62871e" exit-reason="" on_node="mk-a02n01" call-id="25" rc-code="7" op-status="0" interval="0" last-rc-change="1600870207" last-run="1600870207" exec-time="0" queue-time="0" op-digest="7787bf20740a07e14145707988b18000" op-secure-params=" password passwd " op-secure-digest="11d1e757682ff46234d9816e06534953"/>
</lrm_resource>
<lrm_resource id="srv07-el6" type="server" class="ocf" provider="alteeve">
<lrm_rsc_op id="srv07-el6_last_0" operation_key="srv07-el6_migrate_from_0" operation="migrate_from" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="25:85:0:829209fd-35f2-4626-a9cd-f8a50a62871e" transition-magic="0:0;25:85:0:829209fd-35f2-4626-a9cd-f8a50a62871e" exit-reason="" on_node="mk-a02n01" call-id="75" rc-code="0" op-status="0" interval="0" last-rc-change="1600925198" last-run="1600925198" exec-time="551" queue-time="0" op-digest="41dcb3443c331f2fe7ae92962905159f" migrate_source="mk-a02n02" migrate_target="mk-a02n01"/>
<lrm_rsc_op id="srv07-el6_monitor_60000" operation_key="srv07-el6_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="23:85:0:829209fd-35f2-4626-a9cd-f8a50a62871e" transition-magic="0:0;23:85:0:829209fd-35f2-4626-a9cd-f8a50a62871e" exit-reason="" on_node="mk-a02n01" call-id="76" rc-code="0" op-status="0" interval="60000" last-rc-change="1600925201" exec-time="541" queue-time="0" op-digest="65d0f0c9227f2593835f5de6c9cb9d0e"/>
</lrm_resource>
<lrm_resource id="srv08-m2-psql" type="server" class="ocf" provider="alteeve">
<lrm_rsc_op id="srv08-m2-psql_last_0" operation_key="srv08-m2-psql_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="10:0:7:829209fd-35f2-4626-a9cd-f8a50a62871e" transition-magic="0:7;10:0:7:829209fd-35f2-4626-a9cd-f8a50a62871e" exit-reason="" on_node="mk-a02n01" call-id="41" rc-code="7" op-status="0" interval="0" last-rc-change="1600870208" last-run="1600870208" exec-time="593" queue-time="0" op-digest="79b65e1a3736d1835da977ef2dee200d"/>
</lrm_resource>
<lrm_resource id="srv01-sql" type="server" class="ocf" provider="alteeve">
<lrm_rsc_op id="srv01-sql_last_0" operation_key="srv01-sql_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="7:78:7:829209fd-35f2-4626-a9cd-f8a50a62871e" transition-magic="0:0;7:78:7:829209fd-35f2-4626-a9cd-f8a50a62871e" exit-reason="" on_node="mk-a02n01" call-id="64" rc-code="0" op-status="0" interval="0" last-rc-change="1600924959" last-run="1600924959" exec-time="547" queue-time="0" op-digest="7acff34e45470837bd51c6d670b9878b"/>
<lrm_rsc_op id="srv01-sql_last_failure_0" operation_key="srv01-sql_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="7:78:7:829209fd-35f2-4626-a9cd-f8a50a62871e" transition-magic="0:0;7:78:7:829209fd-35f2-4626-a9cd-f8a50a62871e" exit-reason="" on_node="mk-a02n01" call-id="64" rc-code="0" op-status="0" interval="0" last-rc-change="1600924959" last-run="1600924959" exec-time="547" queue-time="0" op-digest="7acff34e45470837bd51c6d670b9878b"/>
<lrm_rsc_op id="srv01-sql_monitor_60000" operation_key="srv01-sql_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="24:79:0:829209fd-35f2-4626-a9cd-f8a50a62871e" transition-magic="0:0;24:79:0:829209fd-35f2-4626-a9cd-f8a50a62871e" exit-reason="" on_node="mk-a02n01" call-id="69" rc-code="0" op-status="0" interval="60000" last-rc-change="1600924960" exec-time="564" queue-time="0" op-digest="0434e67501e3e7af47a547723c35b411"/>
</lrm_resource>
<lrm_resource id="srv02-lab1" type="server" class="ocf" provider="alteeve">
<lrm_rsc_op id="srv02-lab1_last_0" operation_key="srv02-lab1_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="8:78:7:829209fd-35f2-4626-a9cd-f8a50a62871e" transition-magic="0:7;8:78:7:829209fd-35f2-4626-a9cd-f8a50a62871e" exit-reason="" on_node="mk-a02n01" call-id="68" rc-code="7" op-status="0" interval="0" last-rc-change="1600924959" last-run="1600924959" exec-time="546" queue-time="0" op-digest="c7a4471d0df53d7aab5392a1ba7d67e1"/>
</lrm_resource>
</lrm_resources>
</lrm>
<transient_attributes id="1">
<instance_attributes id="status-1"/>
</transient_attributes>
</node_state>
<node_state id="2" uname="mk-a02n02" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
<lrm id="2">
<lrm_resources>
<lrm_resource id="ipmilan_node1" type="fence_ipmilan" class="stonith">
<lrm_rsc_op id="ipmilan_node1_last_0" operation_key="ipmilan_node1_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="11:0:7:829209fd-35f2-4626-a9cd-f8a50a62871e" transition-magic="0:7;11:0:7:829209fd-35f2-4626-a9cd-f8a50a62871e" exit-reason="" on_node="mk-a02n02" call-id="5" rc-code="7" op-status="0" interval="0" last-rc-change="1600870206" last-run="1600870206" exec-time="2" queue-time="0" op-digest="230c3c46a7f39ff7a5ff7f1b8aa9f17d" op-secure-params=" password passwd " op-secure-digest="a8bb97c4c1cae8f90e445a0ce85ecc19"/>
</lrm_resource>
<lrm_resource id="apc_snmp_node1_mk-pdu01" type="fence_apc_snmp" class="stonith">
<lrm_rsc_op id="apc_snmp_node1_mk-pdu01_last_0" operation_key="apc_snmp_node1_mk-pdu01_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="23:0:0:829209fd-35f2-4626-a9cd-f8a50a62871e" transition-magic="0:0;23:0:0:829209fd-35f2-4626-a9cd-f8a50a62871e" exit-reason="" on_node="mk-a02n02" call-id="42" rc-code="0" op-status="0" interval="0" last-rc-change="1600870208" last-run="1600870208" exec-time="849" queue-time="1" op-digest="6b6191eeb61cd595ab0a26ec9762f8aa" op-secure-params=" password passwd " op-secure-digest="1dc851b0efa605b4ec3f03e3a3ba62f7"/>
<lrm_rsc_op id="apc_snmp_node1_mk-pdu01_monitor_60000" operation_key="apc_snmp_node1_mk-pdu01_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="24:0:0:829209fd-35f2-4626-a9cd-f8a50a62871e" transition-magic="0:0;24:0:0:829209fd-35f2-4626-a9cd-f8a50a62871e" exit-reason="" on_node="mk-a02n02" call-id="46" rc-code="0" op-status="0" interval="60000" last-rc-change="1600870209" exec-time="755" queue-time="0" op-digest="9dd197b1c8871a78c74a32b26949998d" op-secure-params=" password passwd " op-secure-digest="1dc851b0efa605b4ec3f03e3a3ba62f7"/>
</lrm_resource>
<lrm_resource id="apc_snmp_node1_mk-pdu02" type="fence_apc_snmp" class="stonith">
<lrm_rsc_op id="apc_snmp_node1_mk-pdu02_last_0" operation_key="apc_snmp_node1_mk-pdu02_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="13:0:7:829209fd-35f2-4626-a9cd-f8a50a62871e" transition-magic="0:7;13:0:7:829209fd-35f2-4626-a9cd-f8a50a62871e" exit-reason="" on_node="mk-a02n02" call-id="13" rc-code="7" op-status="0" interval="0" last-rc-change="1600870207" last-run="1600870207" exec-time="0" queue-time="0" op-digest="f4b11aca778aa58d81b7fa096bfe3fb4" op-secure-params=" password passwd " op-secure-digest="78517effd4af72191ac2c0b9d8567fcd"/>
</lrm_resource>
<lrm_resource id="ipmilan_node2" type="fence_ipmilan" class="stonith">
<lrm_rsc_op id="ipmilan_node2_last_0" operation_key="ipmilan_node2_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="27:0:0:829209fd-35f2-4626-a9cd-f8a50a62871e" transition-magic="0:0;27:0:0:829209fd-35f2-4626-a9cd-f8a50a62871e" exit-reason="" on_node="mk-a02n02" call-id="43" rc-code="0" op-status="0" interval="0" last-rc-change="1600870208" last-run="1600870208" exec-time="106" queue-time="0" op-digest="e759a456df902485096d4a48725ed81c" op-secure-params=" password passwd " op-secure-digest="47989163387c397e63fa3acdbec0d274"/>
<lrm_rsc_op id="ipmilan_node2_monitor_60000" operation_key="ipmilan_node2_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="28:0:0:829209fd-35f2-4626-a9cd-f8a50a62871e" transition-magic="0:0;28:0:0:829209fd-35f2-4626-a9cd-f8a50a62871e" exit-reason="" on_node="mk-a02n02" call-id="45" rc-code="0" op-status="0" interval="60000" last-rc-change="1600870208" exec-time="87" queue-time="0" op-digest="467ef5117cbb737e5c6fc23b58809791" op-secure-params=" password passwd " op-secure-digest="47989163387c397e63fa3acdbec0d274"/>
</lrm_resource>
<lrm_resource id="apc_snmp_node2_mk-pdu01" type="fence_apc_snmp" class="stonith">
<lrm_rsc_op id="apc_snmp_node2_mk-pdu01_last_0" operation_key="apc_snmp_node2_mk-pdu01_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="15:0:7:829209fd-35f2-4626-a9cd-f8a50a62871e" transition-magic="0:7;15:0:7:829209fd-35f2-4626-a9cd-f8a50a62871e" exit-reason="" on_node="mk-a02n02" call-id="21" rc-code="7" op-status="0" interval="0" last-rc-change="1600870207" last-run="1600870207" exec-time="0" queue-time="0" op-digest="3d4af69481cb01c8c8f0f8af95940b99" op-secure-params=" password passwd " op-secure-digest="fd2959d25b0a20f6d1bc630f7565fd78"/>
</lrm_resource>
<lrm_resource id="apc_snmp_node2_mk-pdu02" type="fence_apc_snmp" class="stonith">
<lrm_rsc_op id="apc_snmp_node2_mk-pdu02_last_0" operation_key="apc_snmp_node2_mk-pdu02_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="31:0:0:829209fd-35f2-4626-a9cd-f8a50a62871e" transition-magic="0:0;31:0:0:829209fd-35f2-4626-a9cd-f8a50a62871e" exit-reason="" on_node="mk-a02n02" call-id="44" rc-code="0" op-status="0" interval="0" last-rc-change="1600870208" last-run="1600870208" exec-time="872" queue-time="0" op-digest="7787bf20740a07e14145707988b18000" op-secure-params=" password passwd " op-secure-digest="11d1e757682ff46234d9816e06534953"/>
<lrm_rsc_op id="apc_snmp_node2_mk-pdu02_monitor_60000" operation_key="apc_snmp_node2_mk-pdu02_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="32:0:0:829209fd-35f2-4626-a9cd-f8a50a62871e" transition-magic="0:0;32:0:0:829209fd-35f2-4626-a9cd-f8a50a62871e" exit-reason="" on_node="mk-a02n02" call-id="47" rc-code="0" op-status="0" interval="60000" last-rc-change="1600870209" exec-time="759" queue-time="0" op-digest="910a16919098d7bca091e972cf8844f5" op-secure-params=" password passwd " op-secure-digest="11d1e757682ff46234d9816e06534953"/>
</lrm_resource>
<lrm_resource id="srv01-sql" type="server" class="ocf" provider="alteeve">
<lrm_rsc_op id="srv01-sql_last_0" operation_key="srv01-sql_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="18:0:7:829209fd-35f2-4626-a9cd-f8a50a62871e" transition-magic="0:7;18:0:7:829209fd-35f2-4626-a9cd-f8a50a62871e" exit-reason="" on_node="mk-a02n02" call-id="33" rc-code="7" op-status="0" interval="0" last-rc-change="1600870208" last-run="1600870208" exec-time="564" queue-time="0" op-digest="7acff34e45470837bd51c6d670b9878b"/>
</lrm_resource>
<lrm_resource id="srv02-lab1" type="server" class="ocf" provider="alteeve">
<lrm_rsc_op id="srv02-lab1_last_0" operation_key="srv02-lab1_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="19:0:7:829209fd-35f2-4626-a9cd-f8a50a62871e" transition-magic="0:7;19:0:7:829209fd-35f2-4626-a9cd-f8a50a62871e" exit-reason="" on_node="mk-a02n02" call-id="37" rc-code="7" op-status="0" interval="0" last-rc-change="1600870208" last-run="1600870208" exec-time="558" queue-time="0" op-digest="c7a4471d0df53d7aab5392a1ba7d67e1"/>
</lrm_resource>
<lrm_resource id="srv08-m2-psql" type="server" class="ocf" provider="alteeve">
<lrm_rsc_op id="srv08-m2-psql_last_0" operation_key="srv08-m2-psql_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="20:0:7:829209fd-35f2-4626-a9cd-f8a50a62871e" transition-magic="0:7;20:0:7:829209fd-35f2-4626-a9cd-f8a50a62871e" exit-reason="" on_node="mk-a02n02" call-id="41" rc-code="7" op-status="0" interval="0" last-rc-change="1600870208" last-run="1600870208" exec-time="562" queue-time="0" op-digest="79b65e1a3736d1835da977ef2dee200d"/>
</lrm_resource>
<lrm_resource id="srv07-el6" type="server" class="ocf" provider="alteeve">
<lrm_rsc_op id="srv07-el6_last_0" operation_key="srv07-el6_stop_0" operation="stop" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="21:85:0:829209fd-35f2-4626-a9cd-f8a50a62871e" transition-magic="0:0;21:85:0:829209fd-35f2-4626-a9cd-f8a50a62871e" exit-reason="" on_node="mk-a02n02" call-id="64" rc-code="0" op-status="0" interval="0" last-rc-change="1600925199" last-run="1600925199" exec-time="1881" queue-time="0" op-digest="41dcb3443c331f2fe7ae92962905159f" migrate_source="mk-a02n02" migrate_target="mk-a02n01"/>
<lrm_rsc_op id="srv07-el6_last_failure_0" operation_key="srv07-el6_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="9:78:7:829209fd-35f2-4626-a9cd-f8a50a62871e" transition-magic="0:0;9:78:7:829209fd-35f2-4626-a9cd-f8a50a62871e" exit-reason="" on_node="mk-a02n02" call-id="55" rc-code="0" op-status="0" interval="0" last-rc-change="1600924959" last-run="1600924959" exec-time="552" queue-time="0" op-digest="41dcb3443c331f2fe7ae92962905159f"/>
<lrm_rsc_op id="srv07-el6_monitor_60000" operation_key="srv07-el6_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="23:83:0:829209fd-35f2-4626-a9cd-f8a50a62871e" transition-magic="0:0;23:83:0:829209fd-35f2-4626-a9cd-f8a50a62871e" exit-reason="" on_node="mk-a02n02" call-id="61" rc-code="0" op-status="0" interval="60000" last-rc-change="1600925173" exec-time="539" queue-time="0" op-digest="65d0f0c9227f2593835f5de6c9cb9d0e"/>
</lrm_resource>
</lrm_resources>
</lrm>
<transient_attributes id="2">
<instance_attributes id="status-2"/>
</transient_attributes>
</node_state>
</status>
</cib>
';
my $not_in_cluster = $anvil->Cluster->parse_cib({debug => 2, cib => $cib});
if ($not_in_cluster)
{
print "This node isn't in the cluster.\n";

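# The block below is a sketch that was not part of the original diff; it assumes
# parse_cib() stores its results under $anvil->data->{cib}{parsed}, and the hash
# keys shown are illustrative guesses, not the confirmed data structure.
else
{
	foreach my $server (sort {$a cmp $b} keys %{$anvil->data->{cib}{parsed}{data}{server}})
	{
		my $status    = $anvil->data->{cib}{parsed}{data}{server}{$server}{status};    # assumed key; e.g. "running" / "off"
		my $host_name = $anvil->data->{cib}{parsed}{data}{server}{$server}{host_name}; # assumed key; node currently hosting the server
		print "Server: [".$server."] is: [".$status."] on: [".$host_name."]\n";
	}
}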