* Moved Database->check_condition_age to Alert.

* Created (but not finished) scan-apc-pdu
* Added support to tracking maintenance-mode for nodes in Cluster->parse_cib
* Created Remote->read_snmp_oid().
* Created Server->get_definition.
* Updated Server->get_status() to write-out server XML files on-demand.
* Finished scan-cluster.

Signed-off-by: Digimer <digimer@alteeve.ca>
main
Digimer 4 years ago
parent 5d89357c16
commit d677d19ca0
  1. 2
      Anvil/Tools.pm
  2. 117
      Anvil/Tools/Alert.pm
  3. 123
      Anvil/Tools/Cluster.pm
  4. 131
      Anvil/Tools/Database.pm
  5. 171
      Anvil/Tools/Remote.pm
  6. 4
      Anvil/Tools/ScanCore.pm
  7. 145
      Anvil/Tools/Server.pm
  8. 41
      notes
  9. 12
      ocf/alteeve/server
  10. 1
      rpm/SPECS/anvil.spec
  11. 11
      scancore-agents/scan-apc-pdu/Striker-MIB.txt
  12. 2624
      scancore-agents/scan-apc-pdu/scan-apc-pdu
  13. 256
      scancore-agents/scan-apc-pdu/scan-apc-pdu.sql
  14. 41
      scancore-agents/scan-apc-pdu/scan-apc-pdu.xml
  15. 524
      scancore-agents/scan-cluster/scan-cluster
  16. 608
      scancore-agents/scan-cluster/scan-cluster.sql
  17. 24
      scancore-agents/scan-cluster/scan-cluster.xml
  18. 5
      scancore-agents/scan-hardware/scan-hardware
  19. 6
      scancore-agents/scan-server/scan-server
  20. 1
      share/words.xml
  21. 1
      tools/anvil-daemon
  22. 389
      tools/test.pl

@ -1195,6 +1195,8 @@ sub _set_paths
rsync => "/usr/bin/rsync",
sed => "/usr/bin/sed",
'shutdown' => "/usr/sbin/shutdown",
snmpget => "/usr/bin/snmpget",
snmpset => "/usr/bin/snmpset",
'ssh-keygen' => "/usr/bin/ssh-keygen",
'ssh-keyscan' => "/usr/bin/ssh-keyscan",
'stat' => "/usr/bin/stat",

@ -12,6 +12,7 @@ my $THIS_FILE = "Alert.pm";
### Methods;
# check_alert_sent
# check_condition_age
# error
# register
@ -241,6 +242,122 @@ WHERE
return($changed);
}
=head2 check_condition_age

This checks to see how long ago a given condition (variable, really) was set. This is generally used when a program, often a scan agent, wants to wait to see if a given state persists before sending an alert and/or taking an action.

A common example is seeing how long power has been lost, if a lost sensor is going to return, etc.

The age of the condition is returned, in seconds. If the condition was just set, or was just cleared, C<< 0 >> is returned. If there is a problem, C<< !!error!! >> is returned.

Parameters;

=head3 clear (optional)

When set to C<< 1 >>, if the condition exists, it is cleared and C<< 0 >> is returned. If the condition does not exist, nothing happens.

=head3 name (required)

This is the name of the condition being set. It's a free-form string, but generally in a format like C<< <scan_agent_name>::<condition_name> >>.

=head3 host_uuid (optional)

If a condition is host-specific, this can be set to the caller's C<< host_uuid >>. Generally this is needed, save for conditions related to hosted servers that are not host-bound.

=cut
sub check_condition_age
{
	my $self      = shift;
	my $parameter = shift;
	my $anvil     = $self->parent;
	my $debug     = defined $parameter->{debug} ? $parameter->{debug} : 3;
	# Fixed: this logged 'Database->check_condition_age()', left over from before the method
	# was moved into Alert.pm.
	$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => $debug, key => "log_0125", variables => { method => "Alert->check_condition_age()" }});
	
	my $clear     = defined $parameter->{clear}     ? $parameter->{clear}     : 0;
	my $name      = defined $parameter->{name}      ? $parameter->{name}      : "";
	my $host_uuid = defined $parameter->{host_uuid} ? $parameter->{host_uuid} : "NULL";
	$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { 
		clear     => $clear, 
		name      => $name, 
		host_uuid => $host_uuid, 
	}});
	
	if (not $name)
	{
		# Nothing we can check without a condition name.
		$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 0, priority => "err", key => "log_0020", variables => { method => "Alert->check_condition_age()", parameter => "name" }});
		return("!!error!!");
	}
	
	my $age          = 0;
	# Host-bound conditions are stored against the 'hosts' table; unbound ones use no source table.
	my $source_table = $host_uuid ? "hosts" : "";
	$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { source_table => $source_table }});
	
	# See if this variable has been set yet.
	my ($variable_value, $variable_uuid, $epoch_modified_date, $modified_date) = $anvil->Database->read_variable({
		variable_name         => $name, 
		variable_source_table => $source_table, 
		variable_source_uuid  => $host_uuid, 
	});
	$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { 
		variable_value      => $variable_value, 
		variable_uuid       => $variable_uuid, 
		epoch_modified_date => $epoch_modified_date, 
		modified_date       => $modified_date, 
	}});
	if ($variable_uuid)
	{
		# Are we clearing?
		if ($clear)
		{
			# Yup. Mark the condition as cleared and return an age of '0'.
			# Fixed: previously this fell through to the "eq 'clear'" check below, which
			# immediately flipped an already-cleared condition back to 'set', defeating
			# the requested clear.
			$variable_uuid = $anvil->Database->insert_or_update_variables({
				debug             => $debug,
				variable_uuid     => $variable_uuid, 
				variable_value    => "clear",
				update_value_only => 1,
			});
			return($age);
		}
		
		# If the value was 'clear', the condition has just (re)appeared; change it to 'set'.
		if ($variable_value eq "clear")
		{
			# Set it.
			$variable_uuid = $anvil->Database->insert_or_update_variables({
				debug             => $debug,
				variable_uuid     => $variable_uuid, 
				variable_value    => "set",
				update_value_only => 1,
			});
		}
		else
		{
			# How old is it?
			$age = time - $epoch_modified_date;
			$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { age => $age }});
			return($age);
		}
	}
	elsif (not $clear)
	{
		# New, set it.
		my $variable_uuid = $anvil->Database->insert_or_update_variables({
			debug                 => $debug,
			variable_name         => $name, 
			variable_value        => "set",
			variable_default      => "set", 
			variable_description  => "striker_0278", 
			variable_section      => "conditions", 
			variable_source_uuid  => $host_uuid, 
			variable_source_table => $source_table, 
		});
		$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { variable_uuid => $variable_uuid }});
	}
	
	return($age);
}
=head2 register
This registers an alert to be sent later by C<< Email->send_alerts >>.

@ -772,10 +772,26 @@ sub parse_cib
$anvil->data->{cib}{parsed}{cib}{node_state}{$node_id}{in_ccm} = "false";
$anvil->data->{cib}{parsed}{cib}{node_state}{$node_id}{crmd} = "offline";
$anvil->data->{cib}{parsed}{cib}{node_state}{$node_id}{'join'} = "down";
$anvil->data->{cib}{parsed}{cib}{node_state}{$node_id}{'maintenance-mode'} = "off";
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => {
"cib::parsed::cib::node_state::${node_id}::in_ccm" => $anvil->data->{cib}{parsed}{cib}{node_state}{$node_id}{in_ccm},
"cib::parsed::cib::node_state::${node_id}::crmd" => $anvil->data->{cib}{parsed}{cib}{node_state}{$node_id}{crmd},
"cib::parsed::cib::node_state::${node_id}::join" => $anvil->data->{cib}{parsed}{cib}{node_state}{$node_id}{'join'},
"cib::parsed::cib::node_state::${node_id}::maintenance-mode" => $anvil->data->{cib}{parsed}{cib}{node_state}{$node_id}{'maintenance-mode'},
}});
}
}
foreach my $instance_attributes ($node->findnodes('./instance_attributes'))
{
my $instance_attributes_id = $instance_attributes->{id};
foreach my $nvpair ($instance_attributes->findnodes('./nvpair'))
{
my $id = $nvpair->{id};
my $name = $nvpair->{name};
my $value = $nvpair->{value};
$anvil->data->{cib}{parsed}{cib}{node_state}{$node_id}{$name} = $value;
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => {
"cib::parsed::cib::node_state::${node_id}::${name}" => $anvil->data->{cib}{parsed}{cib}{node_state}{$node_id}{$name},
}});
}
}
@ -943,6 +959,48 @@ sub parse_cib
}
}
# Set some cluster value defaults.
$anvil->data->{cib}{parsed}{data}{cluster}{'maintenance-mode'} = "false";
foreach my $nvpair_id (sort {$a cmp $b} keys %{$anvil->data->{cib}{parsed}{configuration}{crm_config}{cluster_property_set}{nvpair}})
{
my $variable = $anvil->data->{cib}{parsed}{configuration}{crm_config}{cluster_property_set}{nvpair}{$nvpair_id}{name};
my $value = $anvil->data->{cib}{parsed}{configuration}{crm_config}{cluster_property_set}{nvpair}{$nvpair_id}{value};
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => {
's1:nvpair_id' => $nvpair_id,
's2:variable' => $variable,
's3:value' => $value,
}});
if ($variable eq "stonith-max-attempts")
{
$anvil->data->{cib}{parsed}{data}{stonith}{'max-attempts'} = $value;
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => {
"cib::parsed::data::stonith::max-attempts" => $anvil->data->{cib}{parsed}{data}{stonith}{'max-attempts'},
}});
}
if ($variable eq "stonith-enabled")
{
$anvil->data->{cib}{parsed}{data}{stonith}{enabled} = $value eq "true" ? 1 : 0;
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => {
"cib::parsed::data::stonith::enabled" => $anvil->data->{cib}{parsed}{data}{stonith}{enabled},
}});
}
if ($variable eq "cluster-name")
{
$anvil->data->{cib}{parsed}{data}{cluster}{name} = $value;
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => {
"cib::parsed::data::cluster::name" => $anvil->data->{cib}{parsed}{data}{cluster}{name},
}});
}
if ($variable eq "maintenance-mode")
{
$anvil->data->{cib}{parsed}{data}{cluster}{'maintenance-mode'} = $value;
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => {
"cib::parsed::data::cluster::maintenance-mode" => $anvil->data->{cib}{parsed}{data}{cluster}{'maintenance-mode'},
}});
}
}
# Pull some data out for easier access.
$anvil->data->{cib}{parsed}{peer}{ready} = "";
$anvil->data->{cib}{parsed}{peer}{name} = "";
@ -950,6 +1008,7 @@ sub parse_cib
{
# The "coming up" order is 'in_ccm' then 'crmd' then 'join'.
my $node_id = $anvil->data->{cib}{parsed}{data}{node}{$node_name}{id};
my $maintenance_mode = $anvil->data->{cib}{parsed}{cib}{node_state}{$node_id}{'maintenance-mode'} eq "on" ? 1 : 0; # 'on' or 'off' - Node is not monitoring resources
my $in_ccm = $anvil->data->{cib}{parsed}{cib}{node_state}{$node_id}{in_ccm} eq "true" ? 1 : 0; # 'true' or 'false' - Corosync member
my $crmd = $anvil->data->{cib}{parsed}{cib}{node_state}{$node_id}{crmd} eq "online" ? 1 : 0; # 'online' or 'offline' - In corosync process group
my $join = $anvil->data->{cib}{parsed}{cib}{node_state}{$node_id}{'join'} eq "member" ? 1 : 0; # 'member' or 'down' - Completed controller join process
@ -957,17 +1016,29 @@ sub parse_cib
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => {
's1:node_name' => $node_name,
's2:node_id' => $node_id,
's3:in_ccm' => $in_ccm,
's4:crmd' => $crmd,
's5:join' => $join,
's6:ready' => $ready,
's3:maintenance_mode' => $maintenance_mode,
's4:in_ccm' => $in_ccm,
's5:crmd' => $crmd,
's6:join' => $join,
's7:ready' => $ready,
}});
# If the global maintenance mode is set, set maintenance mode to true.
if (($anvil->data->{cib}{parsed}{data}{cluster}{'maintenance-mode'}) && ($anvil->data->{cib}{parsed}{data}{cluster}{'maintenance-mode'} eq "true"))
{
$maintenance_mode = 1;
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { maintenance_mode => $maintenance_mode }});
}
$anvil->data->{cib}{parsed}{data}{node}{$node_name}{node_state}{pacemaker_id} = $node_id;
$anvil->data->{cib}{parsed}{data}{node}{$node_name}{node_state}{'maintenance-mode'} = $maintenance_mode;
$anvil->data->{cib}{parsed}{data}{node}{$node_name}{node_state}{in_ccm} = $in_ccm;
$anvil->data->{cib}{parsed}{data}{node}{$node_name}{node_state}{crmd} = $crmd;
$anvil->data->{cib}{parsed}{data}{node}{$node_name}{node_state}{'join'} = $join;
$anvil->data->{cib}{parsed}{data}{node}{$node_name}{node_state}{ready} = $ready;
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => {
"cib::parsed::data::node::${node_name}::node_state::pacemaker_id" => $anvil->data->{cib}{parsed}{data}{node}{$node_name}{node_state}{pacemaker_id},
"cib::parsed::data::node::${node_name}::node_state::maintenance_mode" => $anvil->data->{cib}{parsed}{data}{node}{$node_name}{node_state}{'maintenance-mode'},
"cib::parsed::data::node::${node_name}::node_state::in_ccm" => $anvil->data->{cib}{parsed}{data}{node}{$node_name}{node_state}{in_ccm},
"cib::parsed::data::node::${node_name}::node_state::crmd" => $anvil->data->{cib}{parsed}{data}{node}{$node_name}{node_state}{crmd},
"cib::parsed::data::node::${node_name}::node_state::join" => $anvil->data->{cib}{parsed}{data}{node}{$node_name}{node_state}{'join'},
@ -997,48 +1068,6 @@ sub parse_cib
}
}
# Set some cluster value defaults.
$anvil->data->{cib}{parsed}{data}{cluster}{'maintenance-mode'} = "false";
foreach my $nvpair_id (sort {$a cmp $b} keys %{$anvil->data->{cib}{parsed}{configuration}{crm_config}{cluster_property_set}{nvpair}})
{
my $variable = $anvil->data->{cib}{parsed}{configuration}{crm_config}{cluster_property_set}{nvpair}{$nvpair_id}{name};
my $value = $anvil->data->{cib}{parsed}{configuration}{crm_config}{cluster_property_set}{nvpair}{$nvpair_id}{value};
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => {
's1:nvpair_id' => $nvpair_id,
's2:variable' => $variable,
's3:value' => $value,
}});
if ($variable eq "stonith-max-attempts")
{
$anvil->data->{cib}{parsed}{data}{stonith}{'max-attempts'} = $value;
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => {
"cib::parsed::data::stonith::max-attempts" => $anvil->data->{cib}{parsed}{data}{stonith}{'max-attempts'},
}});
}
if ($variable eq "stonith-enabled")
{
$anvil->data->{cib}{parsed}{data}{stonith}{enabled} = $value eq "true" ? 1 : 0;
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => {
"cib::parsed::data::stonith::enabled" => $anvil->data->{cib}{parsed}{data}{stonith}{enabled},
}});
}
if ($variable eq "cluster-name")
{
$anvil->data->{cib}{parsed}{data}{cluster}{name} = $value;
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => {
"cib::parsed::data::cluster::name" => $anvil->data->{cib}{parsed}{data}{cluster}{name},
}});
}
if ($variable eq "maintenance-mode")
{
$anvil->data->{cib}{parsed}{data}{cluster}{'maintenance-mode'} = $value;
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => {
"cib::parsed::data::cluster::maintenance-mode" => $anvil->data->{cib}{parsed}{data}{cluster}{'maintenance-mode'},
}});
}
}
# Fencing devices and levels.
my $delay_set = 0;
foreach my $primitive_id (sort {$a cmp $b} keys %{$anvil->data->{cib}{parsed}{cib}{resources}{primitive}})
@ -1626,7 +1655,7 @@ sub which_node
# Load Anvil! systems.
if ((not exists $anvil->data->{anvils}{anvil_name}) && (not $anvil->data->{anvils}{anvil_name}))
{
$anvil->Database->load_anvils({debug => $debug});
$anvil->Database->get_anvils({debug => $debug});
}
foreach my $anvil_name (sort {$a cmp $b} keys %{$anvil->data->{anvils}{anvil_name}})

@ -17,7 +17,6 @@ my $THIS_FILE = "Database.pm";
### Methods;
# archive_database
# check_condition_age
# check_lock_age
# check_for_schema
# configure_pgsql
@ -303,121 +302,6 @@ sub archive_database
}
=head2 check_condition_age
This checks to see how long ago a given condition (variable, really) has been set. This is generally used when a program, often a scan agent, wants to wait to see if a given state persists before sending an alert and/or taking an action.
A common example is seeing how long power has been lost, if a lost sensor is going to return, etc.
The age of the condition is returned, in seconds. If there is a problem, C<< !!error!! >> is returned.
Parameters;
=head3 clear (optional)
When set to C<< 1 >>, if the condition exists, it is cleared. If the condition does not exist, nothing happens.
=head3 name (required)
This is the name of the condition being set. It's a free-form string, but generally in a format like C<< <scan_agent_name>::<condition_name> >>.
=head3 host_uuid (optional)
If a condition is host-specific, this can be set to the caller's C<< host_uuid >>. Generally this is needed, save for conditions related to hosted servers that are not host-bound.
=cut
sub check_condition_age
{
# NOTE(review): this is the copy being removed from Database.pm in this commit; per the
# commit message it now lives as Alert->check_condition_age(). Kept verbatim.
my $self = shift;
my $parameter = shift;
my $anvil = $self->parent;
my $debug = defined $parameter->{debug} ? $parameter->{debug} : 3;
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => $debug, key => "log_0125", variables => { method => "Database->check_condition_age()" }});
# Unpack parameters; 'host_uuid' defaults to the literal string "NULL" for unbound conditions.
my $clear = defined $parameter->{clear} ? $parameter->{clear} : 0;
my $name = defined $parameter->{name} ? $parameter->{name} : "";
my $host_uuid = defined $parameter->{host_uuid} ? $parameter->{host_uuid} : "NULL";
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => {
clear => $clear,
name => $name,
host_uuid => $host_uuid,
}});
# The condition name is mandatory; bail out with the standard error sentinel without it.
if (not $name)
{
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 0, priority => "err", key => "log_0020", variables => { method => "Database->check_condition_age()", parameter => "name" }});
return("!!error!!");
}
my $age = 0;
# Host-bound conditions are stored against the 'hosts' table; otherwise no source table is used.
my $source_table = $host_uuid ? "hosts" : "";
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { source_table => $source_table }});
# See if this variable has been set yet.
my ($variable_value, $variable_uuid, $epoch_modified_date, $modified_date) = $anvil->Database->read_variable({
variable_name => $name,
variable_source_table => $source_table,
variable_source_uuid => $host_uuid,
});
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => {
variable_value => $variable_value,
variable_uuid => $variable_uuid,
epoch_modified_date => $epoch_modified_date,
modified_date => $modified_date,
}});
if ($variable_uuid)
{
# Are we clearing?
if ($clear)
{
# Yup
$variable_uuid = $anvil->Database->insert_or_update_variables({
debug => $debug,
variable_uuid => $variable_uuid,
variable_value => "clear",
update_value_only => 1,
});
}
# if the value was 'clear', change it to 'set'.
# NOTE(review): when 'clear' was requested AND the old value was already "clear", this
# branch re-sets the condition right after clearing it - looks unintended; confirm.
if ($variable_value eq "clear")
{
# Set it.
$variable_uuid = $anvil->Database->insert_or_update_variables({
debug => $debug,
variable_uuid => $variable_uuid,
variable_value => "set",
update_value_only => 1,
});
}
else
{
# How old is it? Age is wall-clock seconds since the variable's last modification.
$age = time - $epoch_modified_date;
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { age => $age }});
return($age);
}
}
elsif (not $clear)
{
# New, set it.
my $variable_uuid = $anvil->Database->insert_or_update_variables({
debug => $debug,
variable_name => $name,
variable_value => "set",
variable_default => "set",
variable_description => "striker_0278",
variable_section => "conditions",
variable_source_uuid => $host_uuid,
variable_source_table => $source_table,
});
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { variable_uuid => $variable_uuid }});
}
# Falls through here when the condition was newly created, just re-set, or just cleared; age is 0.
return($age);
}
=head2 check_lock_age
This checks to see if 'sys::database::local_lock_active' is set. If it is, its age is checked and if the age is >50% of sys::database::locking_reap_age, it will renew the lock.
@ -2411,7 +2295,7 @@ FROM
host_name => $host_name,
host_type => $host_type,
host_key => $host_key,
host_ipmi => $host_ipmi =~ /passw/ ? $anvil->Log->is_secure($host_ipmi) : $host_ipmi,
host_ipmi => $host_ipmi,
modified_date => $modified_date,
};
@ -4211,10 +4095,17 @@ sub insert_or_update_anvils
}
}
elsif ((not $anvil_name) && (not $anvil_uuid))
{
# Can we find the anvil_uuid?
$anvil_uuid = $anvil->Cluster->get_anvil_uuid({debug => $debug});
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { anvil_uuid => $anvil_uuid }});
if (not $anvil_uuid)
{
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 0, priority => "err", key => "error_0127", variables => { table => "anvils" }});
return("");
}
}
# If we don't have a UUID, see if we can find one for the given anvil name.
if (not $anvil_uuid)
@ -4417,6 +4308,7 @@ SET
WHERE
anvil_uuid = ".$anvil->Database->quote($anvil_uuid)."
";
$query =~ s/'NULL'/NULL/g;
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, secure => 1, list => { query => $query }});
$anvil->Database->write({uuid => $uuid, query => $query, source => $file ? $file." -> ".$THIS_FILE : $THIS_FILE, line => $line ? $line." -> ".__LINE__ : __LINE__});
}
@ -10005,6 +9897,7 @@ SET
WHERE
session_uuid = ".$anvil->Database->quote($session_uuid)."
";
$query =~ s/'NULL'/NULL/g;
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { query => $query }});
$anvil->Database->write({uuid => $uuid, query => $query, source => $file ? $file." -> ".$THIS_FILE : $THIS_FILE, line => $line ? $line." -> ".__LINE__ : __LINE__});
}
@ -12245,7 +12138,7 @@ sub manage_anvil_conf
file => $anvil->data->{path}{configs}{'anvil.conf'},
force_read => 1,
port => $port,
password => $anvil->Log->is_secure($password),
password => $password,
remote_user => $remote_user,
secure => 1,
target => $target,
@ -12542,7 +12435,7 @@ sub manage_anvil_conf
group => "admin",
mode => "0644",
overwrite => 1,
password => $anvil->Log->is_secure($password),
password => $password,
port => $port,
remote_user => $remote_user,
target => $target,

@ -17,6 +17,7 @@ my $THIS_FILE = "Remote.pm";
### Methods;
# add_target_to_known_hosts
# call
# read_snmp_oid
# test_access
# _call_ssh_keyscan
# _check_known_hosts_for_target
@ -716,6 +717,172 @@ sub call
return($output, $error, $return_code);
}
=head2 read_snmp_oid

This connects to a remote machine using SNMP and reads (if possible) the OID specified. If unable to reach the target device, C<< #!no_connection!# >> is returned. If the target replied but no value could be parsed, C<< #!no_value!# >> is returned, and if the OID does not exist on the target, C<< -- >> is returned. If there is a problem with the call made to this method, C<< !!error!! >> is returned.

Otherwise, two values are returned; first the data and second the data type.

Parameters;

=head3 community (optional)

This is the SNMP community used to connect to.

=head3 mib (optional)

If set to a path to a file, the file is treated as a custom MIB to be fed into C<< snmpget >>

=head3 oid (required)

This is the OID string to query.

=head3 target (required)

This is the IP or (resolvable) host name to query.

=head3 version (optional, default '2c')

This is the SNMP protocol version to use when connecting to the target.

=cut
sub read_snmp_oid
{
	my $self      = shift;
	my $parameter = shift;
	my $anvil     = $self->parent;
	my $debug     = defined $parameter->{debug} ? $parameter->{debug} : 3;
	$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => $debug, key => "log_0125", variables => { method => "Remote->read_snmp_oid()" }});
	
	my $community = defined $parameter->{community} ? $parameter->{community} : "";
	my $mib       = defined $parameter->{mib}       ? $parameter->{mib}       : "";
	my $oid       = defined $parameter->{oid}       ? $parameter->{oid}       : "";
	my $target    = defined $parameter->{target}    ? $parameter->{target}    : "";
	my $version   = defined $parameter->{version}   ? $parameter->{version}   : "2c";
	$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, secure => 0, list => { 
		community => $community, 
		mib       => $mib, 
		oid       => $oid, 
		target    => $target, 
		version   => $version, 
	}});
	
	# Fixed: the three sanity checks below each had a stray 'die;' (debugging left-over)
	# before the return, which made the documented '!!error!!' returns unreachable and
	# crashed the caller instead.
	if (not $oid)
	{
		# Um, what are we supposed to read?
		$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 0, priority => "err", key => "log_0020", variables => { method => "Remote->read_snmp_oid()", parameter => "oid" }});
		return("!!error!!");
	}
	if (not $target)
	{
		# Who ya gonna call? No, seriously, I have no idea...
		$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 0, priority => "err", key => "log_0020", variables => { method => "Remote->read_snmp_oid()", parameter => "target" }});
		return("!!error!!");
	}
	if (($mib) && (not -r $mib))
	{
		# Bad MIB path
		$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 0, priority => "err", key => "error_0163", variables => { mib => $mib }});
		return("!!error!!");
	}
	
	# Build the snmpget call. '-On' prints OIDs numerically so output parsing is stable.
	my $data_type  = "unknown";
	my $shell_call = $anvil->data->{path}{exe}{snmpget}." -On";
	if ($community)
	{
		$shell_call .= " -c ".$community;
	}
	if ($mib)
	{
		$shell_call .= " -m ".$mib;
	}
	$shell_call .= " -v ".$version." ".$target." ".$oid;
	$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, secure => 0, list => { shell_call => $shell_call }});
	
	my ($output, $return_code) = $anvil->System->call({debug => $debug, shell_call => $shell_call});
	$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { 
		output      => $output,
		return_code => $return_code, 
	}});
	
	# Parse the value and its type out of snmpget's "<oid> = <TYPE>: <value>" output.
	my $value = "#!no_value!#";
	foreach my $line (split/\n/, $output)
	{
		$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { line => $line }});
		if ($line =~ /No Response/i)
		{
			# The target didn't answer at all.
			$value = "#!no_connection!#";
			$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { value => $value }});
		}
		elsif (($line =~ /STRING: "(.*)"$/i) or ($line =~ /STRING: (.*)$/i))
		{
			# Strings may or may not be quoted; try the quoted form first.
			$value     = $1;
			$data_type = "string";
			$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { 
				value     => $value,
				data_type => $data_type, 
			}});
		}
		elsif ($line =~ /INTEGER: (\d+)$/i)
		{
			$value     = $1;
			$data_type = "integer";
			$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { 
				value     => $value,
				data_type => $data_type, 
			}});
		}
		elsif ($line =~ /Hex-STRING: (.*)$/i)
		{
			$value     = $1;
			$data_type = "hex-string";
			$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { 
				value     => $value,
				data_type => $data_type, 
			}});
		}
		elsif ($line =~ /Gauge32: (.*)$/i)
		{
			$value     = $1;
			# FIXME: 'guage32' is a typo for 'gauge32', but callers may already match on
			# this exact string; fix both sides together.
			$data_type = "guage32";
			$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { 
				value     => $value,
				data_type => $data_type, 
			}});
		}
		elsif ($line =~ /Timeticks: \((\d+)\) /i)
		{
			# Capture the raw tick count, not the human-readable duration that follows it.
			$value     = $1;
			$data_type = "timeticks";
			$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { 
				value     => $value,
				data_type => $data_type, 
			}});
		}
		elsif ($line =~ /No Such Instance/i)
		{
			# The target answered, but the OID doesn't exist on it.
			$value = "--";
			$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { value => $value }});
		}
		elsif ($line =~ /^(.*?): (.*$)/i)
		{
			# Fall-back; record whatever type snmpget reported verbatim.
			$data_type = $1;
			$value     = $2;
			$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { 
				value     => $value,
				data_type => $data_type, 
			}});
		}
	}
	
	return($value, $data_type);
}
=head2 test_access
This attempts to log into the target to verify that the target is up and reachable. It returns C<< 1 >> on access, C<< 0 >> otherwise.
@ -759,7 +926,7 @@ sub test_access
my $target = defined $parameter->{target} ? $parameter->{target} : "";
my $user = defined $parameter->{user} ? $parameter->{user} : getpwuid($<);
my $access = 0;
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, secure => 0, list => {
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, secure => 0, list => {
password => $anvil->Log->is_secure($password),
port => $port,
target => $target,
@ -785,7 +952,7 @@ sub test_access
$access = 1;
}
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => { access => $access }});
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { access => $access }});
return($access);
}

@ -163,13 +163,13 @@ sub agent_startup
# Read in our word strings.
my $words_file = $anvil->data->{path}{directories}{scan_agents}."/".$agent."/".$agent.".xml";
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => { words_file => $words_file }});
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { words_file => $words_file }});
my $problem = $anvil->Words->read({
debug => $debug,
file => $words_file,
});
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => { problem => $problem }});
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { problem => $problem }});
if ($problem)
{

@ -14,6 +14,7 @@ my $THIS_FILE = "Server.pm";
### Methods;
# boot_virsh
# find
# get_definition
# get_runtime
# get_status
# map_network
@ -344,6 +345,70 @@ sub find
}
=head2 get_definition

This returns the server definition XML for a server. If the server can't be found, or it has no recorded definition, an empty string is returned.

Parameters;

=head3 server_uuid (optional, if 'server_name' used. required if not)

If provided, this is the specific server's definition we'll return. If it is not provided, C<< server_name >> is required.

=head3 server_name (optional)

If provided, and C<< server_uuid >> is not, the server will be searched for using this name. If C<< anvil_uuid >> is included, the name will be searched on the appropriate Anvil! system only.

=head3 anvil_uuid (optional)

If set along with C<< server_name >>, the search for the server's XML will be restricted to the specified Anvil! system.

=cut
sub get_definition
{
	my $self      = shift;
	my $parameter = shift;
	my $anvil     = $self->parent;
	my $debug     = defined $parameter->{debug} ? $parameter->{debug} : 3;
	# Fixed: this previously logged 'Server->get_runtime()', a copy/paste error.
	$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => $debug, key => "log_0125", variables => { method => "Server->get_definition()" }});
	
	my $definition_xml = "";
	my $anvil_uuid     = defined $parameter->{anvil_uuid}  ? $parameter->{anvil_uuid}  : "";
	my $server_name    = defined $parameter->{server_name} ? $parameter->{server_name} : "";
	my $server_uuid    = defined $parameter->{server_uuid} ? $parameter->{server_uuid} : "";
	$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { 
		anvil_uuid  => $anvil_uuid, 
		server_name => $server_name, 
		server_uuid => $server_uuid, 
	}});
	
	# Fixed: a caller-supplied 'server_uuid' used to be unconditionally clobbered by the name
	# lookup (making the documented 'server_uuid' parameter useless). Only resolve the name
	# when no UUID was given.
	if ((not $server_uuid) && ($server_name))
	{
		$server_uuid = $anvil->Get->server_uuid_from_name({
			debug       => $debug, 
			server_name => $server_name, 
			anvil_uuid  => $anvil_uuid,	# NOTE(review): assumes server_uuid_from_name() honours 'anvil_uuid' to scope the search - confirm.
		});
		$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { server_uuid => $server_uuid }});
	}
	if ($server_uuid)
	{
		my $query = "SELECT server_definition_xml FROM server_definitions WHERE server_definition_server_uuid = ".$anvil->Database->quote($server_uuid).";";
		$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { query => $query }});
		
		my $results = $anvil->Database->query({query => $query, source => $THIS_FILE, line => __LINE__});
		my $count   = @{$results};
		$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { 
			results => $results, 
			count   => $count, 
		}});
		if ($count == 1)
		{
			# Found it
			$definition_xml = defined $results->[0]->[0] ? $results->[0]->[0] : "";
			$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { definition_xml => $definition_xml }});
		}
	}
	
	return($definition_xml);
}
=head2 get_runtime
This returns the number of seconds that a (virtual) server has been running on this host.
@ -365,7 +430,7 @@ sub get_runtime
my $parameter = shift;
my $anvil = $self->parent;
my $debug = defined $parameter->{debug} ? $parameter->{debug} : 3;
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => $debug, key => "log_0125", variables => { method => "Server->get_status()" }});
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => $debug, key => "log_0125", variables => { method => "Server->get_runtime()" }});
my $runtime = 0;
my $server = defined $parameter->{server} ? $parameter->{server} : "";
@ -528,6 +593,8 @@ sub get_status
}
# Now get the on-disk XML.
my $definition_file = $anvil->data->{path}{directories}{shared}{definitions}."/".$server.".xml";
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { definition_file => $definition_file }});
($anvil->data->{server}{$host}{$server}{from_disk}{xml}) = $anvil->Storage->read_file({
debug => $debug,
password => $password,
@ -535,7 +602,61 @@ sub get_status
remote_user => $remote_user,
target => $target,
force_read => 1,
file => $anvil->data->{path}{directories}{shared}{definitions}."/".$server.".xml",
file => $definition_file,
});
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => {
"server::${host}::${server}::from_disk::xml" => $anvil->data->{server}{$host}{$server}{from_disk}{xml},
}});
if (($anvil->data->{server}{$host}{$server}{from_disk}{xml} eq "!!error!!") or (not $anvil->data->{server}{$host}{$server}{from_disk}{xml}))
{
# Failed to read it. Can we write it?
my $definition_xml = "";
if ($anvil->data->{server}{$host}{$server}{from_virsh}{xml})
{
$definition_xml = $anvil->data->{server}{$host}{$server}{from_virsh}{xml};
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { definition_xml => $definition_xml }});
}
else
{
# Read in from the database.
$definition_xml = $anvil->Server->get_definition({
debug => $debug,
server_name => $server,
anvil_uuid => $anvil->Cluster->get_anvil_uuid({debug => $debug}),
});
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { definition_xml => $definition_xml }});
}
if ($definition_xml)
{
# Write it to disk
my ($failed) = $anvil->Storage->write_file({
secure => 1,
file => $definition_file,
body => $definition_xml,
overwrite => 1,
password => $password,
port => $port,
remote_user => $remote_user,
target => $target,
});
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { failed => $failed }});
if ($failed)
{
# Something went wrong.
$anvil->data->{server}{$host}{$server}{from_disk}{xml} = "";
return(1);
}
# Now try to read it back.
($anvil->data->{server}{$host}{$server}{from_disk}{xml}) = $anvil->Storage->read_file({
debug => $debug,
password => $password,
port => $port,
remote_user => $remote_user,
target => $target,
force_read => 1,
file => $definition_file,
});
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => {
"server::${host}::${server}::from_disk::xml" => $anvil->data->{server}{$host}{$server}{from_disk}{xml},
@ -544,6 +665,26 @@ sub get_status
{
# Failed to read it.
$anvil->data->{server}{$host}{$server}{from_disk}{xml} = "";
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => {
"server::${host}::${server}::from_disk::xml" => $anvil->data->{server}{$host}{$server}{from_disk}{xml},
}});
}
else
{
# Load
$anvil->Server->parse_definition({
debug => $debug,
host => $this_host,
server => $server,
source => "from_disk",
definition => $anvil->data->{server}{$host}{$server}{from_disk}{xml},
});
}
}
else
{
$anvil->data->{server}{$host}{$server}{from_disk}{xml} = "";
}
}
else
{

41
notes

@ -435,6 +435,45 @@ CREATE TABLE updated (
);
ALTER TABLE updated OWNER TO admin;
DROP FUNCTION history_anvils() CASCADE;
CREATE FUNCTION history_anvils() RETURNS trigger
AS $$
DECLARE
history_anvils RECORD;
BEGIN
SELECT INTO history_anvils * FROM anvils WHERE anvil_uuid = new.anvil_uuid;
INSERT INTO history.anvils
(anvil_uuid,
anvil_name,
anvil_description,
anvil_password,
anvil_node1_host_uuid,
anvil_node2_host_uuid,
anvil_dr1_host_uuid,
anvil_fencing_enabled,
modified_date)
VALUES
(history_anvils.anvil_uuid,
history_anvils.anvil_name,
history_anvils.anvil_description,
history_anvils.anvil_password,
history_anvils.anvil_node1_host_uuid,
history_anvils.anvil_node2_host_uuid,
history_anvils.anvil_dr1_host_uuid,
history_anvils.anvil_fencing_enabled,
history_anvils.modified_date);
RETURN NULL;
END;
$$
LANGUAGE plpgsql;
ALTER FUNCTION history_anvils() OWNER TO admin;
CREATE TRIGGER trigger_anvils
AFTER INSERT OR UPDATE ON anvils
FOR EACH ROW EXECUTE PROCEDURE history_anvils();
COMMIT;
============
@ -719,7 +758,7 @@ pcs resource create srv07-el6 ocf:alteeve:server name="srv07-el6" meta allow-mig
pcs constraint location srv07-el6 prefers mk-a02n01=200 mk-a02n02=100
pcs resource enable srv07-el6
- or -
pcs resource update srv07-el6 ocf:alteeve:server name="srv07-el6" meta allow-migrate="true" migrate_to="INFINITY" stop="INFINITY" op monitor interval="60" on-fail="block"
pcs resource update srv07-el6 ocf:alteeve:server name="srv07-el6" meta allow-migrate="true" target-role="stopped" op monitor interval="60" start timeout="INFINITY" on-fail="block" stop timeout="INFINITY" on-fail="block" migrate_to timeout="INFINITY"
# Test
stonith_admin --fence el8-a01n02 --verbose; crm_error $?

@ -101,7 +101,7 @@ $anvil->Log->secure({set => 1});
# If we can connect to a database, we'll set/clear the 'migrating' flag during migrations
$anvil->Database->connect();
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 2, secure => 0, key => "log_0132"});
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 3, secure => 0, key => "log_0132"});
if (not $anvil->data->{sys}{database}{connections})
{
# No databases, exit.
@ -996,7 +996,7 @@ sub server_status
# the server is failed, return OCF_ERR_GENERIC (1).
my $state = "";
my $server = $anvil->data->{environment}{OCF_RESKEY_name};
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 2, key => "log_0521", variables => { server => $server }});
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 3, key => "log_0521", variables => { server => $server }});
if (not $anvil->data->{environment}{OCF_RESKEY_CRM_meta_timeout})
{
@ -1158,7 +1158,7 @@ pmsuspended - The domain has been suspended by guest power management, e.g. ente
else
{
# In some fashion or another, the server is running. Exit with OCF_SUCCESS (rc: 0)
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 2, key => "log_0527", variables => { 'state' => $state }});
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 3, key => "log_0527", variables => { 'state' => $state }});
$anvil->nice_exit({exit_code => 0});
}
}
@ -1458,8 +1458,6 @@ sub validate_all
my ($anvil) = @_;
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 2, key => "log_0361"});
### TODO: When we have actual Anvil! systems, connect to the peers (nodes / DR) for this host and see
### if the server is running elsewhere.
my $server = $anvil->data->{environment}{OCF_RESKEY_name};
my $source = defined $anvil->data->{environment}{OCF_RESKEY_CRM_meta_migrate_source} ? $anvil->data->{environment}{OCF_RESKEY_CRM_meta_migrate_source} : "";
my $target = defined $anvil->data->{environment}{OCF_RESKEY_CRM_meta_migrate_target} ? $anvil->data->{environment}{OCF_RESKEY_CRM_meta_migrate_target} : "";
@ -1469,9 +1467,9 @@ sub validate_all
target => $target,
}});
# Read in an parse the server's XML.
# Read in and parse the server's XML.
$anvil->System->check_storage({debug => 3});
$anvil->Server->get_status({debug => 3, server => $server});
$anvil->Server->get_status({debug => 2, server => $server});
# Is the name in the definition file what we expect (and did we read the XML data at all)?
validate_name($anvil);

@ -46,6 +46,7 @@ Requires: iproute
Requires: lsscsi
Requires: mailx
Requires: mlocate
Requires: net-snmp-utils
Requires: nvme-cli
Requires: perl-Capture-Tiny
Requires: perl-Data-Dumper

@ -0,0 +1,11 @@
-- This is needed by scan-apc-pdu to trick/force .1.3.6.1.2.1.2.2.1.6.2 to return Hex-STRING. --
PowerNet-MIB DEFINITIONS ::= BEGIN
IMPORTS
enterprises, IpAddress, Gauge, TimeTicks FROM RFC1155-SMI
DisplayString FROM RFC1213-MIB
OBJECT-TYPE FROM RFC-1212
TRAP-TYPE FROM RFC-1215;
-- IMPORTS End

File diff suppressed because it is too large Load Diff

@ -0,0 +1,256 @@
-- This is the database schema for the 'scan-apc-pdu' Scan Agent.
-- Current-state table; one row per monitored APC PDU. The history schema
-- holds the corresponding audit trail (see history.scan_apc_pdus).
CREATE TABLE scan_apc_pdus (
scan_apc_pdu_uuid uuid not null primary key, -- This is set by the target, not by us!
scan_apc_pdu_fence_uuid uuid not null, -- References fences -> fence_uuid (see FOREIGN KEY below)
scan_apc_pdu_serial_number text not null, --
scan_apc_pdu_model_number text not null, --
scan_apc_pdu_manufacture_date text not null, --
scan_apc_pdu_firmware_version text not null, --
scan_apc_pdu_hardware_version text not null, --
scan_apc_pdu_ipv4_address text not null, --
scan_apc_pdu_mac_address text not null, --
scan_apc_pdu_mtu_size numeric not null, --
scan_apc_pdu_link_speed numeric not null, -- in bits-per-second, set to '0' when we lose access
scan_apc_pdu_phase_count numeric not null, --
scan_apc_pdu_outlet_count numeric not null, --
modified_date timestamp with time zone not null,
-- Every PDU record must map back to an existing fence device entry.
FOREIGN KEY(scan_apc_pdu_fence_uuid) REFERENCES fences(fence_uuid)
);
ALTER TABLE scan_apc_pdus OWNER TO admin;
-- Audit copy of scan_apc_pdus. Columns mirror the public table (without
-- constraints); 'history_id' orders the entries. Rows are appended by the
-- history_scan_apc_pdus() trigger function on every INSERT or UPDATE.
CREATE TABLE history.scan_apc_pdus (
history_id bigserial,
scan_apc_pdu_uuid uuid,
scan_apc_pdu_fence_uuid uuid,
scan_apc_pdu_serial_number text,
scan_apc_pdu_model_number text,
scan_apc_pdu_manufacture_date text,
scan_apc_pdu_firmware_version text,
scan_apc_pdu_hardware_version text,
scan_apc_pdu_ipv4_address text,
scan_apc_pdu_mac_address text,
scan_apc_pdu_mtu_size numeric,
scan_apc_pdu_link_speed numeric,
scan_apc_pdu_phase_count numeric,
scan_apc_pdu_outlet_count numeric,
modified_date timestamp with time zone not null
);
ALTER TABLE history.scan_apc_pdus OWNER TO admin;
-- Trigger function: after a row in scan_apc_pdus is inserted or updated,
-- re-read that row by its UUID and append a copy to history.scan_apc_pdus.
-- Returns NULL, which is ignored for AFTER triggers.
CREATE FUNCTION history_scan_apc_pdus() RETURNS trigger
AS $$
DECLARE
history_scan_apc_pdus RECORD;
BEGIN
SELECT INTO history_scan_apc_pdus * FROM scan_apc_pdus WHERE scan_apc_pdu_uuid=new.scan_apc_pdu_uuid;
INSERT INTO history.scan_apc_pdus
(scan_apc_pdu_uuid,
scan_apc_pdu_fence_uuid,
scan_apc_pdu_serial_number,
scan_apc_pdu_model_number,
scan_apc_pdu_manufacture_date,
scan_apc_pdu_firmware_version,
scan_apc_pdu_hardware_version,
scan_apc_pdu_ipv4_address,
scan_apc_pdu_mac_address,
scan_apc_pdu_mtu_size,
scan_apc_pdu_link_speed,
scan_apc_pdu_phase_count,
scan_apc_pdu_outlet_count,
modified_date)
VALUES
(history_scan_apc_pdus.scan_apc_pdu_uuid,
history_scan_apc_pdus.scan_apc_pdu_fence_uuid,
history_scan_apc_pdus.scan_apc_pdu_serial_number,
history_scan_apc_pdus.scan_apc_pdu_model_number,
history_scan_apc_pdus.scan_apc_pdu_manufacture_date,
history_scan_apc_pdus.scan_apc_pdu_firmware_version,
history_scan_apc_pdus.scan_apc_pdu_hardware_version,
history_scan_apc_pdus.scan_apc_pdu_ipv4_address,
history_scan_apc_pdus.scan_apc_pdu_mac_address,
history_scan_apc_pdus.scan_apc_pdu_mtu_size,
history_scan_apc_pdus.scan_apc_pdu_link_speed,
history_scan_apc_pdus.scan_apc_pdu_phase_count,
history_scan_apc_pdus.scan_apc_pdu_outlet_count,
history_scan_apc_pdus.modified_date);
RETURN NULL;
END;
$$
LANGUAGE plpgsql;
ALTER FUNCTION history_scan_apc_pdus() OWNER TO admin;
-- Record history for every changed row in the public table.
CREATE TRIGGER trigger_scan_apc_pdus
AFTER INSERT OR UPDATE ON scan_apc_pdus
FOR EACH ROW EXECUTE PROCEDURE history_scan_apc_pdus();
-- Phases on the PDU
-- Current-state table; one row per power phase on a PDU, keyed back to its
-- parent scan_apc_pdus row.
CREATE TABLE scan_apc_pdu_phases (
scan_apc_pdu_phase_uuid uuid not null primary key,
scan_apc_pdu_phase_scan_apc_pdu_uuid uuid not null, -- Parent scan_apc_pdus -> scan_apc_pdu_uuid
scan_apc_pdu_phase_number text not null, --
scan_apc_pdu_phase_current_amperage numeric not null, -- Max, low/high warn and high critical will be read from the PDU in the given pass.
scan_apc_pdu_phase_max_amperage numeric,
modified_date timestamp with time zone not null,
FOREIGN KEY(scan_apc_pdu_phase_scan_apc_pdu_uuid) REFERENCES scan_apc_pdus(scan_apc_pdu_uuid)
);
ALTER TABLE scan_apc_pdu_phases OWNER TO admin;
-- Audit copy of scan_apc_pdu_phases; appended to by the
-- history_scan_apc_pdu_phases() trigger function on INSERT/UPDATE.
CREATE TABLE history.scan_apc_pdu_phases (
history_id bigserial,
scan_apc_pdu_phase_uuid uuid,
scan_apc_pdu_phase_scan_apc_pdu_uuid uuid,
scan_apc_pdu_phase_number text,
scan_apc_pdu_phase_current_amperage numeric,
scan_apc_pdu_phase_max_amperage numeric,
modified_date timestamp with time zone not null
);
ALTER TABLE history.scan_apc_pdu_phases OWNER TO admin;
-- Trigger function: after a scan_apc_pdu_phases row changes, copy the new
-- state of that row into history.scan_apc_pdu_phases.
CREATE FUNCTION history_scan_apc_pdu_phases() RETURNS trigger
AS $$
DECLARE
history_scan_apc_pdu_phases RECORD;
BEGIN
SELECT INTO history_scan_apc_pdu_phases * FROM scan_apc_pdu_phases WHERE scan_apc_pdu_phase_uuid=new.scan_apc_pdu_phase_uuid;
INSERT INTO history.scan_apc_pdu_phases
(scan_apc_pdu_phase_uuid,
scan_apc_pdu_phase_scan_apc_pdu_uuid,
scan_apc_pdu_phase_number,
scan_apc_pdu_phase_current_amperage,
scan_apc_pdu_phase_max_amperage,
modified_date)
VALUES
(history_scan_apc_pdu_phases.scan_apc_pdu_phase_uuid,
history_scan_apc_pdu_phases.scan_apc_pdu_phase_scan_apc_pdu_uuid,
history_scan_apc_pdu_phases.scan_apc_pdu_phase_number,
history_scan_apc_pdu_phases.scan_apc_pdu_phase_current_amperage,
history_scan_apc_pdu_phases.scan_apc_pdu_phase_max_amperage,
history_scan_apc_pdu_phases.modified_date);
RETURN NULL;
END;
$$
LANGUAGE plpgsql;
ALTER FUNCTION history_scan_apc_pdu_phases() OWNER TO admin;
-- Record history for every changed row in the public table.
CREATE TRIGGER trigger_scan_apc_pdu_phases
AFTER INSERT OR UPDATE ON scan_apc_pdu_phases
FOR EACH ROW EXECUTE PROCEDURE history_scan_apc_pdu_phases();
-- Outlets on the PDU
-- Current-state table; one row per outlet on a PDU, keyed back to its
-- parent scan_apc_pdus row.
CREATE TABLE scan_apc_pdu_outlets (
scan_apc_pdu_outlet_uuid uuid not null primary key,
scan_apc_pdu_outlet_scan_apc_pdu_uuid uuid not null, -- Parent scan_apc_pdus -> scan_apc_pdu_uuid
scan_apc_pdu_outlet_number text not null,
scan_apc_pdu_outlet_name text not null,
scan_apc_pdu_outlet_on_phase text not null,
scan_apc_pdu_outlet_state text not null, -- on / off / unknown
modified_date timestamp with time zone not null,
FOREIGN KEY(scan_apc_pdu_outlet_scan_apc_pdu_uuid) REFERENCES scan_apc_pdus(scan_apc_pdu_uuid)
);
ALTER TABLE scan_apc_pdu_outlets OWNER TO admin;
-- Audit copy of scan_apc_pdu_outlets; appended to by the
-- history_scan_apc_pdu_outlets() trigger function on INSERT/UPDATE.
CREATE TABLE history.scan_apc_pdu_outlets (
history_id bigserial,
scan_apc_pdu_outlet_uuid uuid,
scan_apc_pdu_outlet_scan_apc_pdu_uuid uuid,
scan_apc_pdu_outlet_number text,
scan_apc_pdu_outlet_name text,
scan_apc_pdu_outlet_on_phase text,
scan_apc_pdu_outlet_state text,
modified_date timestamp with time zone not null
);
ALTER TABLE history.scan_apc_pdu_outlets OWNER TO admin;
-- Trigger function: after a scan_apc_pdu_outlets row changes, copy the new
-- state of that row into history.scan_apc_pdu_outlets.
CREATE FUNCTION history_scan_apc_pdu_outlets() RETURNS trigger
AS $$
DECLARE
history_scan_apc_pdu_outlets RECORD;
BEGIN
SELECT INTO history_scan_apc_pdu_outlets * FROM scan_apc_pdu_outlets WHERE scan_apc_pdu_outlet_uuid=new.scan_apc_pdu_outlet_uuid;
INSERT INTO history.scan_apc_pdu_outlets
(scan_apc_pdu_outlet_uuid,
scan_apc_pdu_outlet_scan_apc_pdu_uuid,
scan_apc_pdu_outlet_number,
scan_apc_pdu_outlet_name,
scan_apc_pdu_outlet_on_phase,
scan_apc_pdu_outlet_state,
modified_date)
VALUES
(history_scan_apc_pdu_outlets.scan_apc_pdu_outlet_uuid,
history_scan_apc_pdu_outlets.scan_apc_pdu_outlet_scan_apc_pdu_uuid,
history_scan_apc_pdu_outlets.scan_apc_pdu_outlet_number,
history_scan_apc_pdu_outlets.scan_apc_pdu_outlet_name,
history_scan_apc_pdu_outlets.scan_apc_pdu_outlet_on_phase,
history_scan_apc_pdu_outlets.scan_apc_pdu_outlet_state,
history_scan_apc_pdu_outlets.modified_date);
RETURN NULL;
END;
$$
LANGUAGE plpgsql;
ALTER FUNCTION history_scan_apc_pdu_outlets() OWNER TO admin;
-- Record history for every changed row in the public table.
CREATE TRIGGER trigger_scan_apc_pdu_outlets
AFTER INSERT OR UPDATE ON scan_apc_pdu_outlets
FOR EACH ROW EXECUTE PROCEDURE history_scan_apc_pdu_outlets();
-- This stores various variables found for a given PDU but not explicitly checked for (or that
-- change frequently).
-- Current-state table; free-form name/value pairs attached to a PDU.
CREATE TABLE scan_apc_pdu_variables (
scan_apc_pdu_variable_uuid uuid not null primary key, --
scan_apc_pdu_variable_scan_apc_pdu_uuid uuid not null, -- Parent scan_apc_pdus -> scan_apc_pdu_uuid
scan_apc_pdu_variable_is_temperature boolean not null, -- Flags the value as a temperature reading
scan_apc_pdu_variable_name text not null, --
scan_apc_pdu_variable_value text not null, --
modified_date timestamp with time zone not null, --
FOREIGN KEY(scan_apc_pdu_variable_scan_apc_pdu_uuid) REFERENCES scan_apc_pdus(scan_apc_pdu_uuid)
);
ALTER TABLE scan_apc_pdu_variables OWNER TO admin;
-- Audit copy of scan_apc_pdu_variables; appended to by the
-- history_scan_apc_pdu_variables() trigger function on INSERT/UPDATE.
CREATE TABLE history.scan_apc_pdu_variables (
history_id bigserial,
scan_apc_pdu_variable_uuid uuid,
scan_apc_pdu_variable_scan_apc_pdu_uuid uuid,
scan_apc_pdu_variable_is_temperature boolean,
scan_apc_pdu_variable_name text,
scan_apc_pdu_variable_value text,
modified_date timestamp with time zone not null
);
ALTER TABLE history.scan_apc_pdu_variables OWNER TO admin;
-- Trigger function: after a scan_apc_pdu_variables row changes, copy the new
-- state of that row into history.scan_apc_pdu_variables.
CREATE FUNCTION history_scan_apc_pdu_variables() RETURNS trigger
AS $$
DECLARE
history_scan_apc_pdu_variables RECORD;
BEGIN
SELECT INTO history_scan_apc_pdu_variables * FROM scan_apc_pdu_variables WHERE scan_apc_pdu_variable_uuid=new.scan_apc_pdu_variable_uuid;
INSERT INTO history.scan_apc_pdu_variables
(scan_apc_pdu_variable_uuid,
scan_apc_pdu_variable_scan_apc_pdu_uuid,
scan_apc_pdu_variable_is_temperature,
scan_apc_pdu_variable_name,
scan_apc_pdu_variable_value,
modified_date)
VALUES
(history_scan_apc_pdu_variables.scan_apc_pdu_variable_uuid,
history_scan_apc_pdu_variables.scan_apc_pdu_variable_scan_apc_pdu_uuid,
history_scan_apc_pdu_variables.scan_apc_pdu_variable_is_temperature,
history_scan_apc_pdu_variables.scan_apc_pdu_variable_name,
history_scan_apc_pdu_variables.scan_apc_pdu_variable_value,
history_scan_apc_pdu_variables.modified_date);
RETURN NULL;
END;
$$
LANGUAGE plpgsql;
ALTER FUNCTION history_scan_apc_pdu_variables() OWNER TO admin;
-- Record history for every changed row in the public table.
CREATE TRIGGER trigger_scan_apc_pdu_variables
AFTER INSERT OR UPDATE ON scan_apc_pdu_variables
FOR EACH ROW EXECUTE PROCEDURE history_scan_apc_pdu_variables();

@ -0,0 +1,41 @@
<?xml version="1.0" encoding="UTF-8"?>
<!--
Company: Alteeve's Niche, Inc.
License: GPL v2+
Author: Madison Kelly <mkelly@alteeve.ca>
NOTE: All string keys MUST be prefixed with the agent name! ie: 'scan_apc_pdu_log_0001'.
-->
<words>
<meta version="3.0.0" languages="en_CA,jp"/>
<!-- Canadian English -->
<language name="en_CA" long_name="Canadian English" description="ScanCore scan agent that monitors APC brand PDUs; phases, outlets, power data, etc.">
<!-- Alert entries -->
<key name="scan_apc_pdu_alert_0001"></key>
<!-- Error entries -->
<key name="scan_apc_pdu_error_0001">Failed to read the number of phases for the PDU: [#!variable!name!#] at IP: [#!variable!ip!#] (sn: #!variable!serial_number!#).</key>
<key name="scan_apc_pdu_error_0002">Able to now read the number of phases for the PDU: [#!variable!name!#] at IP: [#!variable!ip!#] (sn: #!variable!serial_number!#).</key>
<key name="scan_apc_pdu_error_0003">Failed to read the number of outlets for the PDU: [#!variable!name!#] at IP: [#!variable!ip!#] (sn: #!variable!serial_number!#).</key>
<key name="scan_apc_pdu_error_0004">Able to now read the number of outlets for the PDU: [#!variable!name!#] at IP: [#!variable!ip!#] (sn: #!variable!serial_number!#).</key>
<!-- Log entries -->
<key name="scan_apc_pdu_log_0001">Starting: [#!variable!program!#].</key>
<!-- Message entries (usually meant to be alerts) -->
<key name="scan_apc_pdu_message_0001">No APC PDUs found as configured fence devices, nothing to do.</key>
<key name="scan_apc_pdu_message_0002">Failed to read the number of phases for the PDU: [#!variable!name!#] at IP: [#!variable!ip!#] (sn: #!variable!serial_number!#).</key>
<key name="scan_apc_pdu_message_0003">Able to now read the number of phases for the PDU: [#!variable!name!#] at IP: [#!variable!ip!#] (sn: #!variable!serial_number!#).</key>
<key name="scan_apc_pdu_message_0004">Failed to read the number of outlets for the PDU: [#!variable!name!#] at IP: [#!variable!ip!#] (sn: #!variable!serial_number!#).</key>
<key name="scan_apc_pdu_message_0005">Able to now read the number of outlets for the PDU: [#!variable!name!#] at IP: [#!variable!ip!#] (sn: #!variable!serial_number!#).</key>
<key name="scan_apc_pdu_message_0006">The PDU: [#!variable!name!#] with the serial number: [#!variable!serial_number!#] at the IP address: [#!variable!ip_address!#] has returned.</key>
<key name="scan_apc_pdu_message_0007">The model of the PDU: [#!variable!name!#] has changed from; [#!variable!old_model_number!#] to: [#!variable!new_model_number!#]</key>
<!-- Units -->
<key name="scan_apc_pdu_unit_0001"></key>
</language>
</words>

@ -10,6 +10,7 @@
# Exit codes;
# 0 = Normal exit.
# 1 = Startup failure (not running as root, no DB, bad file read, etc)
# 2 = Not a cluster member
#
# TODO:
#
@ -63,7 +64,7 @@ if (($anvil->data->{scancore}{'scan-cluster'}{disable}) && (not $anvil->data->{s
# These are the tables used by this agent. The order matters as it controls to order the tables are created
# and sync'ed. For purges, this array is walked backwards.
$anvil->data->{scancore}{'scan-cluster'}{tables} = ["scan_cluster"];
$anvil->data->{scancore}{'scan-cluster'}{tables} = ["scan_cluster", "scan_cluster_nodes"];
# Handle start-up tasks
my $problem = $anvil->ScanCore->agent_startup({
@ -89,18 +90,21 @@ if ($anvil->data->{switches}{purge})
# Before we do anything, are we a node in a pacemaker cluster?
my $host_type = $anvil->Get->host_type;
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => { host_type => $host_type }});
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 3, list => { host_type => $host_type }});
if ($host_type ne "node")
{
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 1, key => "scan_cluster_log_0002", variables => { host_type => $host_type }});
$anvil->nice_exit({exit_code => 0});
}
# Read the data.
collect_data($anvil);
# Read last scan
read_last_scan($anvil);
# Read and process in one shot.
collect_data($anvil);
# Find changes.
find_changes($anvil);
$anvil->nice_exit({exit_code => 0});
@ -108,59 +112,511 @@ $anvil->nice_exit({exit_code => 0});
# Functions #
#############################################################################################################
# This reads in all the data we can find on the local system
sub collect_data
# Looks for changes.
sub find_changes
{
my ($anvil) = @_;
# Pick out core cluster details.
my $problem = $anvil->Cluster->parse_cib({debug => 2});
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => { problem => $problem }});
# We can't track a cluster through name change, so either we're INSERTing a new one, or bust.
my $scan_cluster_anvil_uuid = $anvil->Cluster->get_anvil_uuid();
my $anvil_name = $anvil->Get->anvil_name_from_uuid({anvil_uuid => $scan_cluster_anvil_uuid});
my $scan_cluster_uuid = "";
my $cluster_name = $anvil->data->{cib}{parsed}{data}{cluster}{name};
my $stonith_enabled = $anvil->data->{cib}{parsed}{data}{stonith}{enabled};
my $maintenance_mode = $anvil->data->{cib}{parsed}{data}{cluster}{'maintenance-mode'};
my $stonith_max_attempts = $anvil->data->{cib}{parsed}{data}{cluster}{'stonith-max-attempts'};
my $stonith_max_attempts = $anvil->data->{cib}{parsed}{data}{stonith}{'max-attempts'};
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => {
cluster_name => $cluster_name,
stonith_enabled => $stonith_enabled,
maintenance_mode => $maintenance_mode,
stonith_max_attempts => $stonith_max_attempts,
}});
### TODO: If we're node 2, or not in the cluster, only update our information in the
### 'scan_cluster_nodes' table. Node 1 will update everything else if it's 'ready' (else node 2
### will, if it's ready).
my $i_am = $anvil->Cluster->which_node({debug => 1});
my $my_node_name = $anvil->data->{cib}{parsed}{'local'}{name};
my $peer_node_name = $anvil->data->{cib}{parsed}{peer}{name};
my $peer_ready = $anvil->data->{cib}{parsed}{peer}{ready};
my $local_ready = $anvil->data->{cib}{parsed}{data}{node}{$my_node_name}{node_state}{ready};
if (exists $anvil->data->{sql}{anvil_uuid}{$scan_cluster_anvil_uuid})
{
# Check for a name change
$scan_cluster_uuid = $anvil->data->{sql}{anvil_uuid}{$scan_cluster_anvil_uuid};
my $old_cluster_name = $anvil->data->{sql}{scan_cluster}{scan_cluster_uuid}{$scan_cluster_uuid}{scan_cluster_name};
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => {
i_am => $i_am,
my_node_name => $my_node_name,
peer_node_name => $peer_node_name,
peer_ready => $peer_ready,
local_ready => $local_ready,
scan_cluster_uuid => $scan_cluster_uuid,
old_cluster_name => $old_cluster_name,
}});
if ($cluster_name ne $old_cluster_name)
{
# The name of the cluster has changed.
my $query = "
UPDATE
scan_cluster
SET
scan_cluster_name = ".$anvil->Database->quote($cluster_name).",
modified_date = ".$anvil->Database->quote($anvil->data->{sys}{database}{timestamp})."
WHERE
scan_cluster_uuid = ".$anvil->Database->quote($scan_cluster_uuid)."
;";
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => { query => $query }});
$anvil->Database->write({query => $query, source => $THIS_FILE, line => __LINE__});
### TODO: Change the logic so that when both nodes are in the cluster, the node with the lowest
### load does the scan (typically the node without VMs).
if (($i_am eq "node2") && ($peer_ready))
my $variables = {
new_cluster_name => $cluster_name,
old_cluster_name => $old_cluster_name,
};
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 1, key => "scan_cluster_alert_0002", variables => $variables});
$anvil->Alert->register({debug => 2, alert_level => "notice", message => "scan_cluster_alert_0002", message_variables => $variables, set_by => $THIS_FILE});
}
}
else
{
# We're not going to run.
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 2, key => "scan_cluster_message_0001"});
$anvil->nice_exit({exit_code => 0});
# New cluster, INSERT
$scan_cluster_uuid = $anvil->Get->uuid();
my $query = "
INSERT INTO
scan_cluster
(
scan_cluster_uuid,
scan_cluster_anvil_uuid,
scan_cluster_name,
modified_date
) VALUES (
".$anvil->Database->quote($scan_cluster_uuid).",
".$anvil->Database->quote($scan_cluster_anvil_uuid).",
".$anvil->Database->quote($cluster_name).",
".$anvil->Database->quote($anvil->data->{sys}{database}{timestamp})."
);";
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => { query => $query }});
$anvil->Database->write({query => $query, source => $THIS_FILE, line => __LINE__});
my $variables = { cluster_name => $cluster_name };
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 1, key => "scan_cluster_alert_0001", variables => $variables});
$anvil->Alert->register({debug => 2, alert_level => "notice", message => "scan_cluster_alert_0001", message_variables => $variables, set_by => $THIS_FILE});
}
$anvil->Database->get_anvils();
foreach my $scan_cluster_node_name (sort {$a cmp $b} keys %{$anvil->data->{cib}{parsed}{data}{node}})
{
my $scan_cluster_node_host_uuid = $anvil->Get->host_uuid_from_name({host_name => $scan_cluster_node_name});
my $scan_cluster_node_pacemaker_id = $anvil->data->{cib}{parsed}{data}{node}{$scan_cluster_node_name}{node_state}{pacemaker_id};
my $scan_cluster_node_in_ccm = $anvil->data->{cib}{parsed}{data}{node}{$scan_cluster_node_name}{node_state}{in_ccm};
my $scan_cluster_node_crmd_member = $anvil->data->{cib}{parsed}{data}{node}{$scan_cluster_node_name}{node_state}{crmd};
my $scan_cluster_node_cluster_member = $anvil->data->{cib}{parsed}{data}{node}{$scan_cluster_node_name}{node_state}{'join'};
my $scan_cluster_node_maintenance_mode = $anvil->data->{cib}{parsed}{data}{node}{$scan_cluster_node_name}{node_state}{'maintenance-mode'};
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => {
scan_cluster_node_name => $scan_cluster_node_name,
scan_cluster_node_host_uuid => $scan_cluster_node_host_uuid,
scan_cluster_node_pacemaker_id => $scan_cluster_node_pacemaker_id,
scan_cluster_node_in_ccm => $scan_cluster_node_in_ccm,
scan_cluster_node_crmd_member => $scan_cluster_node_crmd_member,
scan_cluster_node_cluster_member => $scan_cluster_node_cluster_member,
scan_cluster_node_maintenance_mode => $scan_cluster_node_maintenance_mode,
}});
if (exists $anvil->data->{sql}{scan_cluster_node_host_uuid}{$scan_cluster_node_host_uuid})
{
# Look for changes.
my $scan_cluster_node_uuid = $anvil->data->{sql}{scan_cluster_node_host_uuid}{$scan_cluster_node_host_uuid};
my $old_scan_cluster_node_name = $anvil->data->{sql}{scan_cluster_node}{scan_cluster_node_uuid}{$scan_cluster_node_uuid}{scan_cluster_node_name};
my $old_scan_cluster_node_pacemaker_id = $anvil->data->{sql}{scan_cluster_node}{scan_cluster_node_uuid}{$scan_cluster_node_uuid}{scan_cluster_node_pacemaker_id};
my $old_scan_cluster_node_in_ccm = $anvil->data->{sql}{scan_cluster_node}{scan_cluster_node_uuid}{$scan_cluster_node_uuid}{scan_cluster_node_in_ccm};
my $old_scan_cluster_node_crmd_member = $anvil->data->{sql}{scan_cluster_node}{scan_cluster_node_uuid}{$scan_cluster_node_uuid}{scan_cluster_node_crmd_member};
my $old_scan_cluster_node_cluster_member = $anvil->data->{sql}{scan_cluster_node}{scan_cluster_node_uuid}{$scan_cluster_node_uuid}{scan_cluster_node_cluster_member};
my $old_scan_cluster_node_maintenance_mode = $anvil->data->{sql}{scan_cluster_node}{scan_cluster_node_uuid}{$scan_cluster_node_uuid}{scan_cluster_node_maintenance_mode};
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => {
scan_cluster_node_uuid => $scan_cluster_node_uuid,
old_scan_cluster_node_name => $old_scan_cluster_node_name,
old_scan_cluster_node_pacemaker_id => $old_scan_cluster_node_pacemaker_id,
old_scan_cluster_node_in_ccm => $old_scan_cluster_node_in_ccm,
old_scan_cluster_node_crmd_member => $old_scan_cluster_node_crmd_member,
old_scan_cluster_node_cluster_member => $old_scan_cluster_node_cluster_member,
old_scan_cluster_node_maintenance_mode => $old_scan_cluster_node_maintenance_mode,
}});
my $update = 0;
if ($scan_cluster_node_name ne $old_scan_cluster_node_name)
{
$update = 1;
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => { update => $update }});
my $variables = {
new_node_name => $scan_cluster_node_name,
old_node_name => $old_scan_cluster_node_name,
};
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 1, key => "scan_cluster_alert_0008", variables => $variables});
$anvil->Alert->register({debug => 2, alert_level => "notice", message => "scan_cluster_alert_0009", message_variables => $variables, set_by => $THIS_FILE});
}
if ($scan_cluster_node_pacemaker_id ne $old_scan_cluster_node_pacemaker_id)
{
$update = 1;
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => { update => $update }});
my $variables = {
node_name => $scan_cluster_node_name,
new_pacemaker_id => $scan_cluster_node_pacemaker_id ? "#!string!scan_cluster_unit_0001!#" : "#!string!scan_cluster_unit_0002!#",
old_pacemaker_id => $old_scan_cluster_node_pacemaker_id ? "#!string!scan_cluster_unit_0001!#" : "#!string!scan_cluster_unit_0002!#",
};
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 1, key => "scan_cluster_alert_0004", variables => $variables});
$anvil->Alert->register({debug => 2, alert_level => "notice", message => "scan_cluster_alert_0004", message_variables => $variables, set_by => $THIS_FILE});
}
if ($scan_cluster_node_in_ccm ne $old_scan_cluster_node_in_ccm)
{
$update = 1;
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => { update => $update }});
my $variables = {
node_name => $scan_cluster_node_name,
new_in_ccm => $scan_cluster_node_in_ccm ? "#!string!scan_cluster_unit_0001!#" : "#!string!scan_cluster_unit_0002!#",
old_in_ccm => $old_scan_cluster_node_in_ccm ? "#!string!scan_cluster_unit_0001!#" : "#!string!scan_cluster_unit_0002!#",
};
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 1, key => "scan_cluster_alert_0005", variables => $variables});
$anvil->Alert->register({debug => 2, alert_level => "notice", message => "scan_cluster_alert_0005", message_variables => $variables, set_by => $THIS_FILE});
}
if ($scan_cluster_node_crmd_member ne $old_scan_cluster_node_crmd_member)
{
$update = 1;
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => { update => $update }});
my $variables = {
node_name => $scan_cluster_node_name,
new_crmd_member => $scan_cluster_node_crmd_member ? "#!string!scan_cluster_unit_0001!#" : "#!string!scan_cluster_unit_0002!#",
old_crmd_member => $old_scan_cluster_node_crmd_member ? "#!string!scan_cluster_unit_0001!#" : "#!string!scan_cluster_unit_0002!#",
};
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 1, key => "scan_cluster_alert_0006", variables => $variables});
$anvil->Alert->register({debug => 2, alert_level => "notice", message => "scan_cluster_alert_0006", message_variables => $variables, set_by => $THIS_FILE});
}
if ($scan_cluster_node_cluster_member ne $old_scan_cluster_node_cluster_member)
{
$update = 1;
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => { update => $update }});
my $variables = {
node_name => $scan_cluster_node_name,
new_cluster_member => $scan_cluster_node_cluster_member ? "#!string!scan_cluster_unit_0001!#" : "#!string!scan_cluster_unit_0002!#",
old_cluster_member => $old_scan_cluster_node_cluster_member ? "#!string!scan_cluster_unit_0001!#" : "#!string!scan_cluster_unit_0002!#",
};
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 1, key => "scan_cluster_alert_0007", variables => $variables});
$anvil->Alert->register({debug => 2, alert_level => "notice", message => "scan_cluster_alert_0007", message_variables => $variables, set_by => $THIS_FILE});
}
if ($scan_cluster_node_maintenance_mode ne $old_scan_cluster_node_maintenance_mode)
{
$update = 1;
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => { update => $update }});
my $variables = {
node_name => $scan_cluster_node_name,
new_maintenance_mode => $scan_cluster_node_maintenance_mode ? "#!string!scan_cluster_unit_0001!#" : "#!string!scan_cluster_unit_0002!#",
old_maintenance_mode => $old_scan_cluster_node_maintenance_mode ? "#!string!scan_cluster_unit_0001!#" : "#!string!scan_cluster_unit_0002!#",
};
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 1, key => "scan_cluster_alert_0008", variables => $variables});
$anvil->Alert->register({debug => 2, alert_level => "notice", message => "scan_cluster_alert_0008", message_variables => $variables, set_by => $THIS_FILE});
}
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => { update => $update }});
if ($update)
{
my $query = "
UPDATE
scan_cluster_nodes
SET
scan_cluster_node_name = ".$anvil->Database->quote($scan_cluster_node_name).",
scan_cluster_node_pacemaker_id = ".$anvil->Database->quote($scan_cluster_node_pacemaker_id).",
scan_cluster_node_in_ccm = ".$anvil->Database->quote($scan_cluster_node_in_ccm).",
scan_cluster_node_crmd_member = ".$anvil->Database->quote($scan_cluster_node_crmd_member).",
scan_cluster_node_cluster_member = ".$anvil->Database->quote($scan_cluster_node_cluster_member).",
scan_cluster_node_maintenance_mode = ".$anvil->Database->quote($scan_cluster_node_maintenance_mode).",
modified_date = ".$anvil->Database->quote($anvil->data->{sys}{database}{timestamp})."
WHERE
scan_cluster_node_uuid = ".$anvil->Database->quote($scan_cluster_node_uuid)."
;";
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => { query => $query }});
$anvil->Database->write({query => $query, source => $THIS_FILE, line => __LINE__});
}
}
else
{
# Add the node.
my $scan_cluster_node_uuid = $anvil->Get->uuid();
my $query = "
INSERT INTO
scan_cluster_nodes
(
scan_cluster_node_uuid,
scan_cluster_node_scan_cluster_uuid,
scan_cluster_node_host_uuid,
scan_cluster_node_name,
scan_cluster_node_pacemaker_id,
scan_cluster_node_in_ccm,
scan_cluster_node_crmd_member,
scan_cluster_node_cluster_member,
scan_cluster_node_maintenance_mode,
modified_date
) VALUES (
".$anvil->Database->quote($scan_cluster_node_uuid).",
".$anvil->Database->quote($scan_cluster_uuid).",
".$anvil->Database->quote($scan_cluster_node_host_uuid).",
".$anvil->Database->quote($scan_cluster_node_name).",
".$anvil->Database->quote($scan_cluster_node_pacemaker_id).",
".$anvil->Database->quote($scan_cluster_node_in_ccm).",
".$anvil->Database->quote($scan_cluster_node_crmd_member).",
".$anvil->Database->quote($scan_cluster_node_cluster_member).",
".$anvil->Database->quote($scan_cluster_node_maintenance_mode).",
".$anvil->Database->quote($anvil->data->{sys}{database}{timestamp})."
);";
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => { query => $query }});
$anvil->Database->write({query => $query, source => $THIS_FILE, line => __LINE__});
my $host_name = $anvil->Get->host_name_from_uuid({host_uuid => $scan_cluster_node_host_uuid});
my $variables = {
cluster_name => $cluster_name,
node_name => $scan_cluster_node_name,
host_uuid => $scan_cluster_node_host_uuid,
host_name => $host_name,
pacemaker_id => $scan_cluster_node_pacemaker_id,
in_ccm => $scan_cluster_node_in_ccm ? "#!string!scan_cluster_unit_0001!#" : "#!string!scan_cluster_unit_0002!#", # Yes or No
crmd_member => $scan_cluster_node_crmd_member ? "#!string!scan_cluster_unit_0001!#" : "#!string!scan_cluster_unit_0002!#",
cluster_member => $scan_cluster_node_cluster_member ? "#!string!scan_cluster_unit_0001!#" : "#!string!scan_cluster_unit_0002!#",
maintenance_mode => $scan_cluster_node_maintenance_mode ? "#!string!scan_cluster_unit_0001!#" : "#!string!scan_cluster_unit_0002!#",
};
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 1, key => "scan_cluster_alert_0003", variables => $variables});
$anvil->Alert->register({debug => 2, alert_level => "notice", message => "scan_cluster_alert_0003", message_variables => $variables, set_by => $THIS_FILE});
}
}
### TODO: Check for / repair bad cluster config issues
# If we're still alive, we're either node 1, or we're node 2 and node 1 is not ready. If we're not ready,
if ($stonith_max_attempts ne "INFINITY")
{
### TODO: Call pcs to update
}
return(0);
}
# Read in existing data from the database.
#
# Loads the previous scan's 'scan_cluster' and 'scan_cluster_nodes' rows into the 'sql' hash so the
# current scan can be diffed against them:
#   sql::scan_cluster::scan_cluster_uuid::<uuid>::...            - previous cluster values
#   sql::anvil_uuid::<anvil_uuid>                                - anvil_uuid -> scan_cluster_uuid lookup
#   sql::scan_cluster_node::scan_cluster_node_uuid::<uuid>::...  - previous node values
#   sql::scan_cluster_node_host_uuid::<host_uuid>                - host_uuid -> scan_cluster_node_uuid lookup
#
# Takes the Anvil::Tools handle as its only argument. Always returns 0.
sub read_last_scan
{
my ($anvil) = @_;
# Pull in all previously recorded clusters.
my $query = "
SELECT
scan_cluster_uuid,
scan_cluster_anvil_uuid,
scan_cluster_name
FROM
scan_cluster
;";
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => { query => $query }});
my $results = $anvil->Database->query({query => $query, source => $THIS_FILE, line => __LINE__});
my $count = @{$results};
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 3, list => {
results => $results,
count => $count,
}});
foreach my $row (@{$results})
{
# NOTE: There's no known way to track a cluster name change, so we can't really avoid having
# an entry per cluster name.
my $scan_cluster_uuid = $row->[0];
my $scan_cluster_anvil_uuid = $row->[1];
my $scan_cluster_name = $row->[2];
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => {
"scan_cluster_uuid" => $scan_cluster_uuid,
"scan_cluster_anvil_uuid" => $scan_cluster_anvil_uuid,
"scan_cluster_name" => $scan_cluster_name,
}});
# Store the old data now.
$anvil->data->{sql}{scan_cluster}{scan_cluster_uuid}{$scan_cluster_uuid}{scan_cluster_name} = $scan_cluster_name;
$anvil->data->{sql}{scan_cluster}{scan_cluster_uuid}{$scan_cluster_uuid}{scan_cluster_anvil_uuid} = $scan_cluster_anvil_uuid;
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => {
"sql::scan_cluster::scan_cluster_uuid::${scan_cluster_uuid}::scan_cluster_name" => $anvil->data->{sql}{scan_cluster}{scan_cluster_uuid}{$scan_cluster_uuid}{scan_cluster_name},
"sql::scan_cluster::scan_cluster_uuid::${scan_cluster_uuid}::scan_cluster_anvil_uuid" => $anvil->data->{sql}{scan_cluster}{scan_cluster_uuid}{$scan_cluster_uuid}{scan_cluster_anvil_uuid},
}});
# Make it easy to look up the cluster_uuid from the anvil_uuid.
$anvil->data->{sql}{anvil_uuid}{$scan_cluster_anvil_uuid} = $scan_cluster_uuid;
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => {
"sql::anvil_uuid::${scan_cluster_anvil_uuid}" => $anvil->data->{sql}{anvil_uuid}{$scan_cluster_anvil_uuid},
}});
}
# Clear these so they can be reused for the node query below.
undef $count;
undef $results;
# Now pull in all previously recorded cluster nodes.
$query = "
SELECT
scan_cluster_node_uuid,
scan_cluster_node_scan_cluster_uuid,
scan_cluster_node_host_uuid,
scan_cluster_node_name,
scan_cluster_node_pacemaker_id,
scan_cluster_node_in_ccm,
scan_cluster_node_crmd_member,
scan_cluster_node_cluster_member,
scan_cluster_node_maintenance_mode
FROM
scan_cluster_nodes
;";
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => { query => $query }});
$results = $anvil->Database->query({query => $query, source => $THIS_FILE, line => __LINE__});
$count = @{$results};
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => {
results => $results,
count => $count,
}});
foreach my $row (@{$results})
{
# We've got an entry in the 'scan_cluster_nodes' table, so now we'll look for data in the node and
# services tables.
my $scan_cluster_node_uuid = $row->[0];
my $scan_cluster_node_scan_cluster_uuid = $row->[1];
my $scan_cluster_node_host_uuid = $row->[2];
my $scan_cluster_node_name = $row->[3];
my $scan_cluster_node_pacemaker_id = $row->[4];
my $scan_cluster_node_in_ccm = $row->[5];
my $scan_cluster_node_crmd_member = $row->[6];
my $scan_cluster_node_cluster_member = $row->[7];
my $scan_cluster_node_maintenance_mode = $row->[8];
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 3, list => {
scan_cluster_node_uuid => $scan_cluster_node_uuid,
scan_cluster_node_scan_cluster_uuid => $scan_cluster_node_scan_cluster_uuid,
scan_cluster_node_host_uuid => $scan_cluster_node_host_uuid,
scan_cluster_node_name => $scan_cluster_node_name,
scan_cluster_node_pacemaker_id => $scan_cluster_node_pacemaker_id,
scan_cluster_node_in_ccm => $scan_cluster_node_in_ccm,
scan_cluster_node_crmd_member => $scan_cluster_node_crmd_member,
scan_cluster_node_cluster_member => $scan_cluster_node_cluster_member,
scan_cluster_node_maintenance_mode => $scan_cluster_node_maintenance_mode,
}});
# Store the old data now.
$anvil->data->{sql}{scan_cluster_node}{scan_cluster_node_uuid}{$scan_cluster_node_uuid} = {
scan_cluster_node_scan_cluster_uuid => $scan_cluster_node_scan_cluster_uuid,
scan_cluster_node_host_uuid => $scan_cluster_node_host_uuid,
scan_cluster_node_name => $scan_cluster_node_name,
scan_cluster_node_pacemaker_id => $scan_cluster_node_pacemaker_id,
scan_cluster_node_in_ccm => $scan_cluster_node_in_ccm,
scan_cluster_node_crmd_member => $scan_cluster_node_crmd_member,
scan_cluster_node_cluster_member => $scan_cluster_node_cluster_member,
scan_cluster_node_maintenance_mode => $scan_cluster_node_maintenance_mode,
};
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => {
"sql::scan_cluster_node::scan_cluster_node_uuid::${scan_cluster_node_uuid}::scan_cluster_node_scan_cluster_uuid" => $anvil->data->{sql}{scan_cluster_node}{scan_cluster_node_uuid}{$scan_cluster_node_uuid}{scan_cluster_node_scan_cluster_uuid},
"sql::scan_cluster_node::scan_cluster_node_uuid::${scan_cluster_node_uuid}::scan_cluster_node_host_uuid" => $anvil->data->{sql}{scan_cluster_node}{scan_cluster_node_uuid}{$scan_cluster_node_uuid}{scan_cluster_node_host_uuid},
"sql::scan_cluster_node::scan_cluster_node_uuid::${scan_cluster_node_uuid}::scan_cluster_node_name" => $anvil->data->{sql}{scan_cluster_node}{scan_cluster_node_uuid}{$scan_cluster_node_uuid}{scan_cluster_node_name},
"sql::scan_cluster_node::scan_cluster_node_uuid::${scan_cluster_node_uuid}::scan_cluster_node_pacemaker_id" => $anvil->data->{sql}{scan_cluster_node}{scan_cluster_node_uuid}{$scan_cluster_node_uuid}{scan_cluster_node_pacemaker_id},
"sql::scan_cluster_node::scan_cluster_node_uuid::${scan_cluster_node_uuid}::scan_cluster_node_in_ccm" => $anvil->data->{sql}{scan_cluster_node}{scan_cluster_node_uuid}{$scan_cluster_node_uuid}{scan_cluster_node_in_ccm},
"sql::scan_cluster_node::scan_cluster_node_uuid::${scan_cluster_node_uuid}::scan_cluster_node_crmd_member" => $anvil->data->{sql}{scan_cluster_node}{scan_cluster_node_uuid}{$scan_cluster_node_uuid}{scan_cluster_node_crmd_member},
"sql::scan_cluster_node::scan_cluster_node_uuid::${scan_cluster_node_uuid}::scan_cluster_node_cluster_member" => $anvil->data->{sql}{scan_cluster_node}{scan_cluster_node_uuid}{$scan_cluster_node_uuid}{scan_cluster_node_cluster_member},
"sql::scan_cluster_node::scan_cluster_node_uuid::${scan_cluster_node_uuid}::scan_cluster_node_maintenance_mode" => $anvil->data->{sql}{scan_cluster_node}{scan_cluster_node_uuid}{$scan_cluster_node_uuid}{scan_cluster_node_maintenance_mode},
}});
# Make it easy to find this node's record UUID from its host UUID.
$anvil->data->{sql}{scan_cluster_node_host_uuid}{$scan_cluster_node_host_uuid} = $scan_cluster_node_uuid;
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => {
"sql::scan_cluster_node_host_uuid::${scan_cluster_node_host_uuid}" => $anvil->data->{sql}{scan_cluster_node_host_uuid}{$scan_cluster_node_host_uuid},
}});
}
return(0);
}
# This reads in all the data we can find on the local system.
#
# It parses the CIB via Cluster->parse_cib(). If parsing fails (this host is not in the cluster), an
# alert is registered (once, tracked via Alert->check_alert_sent), this host's membership columns in
# 'scan_cluster_nodes' are zeroed out in case our peer can't do it for us, and the agent exits with
# return code 2. If parsing succeeds, any previously-set "out of cluster" alert is cleared.
#
# Takes the Anvil::Tools handle as its only argument. Returns 0 when in the cluster; otherwise it
# does not return (calls nice_exit with exit code 2).
sub collect_data
{
my ($anvil) = @_;
# Pick out core cluster details.
my $problem = $anvil->Cluster->parse_cib({debug => 3});
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => { problem => $problem }});
# If there was a problem, we're not in the cluster.
if ($problem)
{
# Only alert the first time we find ourselves out of the cluster.
my $changed = $anvil->Alert->check_alert_sent({
record_locator => "scan_cluster::in_cluster",
set_by => $THIS_FILE,
});
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => { changed => $changed }});
if ($changed)
{
# Register an alert.
my $variables = { host_name => $anvil->Get->host_name() };
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 1, key => "scan_cluster_alert_0010", variables => $variables});
$anvil->Alert->register({alert_level => "warning", message => "scan_cluster_alert_0010", message_variables => $variables, set_by => $THIS_FILE});
# See if I need to mark us as out of the cluster. Normally, our peer would do this,
# but if we went down at the same time as our peer, both of us might not update the
# membership values.
my $query = "
SELECT
scan_cluster_node_uuid,
scan_cluster_node_in_ccm,
scan_cluster_node_crmd_member,
scan_cluster_node_cluster_member
FROM
scan_cluster_nodes
WHERE
scan_cluster_node_host_uuid = ".$anvil->Database->quote($anvil->Get->host_uuid)."
;";
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => { query => $query }});
my $results = $anvil->Database->query({query => $query, source => $THIS_FILE, line => __LINE__});
my $count = @{$results};
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => {
results => $results,
count => $count,
}});
foreach my $row (@{$results})
{
# We've got an entry in the 'scan_cluster_nodes' table, so now we'll look for data in the node and
# services tables.
my $scan_cluster_node_uuid = $row->[0];
my $scan_cluster_node_in_ccm = $row->[1];
my $scan_cluster_node_crmd_member = $row->[2];
my $scan_cluster_node_cluster_member = $row->[3];
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => {
scan_cluster_node_uuid => $scan_cluster_node_uuid,
scan_cluster_node_in_ccm => $scan_cluster_node_in_ccm,
scan_cluster_node_crmd_member => $scan_cluster_node_crmd_member,
scan_cluster_node_cluster_member => $scan_cluster_node_cluster_member,
}});
# Only write if at least one membership flag is still set.
if (($scan_cluster_node_in_ccm) or ($scan_cluster_node_crmd_member) or ($scan_cluster_node_cluster_member))
{
# Update
my $query = "
UPDATE
scan_cluster_nodes
SET
scan_cluster_node_in_ccm = '0',
scan_cluster_node_crmd_member = '0',
scan_cluster_node_cluster_member = '0',
modified_date = ".$anvil->Database->quote($anvil->data->{sys}{database}{timestamp})."
WHERE
scan_cluster_node_uuid = ".$anvil->Database->quote($scan_cluster_node_uuid)."
;";
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => { query => $query }});
$anvil->Database->write({query => $query, source => $THIS_FILE, line => __LINE__});
}
}
}
# Exit now.
$anvil->nice_exit({exit_code => 2});
}
else
{
# See if we came back into the cluster
my $changed = $anvil->Alert->check_alert_sent({
clear => 1,
record_locator => "scan_cluster::in_cluster",
set_by => $THIS_FILE,
});
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => { changed => $changed }});
if ($changed)
{
# Register an alert.
my $variables = { host_name => $anvil->Get->host_name() };
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 1, key => "scan_cluster_alert_0011", variables => $variables});
$anvil->Alert->register({alert_level => "warning", clear_alert => 1, message => "scan_cluster_alert_0011", message_variables => $variables, set_by => $THIS_FILE});
}
}
return(0);
}

@ -2,24 +2,21 @@
--
-- NOTE: This agent is not host-bound. It's updated by node 1 if it's in the cluster, else by node 2 if it's
-- the only one online.
-- NOTE: Server data is not stored here. See scan-server for data on those resources.
CREATE TABLE scan_cluster (
scan_cluster_uuid uuid primary key,
scan_cluster_anvil_uuid uuid not null, -- The Anvil! UUID this cluster is associated with.
scan_cluster_name text not null, -- The name of the cluster
scan_cluster_stonith_enabled boolean not null, -- Tracks when stonith (fencing) was enabled/disabled
scan_cluster_maintenance_mode boolean not null, -- Tracks when maintenance mode is enabled/disabled.
modified_date timestamp with time zone not null,
FOREIGN KEY(scan_cluster_host_uuid) REFERENCES hosts(host_uuid)
modified_date timestamp with time zone not null
);
ALTER TABLE scan_cluster OWNER TO admin;
-- History copy of the 'scan_cluster' table. Rows are appended by the history trigger on every
-- INSERT/UPDATE of the public table; columns mirror 'scan_cluster' plus a serial history_id.
CREATE TABLE history.scan_cluster (
history_id bigserial,
scan_cluster_uuid uuid,
scan_cluster_anvil_uuid uuid,
scan_cluster_name text,
scan_cluster_stonith_enabled boolean,
scan_cluster_maintenance_mode boolean,
modified_date timestamp with time zone not null
);
ALTER TABLE history.scan_cluster OWNER TO admin;
@ -32,16 +29,13 @@ BEGIN
SELECT INTO history_scan_cluster * FROM scan_cluster WHERE scan_cluster_uuid=new.scan_cluster_uuid;
INSERT INTO history.scan_cluster
(scan_cluster_uuid,
scan_cluster_anvil_uuid,
scan_cluster_name,
scan_cluster_stonith_enabled,
scan_cluster_maintenance_mode,
modified_date)
VALUES
(history_scan_cluster.scan_cluster_uuid,
history_scan_cluster.scan_cluster_host_uuid,
history_scan_cluster.scan_cluster_anvil_uuid,
history_scan_cluster.scan_cluster_name,
history_scan_cluster.scan_cluster_stonith_enabled,
history_scan_cluster.scan_cluster_maintenance_mode,
history_scan_cluster.modified_date);
RETURN NULL;
END;
@ -54,16 +48,21 @@ CREATE TRIGGER trigger_scan_cluster
FOR EACH ROW EXECUTE PROCEDURE history_scan_cluster();
-- Node status information
CREATE TABLE scan_cluster_nodes (
scan_cluster_node_uuid uuid primary key,
scan_cluster_node_scan_cluster_uuid uuid not null, -- The parent scan_cluster_uuid.
scan_cluster_node_host_uuid uuid not null, -- This is the host UUID of the node.
scan_cluster_node_name text not null, -- This is the host name as reported by pacemaker. It _should_ match up to a host name in 'hosts'.
scan_cluster_node_pacemaker_id numeric not null, -- This is the internal pacemaker ID number of this node.
scan_cluster_node_in_ccm boolean not null, -- Indicates if the node is a corosync cluster member, first step in a node comint online.
scan_cluster_node_crmd_member boolean not null, -- Indicates if the node is in the corosync process group. Value from the CIB is 'online' or 'offline'. Second step in a node coming online
scan_cluster_node_cluster_member boolean not null, -- Indicates if the node has joined the controller and is a full member. Value from the CIB is 'member' or 'down'. Final step in the joining the cluster.
scan_cluster_node_maintenance_mode boolean not null, -- Tracks when maintenance mode is enabled/disabled.
modified_date timestamp with time zone not null,
FOREIGN KEY(scan_cluster_node_scan_cluster_uuid) REFERENCES scan_cluster(scan_cluster_uuid),
FOREIGN KEY(scan_cluster_host_uuid) REFERENCES hosts(host_uuid)
FOREIGN KEY(scan_cluster_node_host_uuid) REFERENCES hosts(host_uuid)
);
ALTER TABLE scan_cluster_nodes OWNER TO admin;
@ -74,6 +73,10 @@ CREATE TABLE history.scan_cluster_nodes (
scan_cluster_node_host_uuid uuid,
scan_cluster_node_name text,
scan_cluster_node_pacemaker_id numeric,
scan_cluster_node_in_ccm boolean,
scan_cluster_node_crmd_member boolean,
scan_cluster_node_cluster_member boolean,
scan_cluster_node_maintenance_mode boolean,
modified_date timestamp with time zone not null
);
ALTER TABLE history.scan_cluster_nodes OWNER TO admin;
@ -90,6 +93,10 @@ BEGIN
scan_cluster_node_host_uuid,
scan_cluster_node_name,
scan_cluster_node_pacemaker_id,
scan_cluster_node_in_ccm,
scan_cluster_node_crmd_member,
scan_cluster_node_cluster_member,
scan_cluster_node_maintenance_mode,
modified_date)
VALUES
(history_scan_cluster_nodes.scan_cluster_node_uuid,
@ -97,6 +104,10 @@ BEGIN
history_scan_cluster_nodes.scan_cluster_node_host_uuid,
history_scan_cluster_nodes.scan_cluster_node_name,
history_scan_cluster_nodes.scan_cluster_node_pacemaker_id,
history_scan_cluster_nodes.scan_cluster_node_in_ccm,
history_scan_cluster_nodes.scan_cluster_node_crmd_member,
history_scan_cluster_nodes.scan_cluster_node_cluster_member,
history_scan_cluster_nodes.scan_cluster_node_maintenance_mode,
history_scan_cluster_nodes.modified_date);
RETURN NULL;
END;
@ -109,396 +120,183 @@ CREATE TRIGGER trigger_scan_cluster_nodes
FOR EACH ROW EXECUTE PROCEDURE history_scan_cluster_nodes();
-- Stonith (fence) resource configuration, one row per stonith primitive in the CIB.
CREATE TABLE scan_cluster_stoniths (
scan_cluster_stonith_uuid uuid primary key,
scan_cluster_stonith_scan_cluster_uuid uuid not null, -- The parent scan_cluster_uuid.
scan_cluster_stonith_host_uuid uuid not null, -- This is the host UUID of the node.
scan_cluster_stonith_name text not null, -- This is the 'stonith id'
scan_cluster_stonith_arguments text not null, -- This is the fence agent + collection of primitive variable=value pairs (the nvpairs)
scan_cluster_stonith_operations text not null, -- This is the collection of operation variable=value pairs (the nvpairs)
modified_date timestamp with time zone not null,
FOREIGN KEY(scan_cluster_stonith_scan_cluster_uuid) REFERENCES scan_cluster(scan_cluster_uuid),
-- Fixed: the FK previously named 'scan_cluster_host_uuid', a column that does not exist in this table.
FOREIGN KEY(scan_cluster_stonith_host_uuid) REFERENCES hosts(host_uuid)
);
ALTER TABLE scan_cluster_stoniths OWNER TO admin;
-- History copy of 'scan_cluster_stoniths'; rows are appended by the history trigger on every
-- INSERT/UPDATE of the public table. Columns mirror the public table plus a serial history_id.
CREATE TABLE history.scan_cluster_stoniths (
history_id bigserial,
scan_cluster_stonith_uuid uuid,
scan_cluster_stonith_scan_cluster_uuid uuid,
scan_cluster_stonith_host_uuid uuid,
scan_cluster_stonith_name text,
scan_cluster_stonith_arguments text,
scan_cluster_stonith_operations text,
modified_date timestamp with time zone not null
);
ALTER TABLE history.scan_cluster_stoniths OWNER TO admin;
-- Trigger function: after any INSERT or UPDATE on 'scan_cluster_stoniths', copy the affected row
-- (re-read from the public table) into 'history.scan_cluster_stoniths'.
CREATE FUNCTION history_scan_cluster_stoniths() RETURNS trigger
AS $$
DECLARE
history_scan_cluster_stoniths RECORD;
BEGIN
-- Re-select the current row so the history copy reflects what was actually stored.
SELECT INTO history_scan_cluster_stoniths * FROM scan_cluster_stoniths WHERE scan_cluster_stonith_uuid=new.scan_cluster_stonith_uuid;
INSERT INTO history.scan_cluster_stoniths
(scan_cluster_stonith_uuid,
scan_cluster_stonith_scan_cluster_uuid,
scan_cluster_stonith_host_uuid,
scan_cluster_stonith_name,
scan_cluster_stonith_arguments,
scan_cluster_stonith_operations,
modified_date)
VALUES
(history_scan_cluster_stoniths.scan_cluster_stonith_uuid,
history_scan_cluster_stoniths.scan_cluster_stonith_scan_cluster_uuid,
history_scan_cluster_stoniths.scan_cluster_stonith_host_uuid,
history_scan_cluster_stoniths.scan_cluster_stonith_name,
history_scan_cluster_stoniths.scan_cluster_stonith_arguments,
history_scan_cluster_stoniths.scan_cluster_stonith_operations,
history_scan_cluster_stoniths.modified_date);
-- AFTER triggers ignore the return value; NULL is conventional here.
RETURN NULL;
END;
$$
LANGUAGE plpgsql;
ALTER FUNCTION history_scan_cluster_stoniths() OWNER TO admin;
CREATE TRIGGER trigger_scan_cluster_stoniths
AFTER INSERT OR UPDATE ON scan_cluster_stoniths
FOR EACH ROW EXECUTE PROCEDURE history_scan_cluster_stoniths();
-- Server (VM) resource configuration, one row per ocf:alteeve:server primitive in the CIB.
CREATE TABLE scan_cluster_servers (
scan_cluster_server_uuid uuid primary key,
scan_cluster_server_scan_cluster_uuid uuid not null, -- The parent scan_cluster_uuid.
scan_cluster_server_name text not null, -- This is the name of the server (ocf primitive id)
scan_cluster_server_state text not null, -- This is the 'running' or why it's off (off by user, etc)
scan_cluster_server_host_name text not null, -- This is the (cluster) name of the node hosting the server. Blank if the server is off. (Fixed: was declared 'uuid', but it stores a name and must allow an empty string.)
scan_cluster_server_arguments text not null, -- This is the collection of primitive variable=value pairs (the nvpairs)
scan_cluster_server_operations text not null, -- This is the collection of operation variable=value pairs (the nvpairs)
scan_cluster_server_meta text not null, -- This is the collection of meta attribute variable=value pairs (the nvpairs)
modified_date timestamp with time zone not null,
-- Fixed: removed a FOREIGN KEY on 'scan_cluster_host_uuid', a column that does not exist in this
-- table (the hosting node is tracked by cluster name, not host UUID).
FOREIGN KEY(scan_cluster_server_scan_cluster_uuid) REFERENCES scan_cluster(scan_cluster_uuid)
);
ALTER TABLE scan_cluster_servers OWNER TO admin;
ALTER TABLE scan_cluster_servers OWNER TO admin;
CREATE TABLE history.scan_cluster_servers (
history_id bigserial,
scan_cluster_server_uuid uuid,
scan_cluster_server_scan_cluster_uuid uuid,
scan_cluster_server_name text,
scan_cluster_server_arguments text,
scan_cluster_server_operations text,
scan_cluster_server_meta text,
modified_date timestamp with time zone not null
);
ALTER TABLE history.scan_cluster_servers OWNER TO admin;
-- Trigger function: after any INSERT or UPDATE on 'scan_cluster_servers', copy the affected row
-- (re-read from the public table) into 'history.scan_cluster_servers'.
-- Fixed: the VALUES list referenced the nonexistent record field 'scan_cluster_server_host_uuid'
-- and was misaligned with the column list (the 'name' column received the bogus host_uuid value and
-- the 'meta' column received 'operations'), which would raise an error on the first trigger fire.
CREATE FUNCTION history_scan_cluster_servers() RETURNS trigger
AS $$
DECLARE
history_scan_cluster_servers RECORD;
BEGIN
-- Re-select the current row so the history copy reflects what was actually stored.
SELECT INTO history_scan_cluster_servers * FROM scan_cluster_servers WHERE scan_cluster_server_uuid=new.scan_cluster_server_uuid;
INSERT INTO history.scan_cluster_servers
(scan_cluster_server_uuid,
scan_cluster_server_scan_cluster_uuid,
scan_cluster_server_name,
scan_cluster_server_arguments,
scan_cluster_server_operations,
scan_cluster_server_meta,
modified_date)
VALUES
(history_scan_cluster_servers.scan_cluster_server_uuid,
history_scan_cluster_servers.scan_cluster_server_scan_cluster_uuid,
history_scan_cluster_servers.scan_cluster_server_name,
history_scan_cluster_servers.scan_cluster_server_arguments,
history_scan_cluster_servers.scan_cluster_server_operations,
history_scan_cluster_servers.scan_cluster_server_meta,
history_scan_cluster_servers.modified_date);
-- AFTER triggers ignore the return value; NULL is conventional here.
RETURN NULL;
END;
$$
LANGUAGE plpgsql;
ALTER FUNCTION history_scan_cluster_servers() OWNER TO admin;
CREATE TRIGGER trigger_scan_cluster_servers
AFTER INSERT OR UPDATE ON scan_cluster_servers
FOR EACH ROW EXECUTE PROCEDURE history_scan_cluster_servers();
-- Example CIB
# pcs resource
* srv07-el6 (ocf::alteeve:server): Stopped (disabled)
* srv01-sql (ocf::alteeve:server): Started mk-a02n01
* srv02-lab1 (ocf::alteeve:server): Started mk-a02n01
* srv08-m2-psql (ocf::alteeve:server): Stopped (disabled)
<cib crm_feature_set="3.3.0" validate-with="pacemaker-3.2" epoch="418" num_updates="4" admin_epoch="0" cib-last-written="Mon Sep 21 13:30:38 2020" update-origin="mk-a02n01" update-client="cibadmin" update-user="root" have-quorum="1" dc-uuid="2">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
<nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.0.3-5.el8_2.1-4b1f869f0f"/>
<nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
<nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="mk-anvil-02"/>
<nvpair id="cib-bootstrap-options-stonith-max-attempts" name="stonith-max-attempts" value="INFINITY"/>
<nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"/>
<nvpair id="cib-bootstrap-options-maintenance-mode" name="maintenance-mode" value="false"/>
<nvpair id="cib-bootstrap-options-last-lrm-refresh" name="last-lrm-refresh" value="1597956504"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="1" uname="mk-a02n01"/>
<node id="2" uname="mk-a02n02"/>
</nodes>
<resources>
<primitive class="stonith" id="ipmilan_node1" type="fence_ipmilan">
<instance_attributes id="ipmilan_node1-instance_attributes">
<nvpair id="ipmilan_node1-instance_attributes-ipaddr" name="ipaddr" value="10.201.13.1"/>
<nvpair id="ipmilan_node1-instance_attributes-password" name="password" value="another secret p"/>
<nvpair id="ipmilan_node1-instance_attributes-pcmk_host_list" name="pcmk_host_list" value="mk-a02n01"/>
<nvpair id="ipmilan_node1-instance_attributes-username" name="username" value="admin"/>
</instance_attributes>
<operations>
<op id="ipmilan_node1-monitor-interval-60" interval="60" name="monitor"/>
</operations>
</primitive>
<primitive class="stonith" id="apc_snmp_node1_mk-pdu01" type="fence_apc_snmp">
<instance_attributes id="apc_snmp_node1_mk-pdu01-instance_attributes">
<nvpair id="apc_snmp_node1_mk-pdu01-instance_attributes-ip" name="ip" value="10.201.2.3"/>
<nvpair id="apc_snmp_node1_mk-pdu01-instance_attributes-pcmk_host_list" name="pcmk_host_list" value="mk-a02n01"/>
<nvpair id="apc_snmp_node1_mk-pdu01-instance_attributes-pcmk_off_action" name="pcmk_off_action" value="reboot"/>
<nvpair id="apc_snmp_node1_mk-pdu01-instance_attributes-port" name="port" value="3"/>
</instance_attributes>
<operations>
<op id="apc_snmp_node1_mk-pdu01-monitor-interval-60" interval="60" name="monitor"/>
</operations>
</primitive>
<primitive class="stonith" id="apc_snmp_node1_mk-pdu02" type="fence_apc_snmp">
<instance_attributes id="apc_snmp_node1_mk-pdu02-instance_attributes">
<nvpair id="apc_snmp_node1_mk-pdu02-instance_attributes-ip" name="ip" value="10.201.2.4"/>
<nvpair id="apc_snmp_node1_mk-pdu02-instance_attributes-pcmk_host_list" name="pcmk_host_list" value="mk-a02n01"/>
<nvpair id="apc_snmp_node1_mk-pdu02-instance_attributes-pcmk_off_action" name="pcmk_off_action" value="reboot"/>
<nvpair id="apc_snmp_node1_mk-pdu02-instance_attributes-port" name="port" value="3"/>
</instance_attributes>
<operations>
<op id="apc_snmp_node1_mk-pdu02-monitor-interval-60" interval="60" name="monitor"/>
</operations>
</primitive>
<primitive class="stonith" id="ipmilan_node2" type="fence_ipmilan">
<instance_attributes id="ipmilan_node2-instance_attributes">
<nvpair id="ipmilan_node2-instance_attributes-ipaddr" name="ipaddr" value="10.201.13.2"/>
<nvpair id="ipmilan_node2-instance_attributes-password" name="password" value="another secret p"/>
<nvpair id="ipmilan_node2-instance_attributes-pcmk_host_list" name="pcmk_host_list" value="mk-a02n02"/>
<nvpair id="ipmilan_node2-instance_attributes-username" name="username" value="admin"/>
</instance_attributes>
<operations>
<op id="ipmilan_node2-monitor-interval-60" interval="60" name="monitor"/>
</operations>
</primitive>
<primitive class="stonith" id="apc_snmp_node2_mk-pdu01" type="fence_apc_snmp">
<instance_attributes id="apc_snmp_node2_mk-pdu01-instance_attributes">
<nvpair id="apc_snmp_node2_mk-pdu01-instance_attributes-ip" name="ip" value="10.201.2.3"/>
<nvpair id="apc_snmp_node2_mk-pdu01-instance_attributes-pcmk_host_list" name="pcmk_host_list" value="mk-a02n02"/>
<nvpair id="apc_snmp_node2_mk-pdu01-instance_attributes-pcmk_off_action" name="pcmk_off_action" value="reboot"/>
<nvpair id="apc_snmp_node2_mk-pdu01-instance_attributes-port" name="port" value="4"/>
</instance_attributes>
<operations>
<op id="apc_snmp_node2_mk-pdu01-monitor-interval-60" interval="60" name="monitor"/>
</operations>
</primitive>
<primitive class="stonith" id="apc_snmp_node2_mk-pdu02" type="fence_apc_snmp">
<instance_attributes id="apc_snmp_node2_mk-pdu02-instance_attributes">
<nvpair id="apc_snmp_node2_mk-pdu02-instance_attributes-ip" name="ip" value="10.201.2.4"/>
<nvpair id="apc_snmp_node2_mk-pdu02-instance_attributes-pcmk_host_list" name="pcmk_host_list" value="mk-a02n02"/>
<nvpair id="apc_snmp_node2_mk-pdu02-instance_attributes-pcmk_off_action" name="pcmk_off_action" value="reboot"/>
<nvpair id="apc_snmp_node2_mk-pdu02-instance_attributes-port" name="port" value="4"/>
</instance_attributes>
<operations>
<op id="apc_snmp_node2_mk-pdu02-monitor-interval-60" interval="60" name="monitor"/>
</operations>
</primitive>
<primitive class="ocf" id="srv07-el6" provider="alteeve" type="server">
<instance_attributes id="srv07-el6-instance_attributes">
<nvpair id="srv07-el6-instance_attributes-name" name="name" value="srv07-el6"/>
</instance_attributes>
<meta_attributes id="srv07-el6-meta_attributes">
<nvpair id="srv07-el6-meta_attributes-allow-migrate" name="allow-migrate" value="true"/>
<nvpair id="srv07-el6-meta_attributes-migrate_to" name="migrate_to" value="INFINITY"/>
<nvpair id="srv07-el6-meta_attributes-stop" name="stop" value="INFINITY"/>
<nvpair id="srv07-el6-meta_attributes-target-role" name="target-role" value="Stopped"/>
</meta_attributes>
<operations>
<op id="srv07-el6-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="600"/>
<op id="srv07-el6-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="INFINITY"/>
<op id="srv07-el6-monitor-interval-60" interval="60" name="monitor" on-fail="block"/>
<op id="srv07-el6-notify-interval-0s" interval="0s" name="notify" timeout="20"/>
<op id="srv07-el6-start-interval-0s" interval="0s" name="start" timeout="30"/>
<op id="srv07-el6-stop-interval-0s" interval="0s" name="stop" timeout="INFINITY"/>
</operations>
</primitive>
<primitive class="ocf" id="srv01-sql" provider="alteeve" type="server">
<instance_attributes id="srv01-sql-instance_attributes">
<nvpair id="srv01-sql-instance_attributes-name" name="name" value="srv01-sql"/>
</instance_attributes>
<meta_attributes id="srv01-sql-meta_attributes">
<nvpair id="srv01-sql-meta_attributes-allow-migrate" name="allow-migrate" value="true"/>
<nvpair id="srv01-sql-meta_attributes-migrate_to" name="migrate_to" value="INFINITY"/>
<nvpair id="srv01-sql-meta_attributes-stop" name="stop" value="INFINITY"/>
</meta_attributes>
<operations>
<op id="srv01-sql-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="600"/>
<op id="srv01-sql-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="INFINITY"/>
<op id="srv01-sql-monitor-interval-60" interval="60" name="monitor" on-fail="block"/>
<op id="srv01-sql-notify-interval-0s" interval="0s" name="notify" timeout="20"/>
<op id="srv01-sql-start-interval-0s" interval="0s" name="start" timeout="30"/>
<op id="srv01-sql-stop-interval-0s" interval="0s" name="stop" timeout="INFINITY"/>
</operations>
</primitive>
<primitive class="ocf" id="srv02-lab1" provider="alteeve" type="server">
<instance_attributes id="srv02-lab1-instance_attributes">
<nvpair id="srv02-lab1-instance_attributes-name" name="name" value="srv02-lab1"/>
</instance_attributes>
<meta_attributes id="srv02-lab1-meta_attributes">
<nvpair id="srv02-lab1-meta_attributes-allow-migrate" name="allow-migrate" value="true"/>
<nvpair id="srv02-lab1-meta_attributes-migrate_to" name="migrate_to" value="INFINITY"/>
<nvpair id="srv02-lab1-meta_attributes-stop" name="stop" value="INFINITY"/>
</meta_attributes>
<operations>
<op id="srv02-lab1-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="600"/>
<op id="srv02-lab1-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="INFINITY"/>
<op id="srv02-lab1-monitor-interval-60" interval="60" name="monitor" on-fail="block"/>
<op id="srv02-lab1-notify-interval-0s" interval="0s" name="notify" timeout="20"/>
<op id="srv02-lab1-start-interval-0s" interval="0s" name="start" timeout="30"/>
<op id="srv02-lab1-stop-interval-0s" interval="0s" name="stop" timeout="INFINITY"/>
</operations>
</primitive>
<primitive class="ocf" id="srv08-m2-psql" provider="alteeve" type="server">
<instance_attributes id="srv08-m2-psql-instance_attributes">
<nvpair id="srv08-m2-psql-instance_attributes-name" name="name" value="srv08-m2-psql"/>
</instance_attributes>
<meta_attributes id="srv08-m2-psql-meta_attributes">
<nvpair id="srv08-m2-psql-meta_attributes-allow-migrate" name="allow-migrate" value="true"/>
<nvpair id="srv08-m2-psql-meta_attributes-migrate_to" name="migrate_to" value="INFINITY"/>
<nvpair id="srv08-m2-psql-meta_attributes-stop" name="stop" value="INFINITY"/>
<nvpair id="srv08-m2-psql-meta_attributes-target-role" name="target-role" value="Stopped"/>
</meta_attributes>
<operations>
<op id="srv08-m2-psql-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="600"/>
<op id="srv08-m2-psql-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="INFINITY"/>
<op id="srv08-m2-psql-monitor-interval-60" interval="60" name="monitor" on-fail="block"/>
<op id="srv08-m2-psql-notify-interval-0s" interval="0s" name="notify" timeout="20"/>
<op id="srv08-m2-psql-start-interval-0s" interval="0s" name="start" timeout="30"/>
<op id="srv08-m2-psql-stop-interval-0s" interval="0s" name="stop" timeout="INFINITY"/>
</operations>
</primitive>
</resources>
<constraints>
<rsc_location id="location-srv07-el6-mk-a02n01-200" node="mk-a02n01" rsc="srv07-el6" score="200"/>
<rsc_location id="location-srv07-el6-mk-a02n02-100" node="mk-a02n02" rsc="srv07-el6" score="100"/>
<rsc_location id="location-srv01-sql-mk-a02n01-200" node="mk-a02n01" rsc="srv01-sql" score="200"/>
<rsc_location id="location-srv01-sql-mk-a02n02-100" node="mk-a02n02" rsc="srv01-sql" score="100"/>
<rsc_location id="location-srv02-lab1-mk-a02n01-200" node="mk-a02n01" rsc="srv02-lab1" score="200"/>
<rsc_location id="location-srv02-lab1-mk-a02n02-100" node="mk-a02n02" rsc="srv02-lab1" score="100"/>
<rsc_location id="location-srv08-m2-psql-mk-a02n01-200" node="mk-a02n01" rsc="srv08-m2-psql" score="200"/>
<rsc_location id="location-srv08-m2-psql-mk-a02n02-100" node="mk-a02n02" rsc="srv08-m2-psql" score="100"/>
</constraints>
<fencing-topology>
<fencing-level devices="ipmilan_node1" id="fl-mk-a02n01-1" index="1" target="mk-a02n01"/>
<fencing-level devices="apc_snmp_node1_mk-pdu01,apc_snmp_node1_mk-pdu02" id="fl-mk-a02n01-2" index="2" target="mk-a02n01"/>
<fencing-level devices="ipmilan_node2" id="fl-mk-a02n02-1" index="1" target="mk-a02n02"/>
<fencing-level devices="apc_snmp_node2_mk-pdu01,apc_snmp_node2_mk-pdu02" id="fl-mk-a02n02-2" index="2" target="mk-a02n02"/>
</fencing-topology>
</configuration>
<status>
<node_state id="2" uname="mk-a02n02" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
<lrm id="2">
<lrm_resources>
<lrm_resource id="ipmilan_node1" type="fence_ipmilan" class="stonith">
<lrm_rsc_op id="ipmilan_node1_last_0" operation_key="ipmilan_node1_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="11:0:7:2c429916-850e-4468-abb7-8f95e44fdf8e" transition-magic="0:7;11:0:7:2c429916-850e-4468-abb7-8f95e44fdf8e" exit-reason="" on_node="mk-a02n02" call-id="5" rc-code="7" op-status="0" interval="0" last-rc-change="1600708714" last-run="1600708714" exec-time="1" queue-time="1" op-digest="230c3c46a7f39ff7a5ff7f1b8aa9f17d" op-secure-params=" password passwd " op-secure-digest="a8bb97c4c1cae8f90e445a0ce85ecc19"/>
</lrm_resource>
<lrm_resource id="apc_snmp_node1_mk-pdu01" type="fence_apc_snmp" class="stonith">
<lrm_rsc_op id="apc_snmp_node1_mk-pdu01_last_0" operation_key="apc_snmp_node1_mk-pdu01_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="23:0:0:2c429916-850e-4468-abb7-8f95e44fdf8e" transition-magic="0:0;23:0:0:2c429916-850e-4468-abb7-8f95e44fdf8e" exit-reason="" on_node="mk-a02n02" call-id="42" rc-code="0" op-status="0" interval="0" last-rc-change="1600708715" last-run="1600708715" exec-time="623" queue-time="0" op-digest="6b6191eeb61cd595ab0a26ec9762f8aa" op-secure-params=" password passwd " op-secure-digest="1dc851b0efa605b4ec3f03e3a3ba62f7"/>
<lrm_rsc_op id="apc_snmp_node1_mk-pdu01_monitor_60000" operation_key="apc_snmp_node1_mk-pdu01_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="24:0:0:2c429916-850e-4468-abb7-8f95e44fdf8e" transition-magic="0:0;24:0:0:2c429916-850e-4468-abb7-8f95e44fdf8e" exit-reason="" on_node="mk-a02n02" call-id="47" rc-code="0" op-status="0" interval="60000" last-rc-change="1600708715" exec-time="556" queue-time="0" op-digest="9dd197b1c8871a78c74a32b26949998d" op-secure-params=" password passwd " op-secure-digest="1dc851b0efa605b4ec3f03e3a3ba62f7"/>
</lrm_resource>
<lrm_resource id="apc_snmp_node1_mk-pdu02" type="fence_apc_snmp" class="stonith">
<lrm_rsc_op id="apc_snmp_node1_mk-pdu02_last_0" operation_key="apc_snmp_node1_mk-pdu02_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="13:0:7:2c429916-850e-4468-abb7-8f95e44fdf8e" transition-magic="0:7;13:0:7:2c429916-850e-4468-abb7-8f95e44fdf8e" exit-reason="" on_node="mk-a02n02" call-id="13" rc-code="7" op-status="0" interval="0" last-rc-change="1600708714" last-run="1600708714" exec-time="0" queue-time="0" op-digest="f4b11aca778aa58d81b7fa096bfe3fb4" op-secure-params=" password passwd " op-secure-digest="78517effd4af72191ac2c0b9d8567fcd"/>
</lrm_resource>
<lrm_resource id="ipmilan_node2" type="fence_ipmilan" class="stonith">
<lrm_rsc_op id="ipmilan_node2_last_0" operation_key="ipmilan_node2_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="27:0:0:2c429916-850e-4468-abb7-8f95e44fdf8e" transition-magic="0:0;27:0:0:2c429916-850e-4468-abb7-8f95e44fdf8e" exit-reason="" on_node="mk-a02n02" call-id="43" rc-code="0" op-status="0" interval="0" last-rc-change="1600708715" last-run="1600708715" exec-time="100" queue-time="0" op-digest="e759a456df902485096d4a48725ed81c" op-secure-params=" password passwd " op-secure-digest="47989163387c397e63fa3acdbec0d274"/>
<lrm_rsc_op id="ipmilan_node2_monitor_60000" operation_key="ipmilan_node2_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="28:0:0:2c429916-850e-4468-abb7-8f95e44fdf8e" transition-magic="0:0;28:0:0:2c429916-850e-4468-abb7-8f95e44fdf8e" exit-reason="" on_node="mk-a02n02" call-id="45" rc-code="0" op-status="0" interval="60000" last-rc-change="1600708715" exec-time="86" queue-time="0" op-digest="467ef5117cbb737e5c6fc23b58809791" op-secure-params=" password passwd " op-secure-digest="47989163387c397e63fa3acdbec0d274"/>
</lrm_resource>
<lrm_resource id="apc_snmp_node2_mk-pdu01" type="fence_apc_snmp" class="stonith">
<lrm_rsc_op id="apc_snmp_node2_mk-pdu01_last_0" operation_key="apc_snmp_node2_mk-pdu01_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="15:0:7:2c429916-850e-4468-abb7-8f95e44fdf8e" transition-magic="0:7;15:0:7:2c429916-850e-4468-abb7-8f95e44fdf8e" exit-reason="" on_node="mk-a02n02" call-id="21" rc-code="7" op-status="0" interval="0" last-rc-change="1600708714" last-run="1600708714" exec-time="0" queue-time="0" op-digest="3d4af69481cb01c8c8f0f8af95940b99" op-secure-params=" password passwd " op-secure-digest="fd2959d25b0a20f6d1bc630f7565fd78"/>
</lrm_resource>
<lrm_resource id="apc_snmp_node2_mk-pdu02" type="fence_apc_snmp" class="stonith">
<lrm_rsc_op id="apc_snmp_node2_mk-pdu02_last_0" operation_key="apc_snmp_node2_mk-pdu02_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="31:0:0:2c429916-850e-4468-abb7-8f95e44fdf8e" transition-magic="0:0;31:0:0:2c429916-850e-4468-abb7-8f95e44fdf8e" exit-reason="" on_node="mk-a02n02" call-id="44" rc-code="0" op-status="0" interval="0" last-rc-change="1600708715" last-run="1600708715" exec-time="603" queue-time="0" op-digest="7787bf20740a07e14145707988b18000" op-secure-params=" password passwd " op-secure-digest="11d1e757682ff46234d9816e06534953"/>
<lrm_rsc_op id="apc_snmp_node2_mk-pdu02_monitor_60000" operation_key="apc_snmp_node2_mk-pdu02_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="32:0:0:2c429916-850e-4468-abb7-8f95e44fdf8e" transition-magic="0:0;32:0:0:2c429916-850e-4468-abb7-8f95e44fdf8e" exit-reason="" on_node="mk-a02n02" call-id="46" rc-code="0" op-status="0" interval="60000" last-rc-change="1600708715" exec-time="555" queue-time="0" op-digest="910a16919098d7bca091e972cf8844f5" op-secure-params=" password passwd " op-secure-digest="11d1e757682ff46234d9816e06534953"/>
</lrm_resource>
<lrm_resource id="srv07-el6" type="server" class="ocf" provider="alteeve">
<lrm_rsc_op id="srv07-el6_last_0" operation_key="srv07-el6_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="17:0:7:2c429916-850e-4468-abb7-8f95e44fdf8e" transition-magic="0:7;17:0:7:2c429916-850e-4468-abb7-8f95e44fdf8e" exit-reason="" on_node="mk-a02n02" call-id="29" rc-code="7" op-status="0" interval="0" last-rc-change="1600708715" last-run="1600708715" exec-time="605" queue-time="0" op-digest="41dcb3443c331f2fe7ae92962905159f"/>
</lrm_resource>
<lrm_resource id="srv01-sql" type="server" class="ocf" provider="alteeve">
<lrm_rsc_op id="srv01-sql_last_0" operation_key="srv01-sql_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="18:0:7:2c429916-850e-4468-abb7-8f95e44fdf8e" transition-magic="0:7;18:0:7:2c429916-850e-4468-abb7-8f95e44fdf8e" exit-reason="" on_node="mk-a02n02" call-id="33" rc-code="7" op-status="0" interval="0" last-rc-change="1600708715" last-run="1600708715" exec-time="604" queue-time="0" op-digest="7acff34e45470837bd51c6d670b9878b"/>
</lrm_resource>
<lrm_resource id="srv02-lab1" type="server" class="ocf" provider="alteeve">
<lrm_rsc_op id="srv02-lab1_last_0" operation_key="srv02-lab1_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="19:0:7:2c429916-850e-4468-abb7-8f95e44fdf8e" transition-magic="0:7;19:0:7:2c429916-850e-4468-abb7-8f95e44fdf8e" exit-reason="" on_node="mk-a02n02" call-id="37" rc-code="7" op-status="0" interval="0" last-rc-change="1600708715" last-run="1600708715" exec-time="603" queue-time="0" op-digest="c7a4471d0df53d7aab5392a1ba7d67e1"/>
</lrm_resource>
<lrm_resource id="srv08-m2-psql" type="server" class="ocf" provider="alteeve">
<lrm_rsc_op id="srv08-m2-psql_last_0" operation_key="srv08-m2-psql_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="20:0:7:2c429916-850e-4468-abb7-8f95e44fdf8e" transition-magic="0:7;20:0:7:2c429916-850e-4468-abb7-8f95e44fdf8e" exit-reason="" on_node="mk-a02n02" call-id="41" rc-code="7" op-status="0" interval="0" last-rc-change="1600708715" last-run="1600708715" exec-time="602" queue-time="0" op-digest="79b65e1a3736d1835da977ef2dee200d"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
<node_state id="1" uname="mk-a02n01" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
<lrm id="1">
<lrm_resources>
<lrm_resource id="ipmilan_node1" type="fence_ipmilan" class="stonith">
<lrm_rsc_op id="ipmilan_node1_last_0" operation_key="ipmilan_node1_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="21:0:0:2c429916-850e-4468-abb7-8f95e44fdf8e" transition-magic="0:0;21:0:0:2c429916-850e-4468-abb7-8f95e44fdf8e" exit-reason="" on_node="mk-a02n01" call-id="42" rc-code="0" op-status="0" interval="0" last-rc-change="1600708716" last-run="1600708716" exec-time="172" queue-time="0" op-digest="230c3c46a7f39ff7a5ff7f1b8aa9f17d" op-secure-params=" password passwd " op-secure-digest="a8bb97c4c1cae8f90e445a0ce85ecc19"/>
<lrm_rsc_op id="ipmilan_node1_monitor_60000" operation_key="ipmilan_node1_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="22:0:0:2c429916-850e-4468-abb7-8f95e44fdf8e" transition-magic="0:0;22:0:0:2c429916-850e-4468-abb7-8f95e44fdf8e" exit-reason="" on_node="mk-a02n01" call-id="45" rc-code="0" op-status="0" interval="60000" last-rc-change="1600708716" exec-time="90" queue-time="0" op-digest="7064441a5f8ccc94d13cc9a1433de0a5" op-secure-params=" password passwd " op-secure-digest="a8bb97c4c1cae8f90e445a0ce85ecc19"/>
</lrm_resource>
<lrm_resource id="apc_snmp_node1_mk-pdu01" type="fence_apc_snmp" class="stonith">
<lrm_rsc_op id="apc_snmp_node1_mk-pdu01_last_0" operation_key="apc_snmp_node1_mk-pdu01_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="2:0:7:2c429916-850e-4468-abb7-8f95e44fdf8e" transition-magic="0:7;2:0:7:2c429916-850e-4468-abb7-8f95e44fdf8e" exit-reason="" on_node="mk-a02n01" call-id="9" rc-code="7" op-status="0" interval="0" last-rc-change="1600708714" last-run="1600708714" exec-time="0" queue-time="0" op-digest="6b6191eeb61cd595ab0a26ec9762f8aa" op-secure-params=" password passwd " op-secure-digest="1dc851b0efa605b4ec3f03e3a3ba62f7"/>
</lrm_resource>
<lrm_resource id="apc_snmp_node1_mk-pdu02" type="fence_apc_snmp" class="stonith">
<lrm_rsc_op id="apc_snmp_node1_mk-pdu02_last_0" operation_key="apc_snmp_node1_mk-pdu02_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="25:0:0:2c429916-850e-4468-abb7-8f95e44fdf8e" transition-magic="0:0;25:0:0:2c429916-850e-4468-abb7-8f95e44fdf8e" exit-reason="" on_node="mk-a02n01" call-id="43" rc-code="0" op-status="0" interval="0" last-rc-change="1600708716" last-run="1600708716" exec-time="666" queue-time="0" op-digest="f4b11aca778aa58d81b7fa096bfe3fb4" op-secure-params=" password passwd " op-secure-digest="78517effd4af72191ac2c0b9d8567fcd"/>
<lrm_rsc_op id="apc_snmp_node1_mk-pdu02_monitor_60000" operation_key="apc_snmp_node1_mk-pdu02_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="26:0:0:2c429916-850e-4468-abb7-8f95e44fdf8e" transition-magic="0:0;26:0:0:2c429916-850e-4468-abb7-8f95e44fdf8e" exit-reason="" on_node="mk-a02n01" call-id="46" rc-code="0" op-status="0" interval="60000" last-rc-change="1600708717" exec-time="574" queue-time="1" op-digest="da20bfed231d75a3b22f97eb06bb445f" op-secure-params=" password passwd " op-secure-digest="78517effd4af72191ac2c0b9d8567fcd"/>
</lrm_resource>
<lrm_resource id="ipmilan_node2" type="fence_ipmilan" class="stonith">
<lrm_rsc_op id="ipmilan_node2_last_0" operation_key="ipmilan_node2_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="4:0:7:2c429916-850e-4468-abb7-8f95e44fdf8e" transition-magic="0:7;4:0:7:2c429916-850e-4468-abb7-8f95e44fdf8e" exit-reason="" on_node="mk-a02n01" call-id="17" rc-code="7" op-status="0" interval="0" last-rc-change="1600708714" last-run="1600708714" exec-time="0" queue-time="0" op-digest="e759a456df902485096d4a48725ed81c" op-secure-params=" password passwd " op-secure-digest="47989163387c397e63fa3acdbec0d274"/>
</lrm_resource>
<lrm_resource id="apc_snmp_node2_mk-pdu01" type="fence_apc_snmp" class="stonith">
<lrm_rsc_op id="apc_snmp_node2_mk-pdu01_last_0" operation_key="apc_snmp_node2_mk-pdu01_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="29:0:0:2c429916-850e-4468-abb7-8f95e44fdf8e" transition-magic="0:0;29:0:0:2c429916-850e-4468-abb7-8f95e44fdf8e" exit-reason="" on_node="mk-a02n01" call-id="44" rc-code="0" op-status="0" interval="0" last-rc-change="1600708716" last-run="1600708716" exec-time="675" queue-time="0" op-digest="3d4af69481cb01c8c8f0f8af95940b99" op-secure-params=" password passwd " op-secure-digest="fd2959d25b0a20f6d1bc630f7565fd78"/>
<lrm_rsc_op id="apc_snmp_node2_mk-pdu01_monitor_60000" operation_key="apc_snmp_node2_mk-pdu01_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="30:0:0:2c429916-850e-4468-abb7-8f95e44fdf8e" transition-magic="0:0;30:0:0:2c429916-850e-4468-abb7-8f95e44fdf8e" exit-reason="" on_node="mk-a02n01" call-id="47" rc-code="0" op-status="0" interval="60000" last-rc-change="1600708717" exec-time="565" queue-time="0" op-digest="5b8d168b9627dad87e1ba2edace17f1e" op-secure-params=" password passwd " op-secure-digest="fd2959d25b0a20f6d1bc630f7565fd78"/>
</lrm_resource>
<lrm_resource id="apc_snmp_node2_mk-pdu02" type="fence_apc_snmp" class="stonith">
<lrm_rsc_op id="apc_snmp_node2_mk-pdu02_last_0" operation_key="apc_snmp_node2_mk-pdu02_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="6:0:7:2c429916-850e-4468-abb7-8f95e44fdf8e" transition-magic="0:7;6:0:7:2c429916-850e-4468-abb7-8f95e44fdf8e" exit-reason="" on_node="mk-a02n01" call-id="25" rc-code="7" op-status="0" interval="0" last-rc-change="1600708714" last-run="1600708714" exec-time="0" queue-time="0" op-digest="7787bf20740a07e14145707988b18000" op-secure-params=" password passwd " op-secure-digest="11d1e757682ff46234d9816e06534953"/>
</lrm_resource>
<lrm_resource id="srv07-el6" type="server" class="ocf" provider="alteeve">
<lrm_rsc_op id="srv07-el6_last_0" operation_key="srv07-el6_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="7:0:7:2c429916-850e-4468-abb7-8f95e44fdf8e" transition-magic="0:7;7:0:7:2c429916-850e-4468-abb7-8f95e44fdf8e" exit-reason="" on_node="mk-a02n01" call-id="29" rc-code="7" op-status="0" interval="0" last-rc-change="1600708716" last-run="1600708716" exec-time="598" queue-time="0" op-digest="41dcb3443c331f2fe7ae92962905159f"/>
</lrm_resource>
<lrm_resource id="srv01-sql" type="server" class="ocf" provider="alteeve">
<lrm_rsc_op id="srv01-sql_last_0" operation_key="srv01-sql_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="19:1:0:2c429916-850e-4468-abb7-8f95e44fdf8e" transition-magic="0:0;19:1:0:2c429916-850e-4468-abb7-8f95e44fdf8e" exit-reason="" on_node="mk-a02n01" call-id="48" rc-code="0" op-status="0" interval="0" last-rc-change="1600709387" last-run="1600709387" exec-time="13119" queue-time="0" op-digest="7acff34e45470837bd51c6d670b9878b"/>
<lrm_rsc_op id="srv01-sql_monitor_60000" operation_key="srv01-sql_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="20:1:0:2c429916-850e-4468-abb7-8f95e44fdf8e" transition-magic="0:0;20:1:0:2c429916-850e-4468-abb7-8f95e44fdf8e" exit-reason="" on_node="mk-a02n01" call-id="49" rc-code="0" op-status="0" interval="60000" last-rc-change="1600709400" exec-time="546" queue-time="0" op-digest="0434e67501e3e7af47a547723c35b411"/>
</lrm_resource>
<lrm_resource id="srv02-lab1" type="server" class="ocf" provider="alteeve">
<lrm_rsc_op id="srv02-lab1_last_0" operation_key="srv02-lab1_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="22:2:0:2c429916-850e-4468-abb7-8f95e44fdf8e" transition-magic="0:0;22:2:0:2c429916-850e-4468-abb7-8f95e44fdf8e" exit-reason="" on_node="mk-a02n01" call-id="50" rc-code="0" op-status="0" interval="0" last-rc-change="1600709438" last-run="1600709438" exec-time="12668" queue-time="0" op-digest="c7a4471d0df53d7aab5392a1ba7d67e1"/>
<lrm_rsc_op id="srv02-lab1_monitor_60000" operation_key="srv02-lab1_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="23:2:0:2c429916-850e-4468-abb7-8f95e44fdf8e" transition-magic="0:0;23:2:0:2c429916-850e-4468-abb7-8f95e44fdf8e" exit-reason="" on_node="mk-a02n01" call-id="51" rc-code="0" op-status="0" interval="60000" last-rc-change="1600709451" exec-time="549" queue-time="0" op-digest="435d654a0384ef5a77a7517d682950ce"/>
</lrm_resource>
<lrm_resource id="srv08-m2-psql" type="server" class="ocf" provider="alteeve">
<lrm_rsc_op id="srv08-m2-psql_last_0" operation_key="srv08-m2-psql_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="10:0:7:2c429916-850e-4468-abb7-8f95e44fdf8e" transition-magic="0:7;10:0:7:2c429916-850e-4468-abb7-8f95e44fdf8e" exit-reason="" on_node="mk-a02n01" call-id="41" rc-code="7" op-status="0" interval="0" last-rc-change="1600708716" last-run="1600708716" exec-time="596" queue-time="0" op-digest="79b65e1a3736d1835da977ef2dee200d"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
</status>
</cib>
-- TODO: We may want to track this data in the future. For now, we're not going to bother as we can always
-- dig through the historical cib.xml.X files on the nodes.
--
-- -- Constraints; Useful for tracking when servers are asked to migrate.
-- CREATE TABLE scan_cluster_constraints (
-- scan_cluster_constraint_uuid uuid primary key,
-- scan_cluster_constraint_scan_cluster_uuid uuid not null, -- The parent scan_cluster_uuid.
-- scan_cluster_constraint_server_name text not null, -- This is the server name the constraint applies to.
-- scan_cluster_constraint_node1_name text not null, -- This is name of the first node
-- scan_cluster_constraint_node1_score numeric not null, -- This is the score assigned to the first node (larger number is higher priority)
-- scan_cluster_constraint_node2_name text not null, -- This is name of the second node
-- scan_cluster_constraint_node2_score numeric not null, -- This is the score assigned to the second node (larger number is higher priority)
-- modified_date timestamp with time zone not null,
--
-- FOREIGN KEY(scan_cluster_constraint_scan_cluster_uuid) REFERENCES scan_cluster(scan_cluster_uuid)
-- );
-- ALTER TABLE scan_cluster_constraints OWNER TO admin;
--
-- CREATE TABLE history.scan_cluster_constraints (
-- history_id bigserial,
-- scan_cluster_constraint_uuid uuid,
-- scan_cluster_constraint_scan_cluster_uuid uuid,
-- scan_cluster_constraint_server_name text,
-- scan_cluster_constraint_node1_name text,
-- scan_cluster_constraint_node1_score numeric,
-- scan_cluster_constraint_node2_name text,
-- scan_cluster_constraint_node2_score numeric,
-- modified_date timestamp with time zone not null
-- );
-- ALTER TABLE history.scan_cluster_constraints OWNER TO admin;
--
-- CREATE FUNCTION history_scan_cluster_constraints() RETURNS trigger
-- AS $$
-- DECLARE
-- history_scan_cluster_constraints RECORD;
-- BEGIN
-- SELECT INTO history_scan_cluster_constraints * FROM scan_cluster_constraints WHERE scan_cluster_constraint_uuid=new.scan_cluster_constraint_uuid;
-- INSERT INTO history.scan_cluster_constraints
-- (scan_cluster_constraint_uuid,
-- scan_cluster_constraint_scan_cluster_uuid,
-- scan_cluster_constraint_server_name,
-- scan_cluster_constraint_node1_name,
-- scan_cluster_constraint_node1_score,
-- scan_cluster_constraint_node2_name,
-- scan_cluster_constraint_node2_score,
-- modified_date)
-- VALUES
-- (history_scan_cluster_constraints.scan_cluster_constraint_uuid,
-- history_scan_cluster_constraints.scan_cluster_constraint_scan_cluster_uuid,
-- history_scan_cluster_constraints.scan_cluster_constraint_server_name,
-- history_scan_cluster_constraints.scan_cluster_constraint_node1_name,
-- history_scan_cluster_constraints.scan_cluster_constraint_node1_score,
-- history_scan_cluster_constraints.scan_cluster_constraint_node2_name,
-- history_scan_cluster_constraints.scan_cluster_constraint_node2_score,
-- history_scan_cluster_constraints.modified_date);
-- RETURN NULL;
-- END;
-- $$
-- LANGUAGE plpgsql;
-- ALTER FUNCTION history_scan_cluster_constraints() OWNER TO admin;
--
-- CREATE TRIGGER trigger_scan_cluster_constraints
-- AFTER INSERT OR UPDATE ON scan_cluster_constraints
-- FOR EACH ROW EXECUTE PROCEDURE history_scan_cluster_constraints();
--
--
-- -- This stores the fence (stonith) configuration data. We use 'fence' instead of 'stonith' because pacemaker
-- -- uses both (see 'fence topology', for example), and 'fence' implies fabric and power fencing, where the
-- -- name 'stonith' implies power fencing only.
-- CREATE TABLE scan_cluster_fences (
-- scan_cluster_fence_uuid uuid primary key,
-- scan_cluster_fence_scan_cluster_uuid uuid not null, -- The parent scan_cluster_uuid.
-- scan_cluster_fence_target_node_name text not null, -- This is the node name that the fence will act on (kill)
-- scan_cluster_fence_name text not null, -- This is the 'stonith id'
-- scan_cluster_fence_arguments text not null, -- This is the fence agent + collection of primitive variable=value pairs (the nvpairs)
-- scan_cluster_fence_operations text not null, -- This is the collection of operation variable=value pairs (the nvpairs)
-- modified_date timestamp with time zone not null,
--
-- FOREIGN KEY(scan_cluster_fence_scan_cluster_uuid) REFERENCES scan_cluster(scan_cluster_uuid)
-- );
-- ALTER TABLE scan_cluster_fences OWNER TO admin;
--
-- CREATE TABLE history.scan_cluster_fences (
-- history_id bigserial,
-- scan_cluster_fence_uuid uuid,
-- scan_cluster_fence_scan_cluster_uuid uuid,
-- scan_cluster_fence_target_node_name text,
-- scan_cluster_fence_name text,
-- scan_cluster_fence_arguments text,
-- scan_cluster_fence_operations text,
-- modified_date timestamp with time zone not null
-- );
-- ALTER TABLE history.scan_cluster_fences OWNER TO admin;
--
-- CREATE FUNCTION history_scan_cluster_fences() RETURNS trigger
-- AS $$
-- DECLARE
-- history_scan_cluster_fences RECORD;
-- BEGIN
-- SELECT INTO history_scan_cluster_fences * FROM scan_cluster_fences WHERE scan_cluster_fence_uuid=new.scan_cluster_fence_uuid;
-- INSERT INTO history.scan_cluster_fences
-- (scan_cluster_fence_uuid,
-- scan_cluster_fence_scan_cluster_uuid,
-- scan_cluster_fence_target_node_name,
-- scan_cluster_fence_name,
-- scan_cluster_fence_arguments,
-- scan_cluster_fence_operations,
-- modified_date)
-- VALUES
-- (history_scan_cluster_fences.scan_cluster_fence_uuid,
-- history_scan_cluster_fences.scan_cluster_fence_scan_cluster_uuid,
-- history_scan_cluster_fences.scan_cluster_fence_target_node_name,
-- history_scan_cluster_fences.scan_cluster_fence_name,
-- history_scan_cluster_fences.scan_cluster_fence_arguments,
-- history_scan_cluster_fences.scan_cluster_fence_operations,
-- history_scan_cluster_fences.modified_date);
-- RETURN NULL;
-- END;
-- $$
-- LANGUAGE plpgsql;
-- ALTER FUNCTION history_scan_cluster_fences() OWNER TO admin;
--
-- CREATE TRIGGER trigger_scan_cluster_fences
-- AFTER INSERT OR UPDATE ON scan_cluster_fences
-- FOR EACH ROW EXECUTE PROCEDURE history_scan_cluster_fences();
--
--
-- -- This stores data about the order of fencing actions
-- CREATE TABLE scan_cluster_fence_topologies (
-- scan_cluster_fence_topology_uuid uuid primary key,
-- scan_cluster_fence_topology_scan_cluster_uuid uuid not null, -- The parent scan_cluster_uuid.
-- scan_cluster_fence_topology_target_node_name text not null, -- This is the node that the topology applies to.
-- scan_cluster_fence_topology_index numeric not null, -- This is numerical order that the associated devices will be tried in. Lower value == higher priority.
-- scan_cluster_fence_topology_device text not null, -- This is the (comma-separated) devices used in this index
-- modified_date timestamp with time zone not null,
--
-- FOREIGN KEY(scan_cluster_fence_topology_scan_cluster_uuid) REFERENCES scan_cluster(scan_cluster_uuid)
-- );
-- ALTER TABLE scan_cluster_fence_topologies OWNER TO admin;
--
-- CREATE TABLE history.scan_cluster_fence_topologies (
-- history_id bigserial,
-- scan_cluster_fence_topology_uuid uuid,
-- scan_cluster_fence_topology_scan_cluster_uuid uuid,
-- scan_cluster_fence_topology_target_node_name text,
-- scan_cluster_fence_topology_index numeric,
-- scan_cluster_fence_topology_device text,
-- modified_date timestamp with time zone not null
-- );
-- ALTER TABLE history.scan_cluster_fence_topologies OWNER TO admin;
--
-- CREATE FUNCTION history_scan_cluster_fence_topologies() RETURNS trigger
-- AS $$
-- DECLARE
-- history_scan_cluster_fence_topologies RECORD;
-- BEGIN
-- SELECT INTO history_scan_cluster_fence_topologies * FROM scan_cluster_fence_topologies WHERE scan_cluster_fence_topology_uuid=new.scan_cluster_fence_topology_uuid;
-- INSERT INTO history.scan_cluster_fence_topologies
-- (scan_cluster_fence_topology_uuid,
-- scan_cluster_fence_topology_scan_cluster_uuid,
-- scan_cluster_fence_topology_target_node_name,
-- scan_cluster_fence_topology_index,
-- scan_cluster_fence_topology_device,
-- modified_date)
-- VALUES
-- (history_scan_cluster_fence_topologies.scan_cluster_fence_topology_uuid,
-- history_scan_cluster_fence_topologies.scan_cluster_fence_topology_scan_cluster_uuid,
-- history_scan_cluster_fence_topologies.scan_cluster_fence_topology_target_node_name,
-- history_scan_cluster_fence_topologies.scan_cluster_fence_topology_index,
-- history_scan_cluster_fence_topologies.scan_cluster_fence_topology_device,
-- history_scan_cluster_fence_topologies.modified_date);
-- RETURN NULL;
-- END;
-- $$
-- LANGUAGE plpgsql;
-- ALTER FUNCTION history_scan_cluster_fence_topologies() OWNER TO admin;
--
-- CREATE TRIGGER trigger_scan_cluster_fence_topologies
-- AFTER INSERT OR UPDATE ON scan_cluster_fence_topologies
-- FOR EACH ROW EXECUTE PROCEDURE history_scan_cluster_fence_topologies();

@ -14,17 +14,35 @@ NOTE: All string keys MUST be prefixed with the agent name! ie: 'scan_cluster_lo
<language name="en_CA" long_name="Canadian English" description="ScanCore scan agent that monitors hardware, like RAM modules, CSS LED status, CPU information, etc.">
<!-- Alert entries -->
<key name="scan_cluster_alert_0001"></key>
<key name="scan_cluster_alert_0001">The new cluster: [#!variable!cluster_name!#] has been found.</key>
<key name="scan_cluster_alert_0002">The cluster: [#!variable!old_cluster_name!#] has been renamed to: [#!variable!new_cluster_name!#].</key>
<key name="scan_cluster_alert_0003">The new node: [#!variable!node_name!#] has been found in the cluster: [#!variable!cluster_name!#];
Host Name/UUID: .......... [#!variable!host_name!#] / [#!variable!host_uuid!#]
Pacemaker ID: ............ [#!variable!pacemaker_id!#]
Corosync Cluster Member: . [#!variable!in_ccm!#]
In Corosync Process Group: [#!variable!crmd_member!#]
Joined Domain: ........... [#!variable!cluster_member!#]
In Maintenance Mode: ..... [#!variable!maintenance_mode!#]
</key>
<key name="scan_cluster_alert_0004">The node: [#!variable!node_name!#] pacemaker ID: [#!variable!old_pacemaker_id!#] has changed to: [#!variable!new_pacemaker_id!#]</key>
<key name="scan_cluster_alert_0005">The node: [#!variable!node_name!#] corosync cluster membership status has changed from: [#!variable!old_in_ccm!#] to: [#!variable!new_in_ccm!#]</key>
<key name="scan_cluster_alert_0006">The node: [#!variable!node_name!#] corosync process group status has changed from: [#!variable!old_crmd_member!#] to: [#!variable!new_crmd_member!#]</key>
<key name="scan_cluster_alert_0007">The node: [#!variable!node_name!#] cluster domain membership status has changed from: [#!variable!old_cluster_member!#] to: [#!variable!new_cluster_member!#]</key>
<key name="scan_cluster_alert_0008">The node: [#!variable!node_name!#] maintenance mode status has changed from: [#!variable!old_maintenance_mode!#] to: [#!variable!new_maintenance_mode!#]</key>
<key name="scan_cluster_alert_0009">The node: [#!variable!old_node_name!#] has been renamed to: [#!variable!new_node_name!#]</key>
<key name="scan_cluster_alert_0010">The node: [#!variable!host_name!#] is no longer in the cluster.</key>
<key name="scan_cluster_alert_0011">The node: [#!variable!host_name!#] is returning back into the cluster.</key>
<!-- Log entries -->
<key name="scan_cluster_log_0001">Starting: [#!variable!program!#].</key>
<key name="scan_cluster_log_0002">This host is a: [#!variable!host_type!#], this agent is only useful on nodes. Exiting.</key>
<!-- Message entries (usually meant to be alerts) -->
<key name="scan_cluster_message_0001">We're node 2, and node 1 is running as well. Exiting as only one node needs to run this agent.</key>
<key name="scan_cluster_message_0001"></key>
<!-- Units -->
<key name="scan_cluster_unit_0001"></key>
<key name="scan_cluster_unit_0001">Yes</key>
<key name="scan_cluster_unit_0002">No</key>
</language>
</words>

@ -1314,11 +1314,12 @@ sub process_health
{
# Mark us as having a fairly major health issue if this has been the case for more
# than five minutes.
my $age = $anvil->Database->check_condition_age({
my $age = $anvil->Alert->check_condition_age({
debug => 2,
name => "scan-hardware::less_ram_than_peer",
host_uuid => $anvil->Get->host_uuid,
});
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => { age => $age }});
if ($age > 300)
{
@ -1350,7 +1351,7 @@ sub process_health
}
elsif (($peer_ram_total == $hardware_ram_total) or ($difference < $anvil->data->{scancore}{'scan-hardware'}{ram}{clear_threshold}))
{
my $age = $anvil->Database->check_condition_age({
my $age = $anvil->Alert->check_condition_age({
debug => 2,
clear => 1,
name => "scan-hardware::less_ram_than_peer",

@ -551,9 +551,9 @@ DELETED - Marks a server as no longer existing
# Server migrated (to the peer or to a new Anvil!)
my $variables = {
server => $server_name,
old_host_name => $anvil->Get->host_name_from_uuid({host_uuid => $old_server_host_uuid}),
old_host_name => $old_server_host_uuid eq "NULL" ? "NULL" : $anvil->Get->host_name_from_uuid({host_uuid => $old_server_host_uuid}),
old_host_uuid => $old_server_host_uuid,
new_host_name => $anvil->Get->host_name_from_uuid({host_uuid => $server_host_uuid}),
new_host_name => $server_host_uuid eq "NULL" ? "NULL" : $anvil->Get->host_name_from_uuid({host_uuid => $server_host_uuid}),
new_host_uuid => $server_host_uuid,
};
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 1, key => "scan_server_alert_0012", variables => $variables});
@ -690,7 +690,7 @@ DELETED - Marks a server as no longer existing
my $host_anvil_uuid = $anvil->data->{servers}{server_uuid}{$server_uuid}{server_anvil_uuid};
my $host_anvil_name = $anvil->Get->anvil_name_from_uuid({anvil_uuid => $host_anvil_uuid});
my $server_host_uuid = $anvil->data->{servers}{server_uuid}{$server_uuid}{server_host_uuid};
my $server_host_name = $anvil->Get->host_name_from_uuid({host_uuid => $server_host_uuid});
my $server_host_name = $server_host_uuid eq "NULL" ? "NULL" : $anvil->Get->host_name_from_uuid({host_uuid => $server_host_uuid});
my $server_state = $anvil->data->{servers}{server_uuid}{$server_uuid}{server_state};
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => {
's1:server_name' => $server_name,

@ -235,6 +235,7 @@ The error was:
<key name="error_0160">The '#!variable!name!#': [#!variable!uuid!#] is not valid.</key>
<key name="error_0161">Unable to mark the server with UUID: [#!variable!uuid!#] as "deleted" because it doesn't appear to exist in the database in the first place.</key>
<key name="error_0162">The 'anvil_uuid': [#!variable!anvil_uuid!#] is invalid.</key>
<key name="error_0163">The MIB file: [#!variable!mib!#] doesn't exist or can't be read.</key>
<!-- Table headers -->
<key name="header_0001">Current Network Interfaces and States</key>

@ -16,6 +16,7 @@
# - Write the status of this and the scancore daemon to /etc/anvil/anvil.motd and symlink it to /etc/motd.d/
# - Write a script that runs in crontab at UTC 17:00 that sends an email if Scancore or anvil-daemon are disabled.
# - Examine limits in: https://www.freedesktop.org/software/systemd/man/systemd.exec.html#LimitCPU=
# - Write a background program to scan the BCN and uses OUI data to try and find / auto-configure PDUs and UPSes
#
# NOTE:
# - For later; 'reboot --force --force' immediately kills the OS, like disabling ACPI on EL6 and hitting the

@ -33,31 +33,370 @@ print "Connecting to the database(s);\n";
$anvil->Database->connect({debug => 3});
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, 'print' => 1, level => 2, secure => 0, key => "log_0132"});
my $host_name = "mk-a02n02";
my $host_uuid = $anvil->Get->host_uuid_from_name({
debug => 2,
host_name => $host_name,
});
print "host name: [".$host_name."], host_uuid: [".$host_uuid."]\n";
my $cib = '<cib crm_feature_set="3.3.0" validate-with="pacemaker-3.2" epoch="598" num_updates="3" admin_epoch="0" cib-last-written="Mon Oct 12 08:01:01 2020" update-origin="mk-a02n02" update-client="cibadmin" update-user="root" have-quorum="1" dc-uuid="2">
<configuration>
<crm_config>
<cluster_property_set id="cib-bootstrap-options">
<nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
<nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="2.0.3-5.el8_2.1-4b1f869f0f"/>
<nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
<nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="mk-anvil-02"/>
<nvpair id="cib-bootstrap-options-stonith-max-attempts" name="stonith-max-attempts" value="INFINITY"/>
<nvpair id="cib-bootstrap-options-stonith-enabled" name="stonith-enabled" value="true"/>
<nvpair id="cib-bootstrap-options-maintenance-mode" name="maintenance-mode" value="false"/>
<nvpair id="cib-bootstrap-options-last-lrm-refresh" name="last-lrm-refresh" value="1602294579"/>
</cluster_property_set>
</crm_config>
<nodes>
<node id="1" uname="mk-a02n01">
<instance_attributes id="nodes-1">
<nvpair id="nodes-1-maintenance" name="maintenance" value="on"/>
</instance_attributes>
</node>
<node id="2" uname="mk-a02n02">
<instance_attributes id="nodes-2">
<nvpair id="nodes-2-maintenance" name="maintenance" value="on"/>
</instance_attributes>
</node>
</nodes>
<resources>
<primitive class="stonith" id="ipmilan_node1" type="fence_ipmilan">
<instance_attributes id="ipmilan_node1-instance_attributes">
<nvpair id="ipmilan_node1-instance_attributes-ipaddr" name="ipaddr" value="10.201.13.1"/>
<nvpair id="ipmilan_node1-instance_attributes-password" name="password" value="another secret p"/>
<nvpair id="ipmilan_node1-instance_attributes-pcmk_host_list" name="pcmk_host_list" value="mk-a02n01"/>
<nvpair id="ipmilan_node1-instance_attributes-username" name="username" value="admin"/>
</instance_attributes>
<operations>
<op id="ipmilan_node1-monitor-interval-60" interval="60" name="monitor"/>
</operations>
</primitive>
<primitive class="stonith" id="apc_snmp_node1_mk-pdu01" type="fence_apc_snmp">
<instance_attributes id="apc_snmp_node1_mk-pdu01-instance_attributes">
<nvpair id="apc_snmp_node1_mk-pdu01-instance_attributes-ip" name="ip" value="10.201.2.3"/>
<nvpair id="apc_snmp_node1_mk-pdu01-instance_attributes-pcmk_host_list" name="pcmk_host_list" value="mk-a02n01"/>
<nvpair id="apc_snmp_node1_mk-pdu01-instance_attributes-pcmk_off_action" name="pcmk_off_action" value="reboot"/>
<nvpair id="apc_snmp_node1_mk-pdu01-instance_attributes-port" name="port" value="3"/>
</instance_attributes>
<operations>
<op id="apc_snmp_node1_mk-pdu01-monitor-interval-60" interval="60" name="monitor"/>
</operations>
</primitive>
<primitive class="stonith" id="apc_snmp_node1_mk-pdu02" type="fence_apc_snmp">
<instance_attributes id="apc_snmp_node1_mk-pdu02-instance_attributes">
<nvpair id="apc_snmp_node1_mk-pdu02-instance_attributes-ip" name="ip" value="10.201.2.4"/>
<nvpair id="apc_snmp_node1_mk-pdu02-instance_attributes-pcmk_host_list" name="pcmk_host_list" value="mk-a02n01"/>
<nvpair id="apc_snmp_node1_mk-pdu02-instance_attributes-pcmk_off_action" name="pcmk_off_action" value="reboot"/>
<nvpair id="apc_snmp_node1_mk-pdu02-instance_attributes-port" name="port" value="3"/>
</instance_attributes>
<operations>
<op id="apc_snmp_node1_mk-pdu02-monitor-interval-60" interval="60" name="monitor"/>
</operations>
</primitive>
<primitive class="stonith" id="ipmilan_node2" type="fence_ipmilan">
<instance_attributes id="ipmilan_node2-instance_attributes">
<nvpair id="ipmilan_node2-instance_attributes-ipaddr" name="ipaddr" value="10.201.13.2"/>
<nvpair id="ipmilan_node2-instance_attributes-password" name="password" value="another secret p"/>
<nvpair id="ipmilan_node2-instance_attributes-pcmk_host_list" name="pcmk_host_list" value="mk-a02n02"/>
<nvpair id="ipmilan_node2-instance_attributes-username" name="username" value="admin"/>
</instance_attributes>
<operations>
<op id="ipmilan_node2-monitor-interval-60" interval="60" name="monitor"/>
</operations>
</primitive>
<primitive class="stonith" id="apc_snmp_node2_mk-pdu01" type="fence_apc_snmp">
<instance_attributes id="apc_snmp_node2_mk-pdu01-instance_attributes">
<nvpair id="apc_snmp_node2_mk-pdu01-instance_attributes-ip" name="ip" value="10.201.2.3"/>
<nvpair id="apc_snmp_node2_mk-pdu01-instance_attributes-pcmk_host_list" name="pcmk_host_list" value="mk-a02n02"/>
<nvpair id="apc_snmp_node2_mk-pdu01-instance_attributes-pcmk_off_action" name="pcmk_off_action" value="reboot"/>
<nvpair id="apc_snmp_node2_mk-pdu01-instance_attributes-port" name="port" value="4"/>
</instance_attributes>
<operations>
<op id="apc_snmp_node2_mk-pdu01-monitor-interval-60" interval="60" name="monitor"/>
</operations>
</primitive>
<primitive class="stonith" id="apc_snmp_node2_mk-pdu02" type="fence_apc_snmp">
<instance_attributes id="apc_snmp_node2_mk-pdu02-instance_attributes">
<nvpair id="apc_snmp_node2_mk-pdu02-instance_attributes-ip" name="ip" value="10.201.2.4"/>
<nvpair id="apc_snmp_node2_mk-pdu02-instance_attributes-pcmk_host_list" name="pcmk_host_list" value="mk-a02n02"/>
<nvpair id="apc_snmp_node2_mk-pdu02-instance_attributes-pcmk_off_action" name="pcmk_off_action" value="reboot"/>
<nvpair id="apc_snmp_node2_mk-pdu02-instance_attributes-port" name="port" value="4"/>
</instance_attributes>
<operations>
<op id="apc_snmp_node2_mk-pdu02-monitor-interval-60" interval="60" name="monitor"/>
</operations>
</primitive>
<primitive class="ocf" id="srv07-el6" provider="alteeve" type="server">
<instance_attributes id="srv07-el6-instance_attributes">
<nvpair id="srv07-el6-instance_attributes-name" name="name" value="srv07-el6"/>
</instance_attributes>
<meta_attributes id="srv07-el6-meta_attributes">
<nvpair id="srv07-el6-meta_attributes-allow-migrate" name="allow-migrate" value="true"/>
<nvpair id="srv07-el6-meta_attributes-migrate_to" name="migrate_to" value="INFINITY"/>
<nvpair id="srv07-el6-meta_attributes-stop" name="stop" value="INFINITY"/>
<nvpair id="srv07-el6-meta_attributes-target-role" name="target-role" value="Stopped"/>
</meta_attributes>
<operations>
<op id="srv07-el6-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="600"/>
<op id="srv07-el6-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="INFINITY"/>
<op id="srv07-el6-monitor-interval-60" interval="60" name="monitor"/>
<op id="srv07-el6-notify-interval-0s" interval="0s" name="notify" timeout="20"/>
<op id="srv07-el6-start-interval-0s" interval="0s" name="start" on-fail="block" timeout="INFINITY"/>
<op id="srv07-el6-stop-interval-0s" interval="0s" name="stop" on-fail="block" timeout="INFINITY"/>
</operations>
</primitive>
<primitive class="ocf" id="srv01-sql" provider="alteeve" type="server">
<instance_attributes id="srv01-sql-instance_attributes">
<nvpair id="srv01-sql-instance_attributes-name" name="name" value="srv01-sql"/>
</instance_attributes>
<meta_attributes id="srv01-sql-meta_attributes">
<nvpair id="srv01-sql-meta_attributes-allow-migrate" name="allow-migrate" value="true"/>
<nvpair id="srv01-sql-meta_attributes-migrate_to" name="migrate_to" value="INFINITY"/>
<nvpair id="srv01-sql-meta_attributes-stop" name="stop" value="INFINITY"/>
<nvpair id="srv01-sql-meta_attributes-target-role" name="target-role" value="Stopped"/>
</meta_attributes>
<operations>
<op id="srv01-sql-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="600"/>
<op id="srv01-sql-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="INFINITY"/>
<op id="srv01-sql-monitor-interval-60" interval="60" name="monitor"/>
<op id="srv01-sql-notify-interval-0s" interval="0s" name="notify" timeout="20"/>
<op id="srv01-sql-start-interval-0s" interval="0s" name="start" on-fail="block" timeout="INFINITY"/>
<op id="srv01-sql-stop-interval-0s" interval="0s" name="stop" on-fail="block" timeout="INFINITY"/>
</operations>
</primitive>
<primitive class="ocf" id="srv02-lab1" provider="alteeve" type="server">
<instance_attributes id="srv02-lab1-instance_attributes">
<nvpair id="srv02-lab1-instance_attributes-name" name="name" value="srv02-lab1"/>
</instance_attributes>
<meta_attributes id="srv02-lab1-meta_attributes">
<nvpair id="srv02-lab1-meta_attributes-allow-migrate" name="allow-migrate" value="true"/>
<nvpair id="srv02-lab1-meta_attributes-migrate_to" name="migrate_to" value="INFINITY"/>
<nvpair id="srv02-lab1-meta_attributes-stop" name="stop" value="INFINITY"/>
<nvpair id="srv02-lab1-meta_attributes-target-role" name="target-role" value="Stopped"/>
</meta_attributes>
<operations>
<op id="srv02-lab1-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="600"/>
<op id="srv02-lab1-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="INFINITY"/>
<op id="srv02-lab1-monitor-interval-60" interval="60" name="monitor"/>
<op id="srv02-lab1-notify-interval-0s" interval="0s" name="notify" timeout="20"/>
<op id="srv02-lab1-start-interval-0s" interval="0s" name="start" on-fail="block" timeout="INFINITY"/>
<op id="srv02-lab1-stop-interval-0s" interval="0s" name="stop" on-fail="block" timeout="INFINITY"/>
</operations>
</primitive>
<primitive class="ocf" id="srv08-m2-psql" provider="alteeve" type="server">
<instance_attributes id="srv08-m2-psql-instance_attributes">
<nvpair id="srv08-m2-psql-instance_attributes-name" name="name" value="srv08-m2-psql"/>
</instance_attributes>
<meta_attributes id="srv08-m2-psql-meta_attributes">
<nvpair id="srv08-m2-psql-meta_attributes-allow-migrate" name="allow-migrate" value="true"/>
<nvpair id="srv08-m2-psql-meta_attributes-migrate_to" name="migrate_to" value="INFINITY"/>
<nvpair id="srv08-m2-psql-meta_attributes-stop" name="stop" value="INFINITY"/>
<nvpair id="srv08-m2-psql-meta_attributes-target-role" name="target-role" value="Stopped"/>
</meta_attributes>
<operations>
<op id="srv08-m2-psql-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="600"/>
<op id="srv08-m2-psql-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="INFINITY"/>
<op id="srv08-m2-psql-monitor-interval-60" interval="60" name="monitor"/>
<op id="srv08-m2-psql-notify-interval-0s" interval="0s" name="notify" timeout="20"/>
<op id="srv08-m2-psql-start-interval-0s" interval="0s" name="start" on-fail="block" timeout="INFINITY"/>
<op id="srv08-m2-psql-stop-interval-0s" interval="0s" name="stop" on-fail="block" timeout="INFINITY"/>
</operations>
</primitive>
<primitive class="ocf" id="srv03-lab2" provider="alteeve" type="server">
<instance_attributes id="srv03-lab2-instance_attributes">
<nvpair id="srv03-lab2-instance_attributes-name" name="name" value="srv03-lab2"/>
</instance_attributes>
<meta_attributes id="srv03-lab2-meta_attributes">
<nvpair id="srv03-lab2-meta_attributes-allow-migrate" name="allow-migrate" value="true"/>
<nvpair id="srv03-lab2-meta_attributes-target-role" name="target-role" value="Stopped"/>
</meta_attributes>
<operations>
<op id="srv03-lab2-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="600"/>
<op id="srv03-lab2-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="INFINITY"/>
<op id="srv03-lab2-monitor-interval-60" interval="60" name="monitor"/>
<op id="srv03-lab2-notify-interval-0s" interval="0s" name="notify" timeout="20"/>
<op id="srv03-lab2-start-interval-0s" interval="0s" name="start" on-fail="block" timeout="INFINITY"/>
<op id="srv03-lab2-stop-interval-0s" interval="0s" name="stop" on-fail="block" timeout="INFINITY"/>
</operations>
</primitive>
<primitive class="ocf" id="srv04-lab3" provider="alteeve" type="server">
<instance_attributes id="srv04-lab3-instance_attributes">
<nvpair id="srv04-lab3-instance_attributes-name" name="name" value="srv04-lab3"/>
</instance_attributes>
<meta_attributes id="srv04-lab3-meta_attributes">
<nvpair id="srv04-lab3-meta_attributes-allow-migrate" name="allow-migrate" value="true"/>
<nvpair id="srv04-lab3-meta_attributes-target-role" name="target-role" value="Stopped"/>
</meta_attributes>
<operations>
<op id="srv04-lab3-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="600"/>
<op id="srv04-lab3-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="INFINITY"/>
<op id="srv04-lab3-monitor-interval-60" interval="60" name="monitor"/>
<op id="srv04-lab3-notify-interval-0s" interval="0s" name="notify" timeout="20"/>
<op id="srv04-lab3-start-interval-0s" interval="0s" name="start" on-fail="block" timeout="INFINITY"/>
<op id="srv04-lab3-stop-interval-0s" interval="0s" name="stop" on-fail="block" timeout="INFINITY"/>
</operations>
</primitive>
<primitive class="ocf" id="srv05-lab4" provider="alteeve" type="server">
<instance_attributes id="srv05-lab4-instance_attributes">
<nvpair id="srv05-lab4-instance_attributes-name" name="name" value="srv05-lab4"/>
</instance_attributes>
<meta_attributes id="srv05-lab4-meta_attributes">
<nvpair id="srv05-lab4-meta_attributes-allow-migrate" name="allow-migrate" value="true"/>
<nvpair id="srv05-lab4-meta_attributes-target-role" name="target-role" value="Stopped"/>
</meta_attributes>
<operations>
<op id="srv05-lab4-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="600"/>
<op id="srv05-lab4-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="INFINITY"/>
<op id="srv05-lab4-monitor-interval-60" interval="60" name="monitor"/>
<op id="srv05-lab4-notify-interval-0s" interval="0s" name="notify" timeout="20"/>
<op id="srv05-lab4-start-interval-0s" interval="0s" name="start" on-fail="block" timeout="INFINITY"/>
<op id="srv05-lab4-stop-interval-0s" interval="0s" name="stop" on-fail="block" timeout="INFINITY"/>
</operations>
</primitive>
<primitive class="ocf" id="srv06-lab5" provider="alteeve" type="server">
<instance_attributes id="srv06-lab5-instance_attributes">
<nvpair id="srv06-lab5-instance_attributes-name" name="name" value="srv06-lab5"/>
</instance_attributes>
<meta_attributes id="srv06-lab5-meta_attributes">
<nvpair id="srv06-lab5-meta_attributes-allow-migrate" name="allow-migrate" value="true"/>
<nvpair id="srv06-lab5-meta_attributes-target-role" name="target-role" value="Stopped"/>
</meta_attributes>
<operations>
<op id="srv06-lab5-migrate_from-interval-0s" interval="0s" name="migrate_from" timeout="600"/>
<op id="srv06-lab5-migrate_to-interval-0s" interval="0s" name="migrate_to" timeout="INFINITY"/>
<op id="srv06-lab5-monitor-interval-60" interval="60" name="monitor"/>
<op id="srv06-lab5-notify-interval-0s" interval="0s" name="notify" timeout="20"/>
<op id="srv06-lab5-start-interval-0s" interval="0s" name="start" on-fail="block" timeout="INFINITY"/>
<op id="srv06-lab5-stop-interval-0s" interval="0s" name="stop" on-fail="block" timeout="INFINITY"/>
</operations>
</primitive>
</resources>
<constraints>
<rsc_location id="location-srv07-el6-mk-a02n01-200" node="mk-a02n01" rsc="srv07-el6" score="200"/>
<rsc_location id="location-srv07-el6-mk-a02n02-100" node="mk-a02n02" rsc="srv07-el6" score="100"/>
<rsc_location id="location-srv01-sql-mk-a02n01-200" node="mk-a02n01" rsc="srv01-sql" score="200"/>
<rsc_location id="location-srv01-sql-mk-a02n02-100" node="mk-a02n02" rsc="srv01-sql" score="100"/>
<rsc_location id="location-srv02-lab1-mk-a02n01-200" node="mk-a02n01" rsc="srv02-lab1" score="200"/>
<rsc_location id="location-srv02-lab1-mk-a02n02-100" node="mk-a02n02" rsc="srv02-lab1" score="100"/>
<rsc_location id="location-srv08-m2-psql-mk-a02n01-200" node="mk-a02n01" rsc="srv08-m2-psql" score="200"/>
<rsc_location id="location-srv08-m2-psql-mk-a02n02-100" node="mk-a02n02" rsc="srv08-m2-psql" score="100"/>
<rsc_location id="location-srv03-lab2-mk-a02n01-200" node="mk-a02n01" rsc="srv03-lab2" score="200"/>
<rsc_location id="location-srv03-lab2-mk-a02n02-100" node="mk-a02n02" rsc="srv03-lab2" score="100"/>
<rsc_location id="location-srv04-lab3-mk-a02n01-200" node="mk-a02n01" rsc="srv04-lab3" score="200"/>
<rsc_location id="location-srv04-lab3-mk-a02n02-100" node="mk-a02n02" rsc="srv04-lab3" score="100"/>
<rsc_location id="location-srv05-lab4-mk-a02n01-200" node="mk-a02n01" rsc="srv05-lab4" score="200"/>
<rsc_location id="location-srv05-lab4-mk-a02n02-100" node="mk-a02n02" rsc="srv05-lab4" score="100"/>
<rsc_location id="location-srv06-lab5-mk-a02n01-200" node="mk-a02n01" rsc="srv06-lab5" score="200"/>
<rsc_location id="location-srv06-lab5-mk-a02n02-100" node="mk-a02n02" rsc="srv06-lab5" score="100"/>
</constraints>
<fencing-topology>
<fencing-level devices="ipmilan_node1" id="fl-mk-a02n01-1" index="1" target="mk-a02n01"/>
<fencing-level devices="apc_snmp_node1_mk-pdu01,apc_snmp_node1_mk-pdu02" id="fl-mk-a02n01-2" index="2" target="mk-a02n01"/>
<fencing-level devices="ipmilan_node2" id="fl-mk-a02n02-1" index="1" target="mk-a02n02"/>
<fencing-level devices="apc_snmp_node2_mk-pdu01,apc_snmp_node2_mk-pdu02" id="fl-mk-a02n02-2" index="2" target="mk-a02n02"/>
</fencing-topology>
</configuration>
<status>
<node_state id="2" uname="mk-a02n02" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
<lrm id="2">
<lrm_resources>
<lrm_resource id="ipmilan_node1" type="fence_ipmilan" class="stonith">
<lrm_rsc_op id="ipmilan_node1_last_0" operation_key="ipmilan_node1_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="15:0:7:2b9d6b7e-5be6-467d-a3bd-f42e3b7718c7" transition-magic="0:7;15:0:7:2b9d6b7e-5be6-467d-a3bd-f42e3b7718c7" exit-reason="" on_node="mk-a02n02" call-id="5" rc-code="7" op-status="0" interval="0" last-rc-change="1602502371" last-run="1602502371" exec-time="2" queue-time="0" op-digest="230c3c46a7f39ff7a5ff7f1b8aa9f17d" op-secure-params=" password passwd " op-secure-digest="a8bb97c4c1cae8f90e445a0ce85ecc19"/>
</lrm_resource>
<lrm_resource id="apc_snmp_node1_mk-pdu01" type="fence_apc_snmp" class="stonith">
<lrm_rsc_op id="apc_snmp_node1_mk-pdu01_last_0" operation_key="apc_snmp_node1_mk-pdu01_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="31:0:0:2b9d6b7e-5be6-467d-a3bd-f42e3b7718c7" transition-magic="0:0;31:0:0:2b9d6b7e-5be6-467d-a3bd-f42e3b7718c7" exit-reason="" on_node="mk-a02n02" call-id="58" rc-code="0" op-status="0" interval="0" last-rc-change="1602502375" last-run="1602502375" exec-time="648" queue-time="0" op-digest="6b6191eeb61cd595ab0a26ec9762f8aa" op-secure-params=" password passwd " op-secure-digest="1dc851b0efa605b4ec3f03e3a3ba62f7"/>
</lrm_resource>
<lrm_resource id="apc_snmp_node1_mk-pdu02" type="fence_apc_snmp" class="stonith">
<lrm_rsc_op id="apc_snmp_node1_mk-pdu02_last_0" operation_key="apc_snmp_node1_mk-pdu02_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="17:0:7:2b9d6b7e-5be6-467d-a3bd-f42e3b7718c7" transition-magic="0:7;17:0:7:2b9d6b7e-5be6-467d-a3bd-f42e3b7718c7" exit-reason="" on_node="mk-a02n02" call-id="13" rc-code="7" op-status="0" interval="0" last-rc-change="1602502372" last-run="1602502372" exec-time="0" queue-time="0" op-digest="f4b11aca778aa58d81b7fa096bfe3fb4" op-secure-params=" password passwd " op-secure-digest="78517effd4af72191ac2c0b9d8567fcd"/>
</lrm_resource>
<lrm_resource id="ipmilan_node2" type="fence_ipmilan" class="stonith">
<lrm_rsc_op id="ipmilan_node2_last_0" operation_key="ipmilan_node2_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="35:0:0:2b9d6b7e-5be6-467d-a3bd-f42e3b7718c7" transition-magic="0:0;35:0:0:2b9d6b7e-5be6-467d-a3bd-f42e3b7718c7" exit-reason="" on_node="mk-a02n02" call-id="59" rc-code="0" op-status="0" interval="0" last-rc-change="1602502375" last-run="1602502375" exec-time="144" queue-time="0" op-digest="e759a456df902485096d4a48725ed81c" op-secure-params=" password passwd " op-secure-digest="47989163387c397e63fa3acdbec0d274"/>
</lrm_resource>
<lrm_resource id="apc_snmp_node2_mk-pdu01" type="fence_apc_snmp" class="stonith">
<lrm_rsc_op id="apc_snmp_node2_mk-pdu01_last_0" operation_key="apc_snmp_node2_mk-pdu01_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="19:0:7:2b9d6b7e-5be6-467d-a3bd-f42e3b7718c7" transition-magic="0:7;19:0:7:2b9d6b7e-5be6-467d-a3bd-f42e3b7718c7" exit-reason="" on_node="mk-a02n02" call-id="21" rc-code="7" op-status="0" interval="0" last-rc-change="1602502372" last-run="1602502372" exec-time="0" queue-time="0" op-digest="3d4af69481cb01c8c8f0f8af95940b99" op-secure-params=" password passwd " op-secure-digest="fd2959d25b0a20f6d1bc630f7565fd78"/>
</lrm_resource>
<lrm_resource id="apc_snmp_node2_mk-pdu02" type="fence_apc_snmp" class="stonith">
<lrm_rsc_op id="apc_snmp_node2_mk-pdu02_last_0" operation_key="apc_snmp_node2_mk-pdu02_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="39:0:0:2b9d6b7e-5be6-467d-a3bd-f42e3b7718c7" transition-magic="0:0;39:0:0:2b9d6b7e-5be6-467d-a3bd-f42e3b7718c7" exit-reason="" on_node="mk-a02n02" call-id="60" rc-code="0" op-status="0" interval="0" last-rc-change="1602502375" last-run="1602502375" exec-time="620" queue-time="0" op-digest="7787bf20740a07e14145707988b18000" op-secure-params=" password passwd " op-secure-digest="11d1e757682ff46234d9816e06534953"/>
</lrm_resource>
<lrm_resource id="srv07-el6" type="server" class="ocf" provider="alteeve">
<lrm_rsc_op id="srv07-el6_last_0" operation_key="srv07-el6_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="21:0:7:2b9d6b7e-5be6-467d-a3bd-f42e3b7718c7" transition-magic="0:7;21:0:7:2b9d6b7e-5be6-467d-a3bd-f42e3b7718c7" exit-reason="" on_node="mk-a02n02" call-id="29" rc-code="7" op-status="0" interval="0" last-rc-change="1602502375" last-run="1602502375" exec-time="1448" queue-time="0" op-digest="41dcb3443c331f2fe7ae92962905159f"/>
</lrm_resource>
<lrm_resource id="srv01-sql" type="server" class="ocf" provider="alteeve">
<lrm_rsc_op id="srv01-sql_last_0" operation_key="srv01-sql_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="22:0:7:2b9d6b7e-5be6-467d-a3bd-f42e3b7718c7" transition-magic="0:7;22:0:7:2b9d6b7e-5be6-467d-a3bd-f42e3b7718c7" exit-reason="" on_node="mk-a02n02" call-id="33" rc-code="7" op-status="0" interval="0" last-rc-change="1602502375" last-run="1602502375" exec-time="1444" queue-time="0" op-digest="7acff34e45470837bd51c6d670b9878b"/>
</lrm_resource>
<lrm_resource id="srv02-lab1" type="server" class="ocf" provider="alteeve">
<lrm_rsc_op id="srv02-lab1_last_0" operation_key="srv02-lab1_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="23:0:7:2b9d6b7e-5be6-467d-a3bd-f42e3b7718c7" transition-magic="0:7;23:0:7:2b9d6b7e-5be6-467d-a3bd-f42e3b7718c7" exit-reason="" on_node="mk-a02n02" call-id="37" rc-code="7" op-status="0" interval="0" last-rc-change="1602502375" last-run="1602502375" exec-time="1448" queue-time="0" op-digest="c7a4471d0df53d7aab5392a1ba7d67e1"/>
</lrm_resource>
<lrm_resource id="srv08-m2-psql" type="server" class="ocf" provider="alteeve">
<lrm_rsc_op id="srv08-m2-psql_last_0" operation_key="srv08-m2-psql_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="24:0:7:2b9d6b7e-5be6-467d-a3bd-f42e3b7718c7" transition-magic="0:7;24:0:7:2b9d6b7e-5be6-467d-a3bd-f42e3b7718c7" exit-reason="" on_node="mk-a02n02" call-id="41" rc-code="7" op-status="0" interval="0" last-rc-change="1602502375" last-run="1602502375" exec-time="1444" queue-time="0" op-digest="79b65e1a3736d1835da977ef2dee200d"/>
</lrm_resource>
<lrm_resource id="srv03-lab2" type="server" class="ocf" provider="alteeve">
<lrm_rsc_op id="srv03-lab2_last_0" operation_key="srv03-lab2_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="25:0:7:2b9d6b7e-5be6-467d-a3bd-f42e3b7718c7" transition-magic="0:7;25:0:7:2b9d6b7e-5be6-467d-a3bd-f42e3b7718c7" exit-reason="" on_node="mk-a02n02" call-id="45" rc-code="7" op-status="0" interval="0" last-rc-change="1602502375" last-run="1602502375" exec-time="1449" queue-time="0" op-digest="c193be9678d079bb7eb92e0bdefb2c9f"/>
</lrm_resource>
<lrm_resource id="srv04-lab3" type="server" class="ocf" provider="alteeve">
<lrm_rsc_op id="srv04-lab3_last_0" operation_key="srv04-lab3_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="26:0:7:2b9d6b7e-5be6-467d-a3bd-f42e3b7718c7" transition-magic="0:7;26:0:7:2b9d6b7e-5be6-467d-a3bd-f42e3b7718c7" exit-reason="" on_node="mk-a02n02" call-id="49" rc-code="7" op-status="0" interval="0" last-rc-change="1602502375" last-run="1602502375" exec-time="1447" queue-time="0" op-digest="cb2426b2050bd79e2d7ca6ef986f4323"/>
</lrm_resource>
<lrm_resource id="srv05-lab4" type="server" class="ocf" provider="alteeve">
<lrm_rsc_op id="srv05-lab4_last_0" operation_key="srv05-lab4_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="27:0:7:2b9d6b7e-5be6-467d-a3bd-f42e3b7718c7" transition-magic="0:7;27:0:7:2b9d6b7e-5be6-467d-a3bd-f42e3b7718c7" exit-reason="" on_node="mk-a02n02" call-id="53" rc-code="7" op-status="0" interval="0" last-rc-change="1602502375" last-run="1602502375" exec-time="1448" queue-time="0" op-digest="c738571d2348f506b23eda5a19a9b2ec"/>
</lrm_resource>
<lrm_resource id="srv06-lab5" type="server" class="ocf" provider="alteeve">
<lrm_rsc_op id="srv06-lab5_last_0" operation_key="srv06-lab5_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="28:0:7:2b9d6b7e-5be6-467d-a3bd-f42e3b7718c7" transition-magic="0:7;28:0:7:2b9d6b7e-5be6-467d-a3bd-f42e3b7718c7" exit-reason="" on_node="mk-a02n02" call-id="57" rc-code="7" op-status="0" interval="0" last-rc-change="1602502375" last-run="1602502375" exec-time="1480" queue-time="0" op-digest="750371be716fd8e695d423bf33be9d04"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
<node_state id="1" uname="mk-a02n01" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
<lrm id="1">
<lrm_resources>
<lrm_resource id="ipmilan_node1" type="fence_ipmilan" class="stonith">
<lrm_rsc_op id="ipmilan_node1_last_0" operation_key="ipmilan_node1_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="29:0:0:2b9d6b7e-5be6-467d-a3bd-f42e3b7718c7" transition-magic="0:0;29:0:0:2b9d6b7e-5be6-467d-a3bd-f42e3b7718c7" exit-reason="" on_node="mk-a02n01" call-id="58" rc-code="0" op-status="0" interval="0" last-rc-change="1602502380" last-run="1602502380" exec-time="156" queue-time="0" op-digest="230c3c46a7f39ff7a5ff7f1b8aa9f17d" op-secure-params=" password passwd " op-secure-digest="a8bb97c4c1cae8f90e445a0ce85ecc19"/>
</lrm_resource>
<lrm_resource id="apc_snmp_node1_mk-pdu01" type="fence_apc_snmp" class="stonith">
<lrm_rsc_op id="apc_snmp_node1_mk-pdu01_last_0" operation_key="apc_snmp_node1_mk-pdu01_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="2:0:7:2b9d6b7e-5be6-467d-a3bd-f42e3b7718c7" transition-magic="0:7;2:0:7:2b9d6b7e-5be6-467d-a3bd-f42e3b7718c7" exit-reason="" on_node="mk-a02n01" call-id="9" rc-code="7" op-status="0" interval="0" last-rc-change="1602502372" last-run="1602502372" exec-time="0" queue-time="0" op-digest="6b6191eeb61cd595ab0a26ec9762f8aa" op-secure-params=" password passwd " op-secure-digest="1dc851b0efa605b4ec3f03e3a3ba62f7"/>
</lrm_resource>
<lrm_resource id="apc_snmp_node1_mk-pdu02" type="fence_apc_snmp" class="stonith">
<lrm_rsc_op id="apc_snmp_node1_mk-pdu02_last_0" operation_key="apc_snmp_node1_mk-pdu02_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="33:0:0:2b9d6b7e-5be6-467d-a3bd-f42e3b7718c7" transition-magic="0:0;33:0:0:2b9d6b7e-5be6-467d-a3bd-f42e3b7718c7" exit-reason="" on_node="mk-a02n01" call-id="59" rc-code="0" op-status="0" interval="0" last-rc-change="1602502380" last-run="1602502380" exec-time="622" queue-time="0" op-digest="f4b11aca778aa58d81b7fa096bfe3fb4" op-secure-params=" password passwd " op-secure-digest="78517effd4af72191ac2c0b9d8567fcd"/>
</lrm_resource>
<lrm_resource id="ipmilan_node2" type="fence_ipmilan" class="stonith">
<lrm_rsc_op id="ipmilan_node2_last_0" operation_key="ipmilan_node2_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="4:0:7:2b9d6b7e-5be6-467d-a3bd-f42e3b7718c7" transition-magic="0:7;4:0:7:2b9d6b7e-5be6-467d-a3bd-f42e3b7718c7" exit-reason="" on_node="mk-a02n01" call-id="17" rc-code="7" op-status="0" interval="0" last-rc-change="1602502372" last-run="1602502372" exec-time="0" queue-time="0" op-digest="e759a456df902485096d4a48725ed81c" op-secure-params=" password passwd " op-secure-digest="47989163387c397e63fa3acdbec0d274"/>
</lrm_resource>
<lrm_resource id="apc_snmp_node2_mk-pdu01" type="fence_apc_snmp" class="stonith">
<lrm_rsc_op id="apc_snmp_node2_mk-pdu01_last_0" operation_key="apc_snmp_node2_mk-pdu01_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="37:0:0:2b9d6b7e-5be6-467d-a3bd-f42e3b7718c7" transition-magic="0:0;37:0:0:2b9d6b7e-5be6-467d-a3bd-f42e3b7718c7" exit-reason="" on_node="mk-a02n01" call-id="60" rc-code="0" op-status="0" interval="0" last-rc-change="1602502380" last-run="1602502380" exec-time="633" queue-time="0" op-digest="3d4af69481cb01c8c8f0f8af95940b99" op-secure-params=" password passwd " op-secure-digest="fd2959d25b0a20f6d1bc630f7565fd78"/>
</lrm_resource>
<lrm_resource id="apc_snmp_node2_mk-pdu02" type="fence_apc_snmp" class="stonith">
<lrm_rsc_op id="apc_snmp_node2_mk-pdu02_last_0" operation_key="apc_snmp_node2_mk-pdu02_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="6:0:7:2b9d6b7e-5be6-467d-a3bd-f42e3b7718c7" transition-magic="0:7;6:0:7:2b9d6b7e-5be6-467d-a3bd-f42e3b7718c7" exit-reason="" on_node="mk-a02n01" call-id="25" rc-code="7" op-status="0" interval="0" last-rc-change="1602502372" last-run="1602502372" exec-time="0" queue-time="0" op-digest="7787bf20740a07e14145707988b18000" op-secure-params=" password passwd " op-secure-digest="11d1e757682ff46234d9816e06534953"/>
</lrm_resource>
<lrm_resource id="srv07-el6" type="server" class="ocf" provider="alteeve">
<lrm_rsc_op id="srv07-el6_last_0" operation_key="srv07-el6_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="7:0:7:2b9d6b7e-5be6-467d-a3bd-f42e3b7718c7" transition-magic="0:7;7:0:7:2b9d6b7e-5be6-467d-a3bd-f42e3b7718c7" exit-reason="" on_node="mk-a02n01" call-id="29" rc-code="7" op-status="0" interval="0" last-rc-change="1602502380" last-run="1602502380" exec-time="1466" queue-time="0" op-digest="41dcb3443c331f2fe7ae92962905159f"/>
</lrm_resource>
<lrm_resource id="srv01-sql" type="server" class="ocf" provider="alteeve">
<lrm_rsc_op id="srv01-sql_last_0" operation_key="srv01-sql_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="8:0:7:2b9d6b7e-5be6-467d-a3bd-f42e3b7718c7" transition-magic="0:7;8:0:7:2b9d6b7e-5be6-467d-a3bd-f42e3b7718c7" exit-reason="" on_node="mk-a02n01" call-id="33" rc-code="7" op-status="0" interval="0" last-rc-change="1602502380" last-run="1602502380" exec-time="1427" queue-time="0" op-digest="7acff34e45470837bd51c6d670b9878b"/>
</lrm_resource>
<lrm_resource id="srv02-lab1" type="server" class="ocf" provider="alteeve">
<lrm_rsc_op id="srv02-lab1_last_0" operation_key="srv02-lab1_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="9:0:7:2b9d6b7e-5be6-467d-a3bd-f42e3b7718c7" transition-magic="0:7;9:0:7:2b9d6b7e-5be6-467d-a3bd-f42e3b7718c7" exit-reason="" on_node="mk-a02n01" call-id="37" rc-code="7" op-status="0" interval="0" last-rc-change="1602502380" last-run="1602502380" exec-time="1398" queue-time="0" op-digest="c7a4471d0df53d7aab5392a1ba7d67e1"/>
</lrm_resource>
<lrm_resource id="srv08-m2-psql" type="server" class="ocf" provider="alteeve">
<lrm_rsc_op id="srv08-m2-psql_last_0" operation_key="srv08-m2-psql_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="10:0:7:2b9d6b7e-5be6-467d-a3bd-f42e3b7718c7" transition-magic="0:7;10:0:7:2b9d6b7e-5be6-467d-a3bd-f42e3b7718c7" exit-reason="" on_node="mk-a02n01" call-id="41" rc-code="7" op-status="0" interval="0" last-rc-change="1602502380" last-run="1602502380" exec-time="1366" queue-time="0" op-digest="79b65e1a3736d1835da977ef2dee200d"/>
</lrm_resource>
<lrm_resource id="srv03-lab2" type="server" class="ocf" provider="alteeve">
<lrm_rsc_op id="srv03-lab2_last_0" operation_key="srv03-lab2_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="11:0:7:2b9d6b7e-5be6-467d-a3bd-f42e3b7718c7" transition-magic="0:7;11:0:7:2b9d6b7e-5be6-467d-a3bd-f42e3b7718c7" exit-reason="" on_node="mk-a02n01" call-id="45" rc-code="7" op-status="0" interval="0" last-rc-change="1602502380" last-run="1602502380" exec-time="1369" queue-time="0" op-digest="c193be9678d079bb7eb92e0bdefb2c9f"/>
</lrm_resource>
<lrm_resource id="srv04-lab3" type="server" class="ocf" provider="alteeve">
<lrm_rsc_op id="srv04-lab3_last_0" operation_key="srv04-lab3_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="12:0:7:2b9d6b7e-5be6-467d-a3bd-f42e3b7718c7" transition-magic="0:7;12:0:7:2b9d6b7e-5be6-467d-a3bd-f42e3b7718c7" exit-reason="" on_node="mk-a02n01" call-id="49" rc-code="7" op-status="0" interval="0" last-rc-change="1602502380" last-run="1602502380" exec-time="1364" queue-time="0" op-digest="cb2426b2050bd79e2d7ca6ef986f4323"/>
</lrm_resource>
<lrm_resource id="srv05-lab4" type="server" class="ocf" provider="alteeve">
<lrm_rsc_op id="srv05-lab4_last_0" operation_key="srv05-lab4_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="13:0:7:2b9d6b7e-5be6-467d-a3bd-f42e3b7718c7" transition-magic="0:7;13:0:7:2b9d6b7e-5be6-467d-a3bd-f42e3b7718c7" exit-reason="" on_node="mk-a02n01" call-id="53" rc-code="7" op-status="0" interval="0" last-rc-change="1602502380" last-run="1602502380" exec-time="1458" queue-time="0" op-digest="c738571d2348f506b23eda5a19a9b2ec"/>
</lrm_resource>
<lrm_resource id="srv06-lab5" type="server" class="ocf" provider="alteeve">
<lrm_rsc_op id="srv06-lab5_last_0" operation_key="srv06-lab5_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.3.0" transition-key="14:0:7:2b9d6b7e-5be6-467d-a3bd-f42e3b7718c7" transition-magic="0:7;14:0:7:2b9d6b7e-5be6-467d-a3bd-f42e3b7718c7" exit-reason="" on_node="mk-a02n01" call-id="57" rc-code="7" op-status="0" interval="0" last-rc-change="1602502380" last-run="1602502380" exec-time="1415" queue-time="0" op-digest="750371be716fd8e695d423bf33be9d04"/>
</lrm_resource>
</lrm_resources>
</lrm>
</node_state>
</status>
</cib>
';
# NOTE(review): this leading 'exit' short-circuits the script right here, so
# everything below (the CIB parse and the boot/shutdown exercise) is currently
# unreachable. It looks intentionally left in as a debug gate — remove it to
# re-enable the code that follows.
exit;

# Load cluster state from the CIB XML captured in $cib above.
$anvil->Cluster->parse_cib({debug => 2, cib => $cib});

# Target server: '--server <name>' if given (and truthy), else a default.
my $switches    = $anvil->data->{switches};
my $server_name = $switches->{server} || "srv07-el6";

# '--boot' wins over '--shutdown' when both switches are passed.
if ($switches->{boot})
{
	print "Booting: [$server_name]\n";
	$anvil->Server->boot_virsh({debug => 2, server => $server_name});
}
elsif ($switches->{'shutdown'})
{
	print "Shutting down: [$server_name]\n";
	$anvil->Server->shutdown_virsh({debug => 2, server => $server_name});
}
exit;

Loading…
Cancel
Save