* Fixed a bug in Database->check_agent_data() where the list of tables wasn't passed in, and thus the table list wasn't then passed on to Database->_find_behind_databases().
* Started work on a new method called Storage->parse_lsblk().

Signed-off-by: Digimer <digimer@alteeve.ca>

parent cda51e562d
commit 4d5ec72026
9 changed files with 975 additions and 524 deletions
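(Aside on the Storage->parse_lsblk() mentioned above: only started in this commit, so nothing here is the actual implementation. A minimal sketch of the direction such a parser could take, assuming 'lsblk' were registered under 'path::exe::lsblk' (an assumption) and reusing the System->call() helper already used by the agents below:)

# Illustrative sketch only; the real Storage->parse_lsblk() may differ entirely.
sub parse_lsblk
{
	my ($anvil) = @_;
	
	# Ask lsblk for machine-readable KEY="value" pairs, with sizes in bytes and full device paths.
	# NOTE: 'path::exe::lsblk' is assumed to exist for this sketch.
	my $shell_call = $anvil->data->{path}{exe}{lsblk}." --bytes --paths --pairs --output NAME,TYPE,SIZE,MOUNTPOINT";
	my ($output, $return_code) = $anvil->System->call({shell_call => $shell_call});
	return(1) if $return_code;
	
	foreach my $line (split/\n/, $output)
	{
		# Each line looks like: NAME="/dev/sda" TYPE="disk" SIZE="500107862016" MOUNTPOINT=""
		my $this_device = {};
		while ($line =~ /(\w+)="(.*?)"/g)
		{
			$this_device->{lc($1)} = $2;
		}
		next if not $this_device->{name};
		$anvil->data->{lsblk}{$this_device->{name}} = $this_device;
	}
	return(0);
}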
@ -0,0 +1,464 @@ |
|||||||
|
#!/usr/bin/perl |
||||||
|
# |
||||||
|
# This scans the nodes and DR host for DRBD resources and their states. |
||||||
|
# |
||||||
|
# NOTE: The data stored here is not bound to a given host. As such, only hosted VMs are processed. |
||||||
|
# |
||||||
|
# Examples; |
||||||
|
# |
||||||
|
# Exit codes; |
||||||
|
# 0 = Normal exit. |
||||||
|
# 1 = Startup failure (not running as root, no DB, bad file read, etc) |
||||||
|
# 2 = DRBD not found or configured. |
||||||
|
# |
||||||
|
# TODO: |
||||||
|
# - |
||||||
|
# |
||||||
|
|
||||||
|
use strict; |
||||||
|
use warnings; |
||||||
|
use Anvil::Tools; |
||||||
|
use Data::Dumper; |
||||||
|
|
||||||
|
# Disable buffering |
||||||
|
$| = 1; |
||||||
|
|
||||||
|
# Prevent a discrepancy between UID/GID and EUID/EGID from throwing an error.
||||||
|
$< = $>; |
||||||
|
$( = $); |
||||||
|
|
||||||
|
my $THIS_FILE = ($0 =~ /^.*\/(.*)$/)[0]; |
||||||
|
my $running_directory = ($0 =~ /^(.*?)\/$THIS_FILE$/)[0]; |
||||||
|
if (($running_directory =~ /^\./) && ($ENV{PWD})) |
||||||
|
{ |
||||||
|
$running_directory =~ s/^\./$ENV{PWD}/; |
||||||
|
} |
||||||
|
|
||||||
|
my $anvil = Anvil::Tools->new({log_level => 2, log_secure => 1}); |
||||||
|
$anvil->Log->level({set => 2}); |
||||||
|
$anvil->Log->secure({set => 1}); |
||||||
|
|
||||||
|
$anvil->data->{'scan-drbd'} = { |
||||||
|
resource_status => "/sys/kernel/debug/drbd/resources", |
||||||
|
config_directory => "/etc/drbd.d", |
||||||
|
}; |
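# NOTE: As an illustration (resource names here are hypothetical), the debugfs tree read later in
#       gather_data() looks like:
#         /sys/kernel/debug/drbd/resources/<resource>/connections/<peer>/<volume>/proc_drbd
#       A directory existing under 'resources/' is what marks a resource as 'up' below.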
||||||
|
|
||||||
|
# Make sure we're running as 'root' |
||||||
|
# $< == real UID, $> == effective UID |
||||||
|
if (($< != 0) && ($> != 0)) |
||||||
|
{ |
||||||
|
# Not root |
||||||
|
print $anvil->Words->string({key => "error_0005"})."\n"; |
||||||
|
$anvil->nice_exit({exit_code => 1}); |
||||||
|
} |
||||||
|
|
||||||
|
$anvil->data->{scancore}{'scan-drbd'}{disable} = 0; |
||||||
|
$anvil->data->{scancore}{'scan-drbd'}{'auto-undefine'} = 1; |
||||||
|
$anvil->data->{switches}{force} = 0; |
||||||
|
|
||||||
|
$anvil->Storage->read_config(); |
||||||
|
|
||||||
|
# Read switches |
||||||
|
$anvil->Get->switches; |
||||||
|
|
||||||
|
# If we're disabled and '--force' wasn't used, exit. |
||||||
|
if (($anvil->data->{scancore}{'scan-drbd'}{disable}) && (not $anvil->data->{switches}{force})) |
||||||
|
{ |
||||||
|
# Exit. |
||||||
|
$anvil->nice_exit({exit_code => 0}); |
||||||
|
} |
||||||
|
|
||||||
|
if ($anvil->data->{switches}{purge}) |
||||||
|
{ |
||||||
|
# This can be called when doing bulk-database purges. |
||||||
|
my $schema_file = $anvil->data->{path}{directories}{scan_agents}."/".$THIS_FILE."/".$THIS_FILE.".sql"; |
||||||
|
$anvil->Database->purge_data({ |
||||||
|
debug => 2, |
||||||
|
tables => $anvil->Database->get_tables_from_schema({schema_file => $schema_file}), |
||||||
|
}); |
||||||
|
$anvil->nice_exit({exit_code => 0}); |
||||||
|
} |
||||||
|
|
||||||
|
# Handle start-up tasks |
||||||
|
my $problem = $anvil->ScanCore->agent_startup({agent => $THIS_FILE}); |
||||||
|
if ($problem) |
||||||
|
{ |
||||||
|
$anvil->nice_exit({exit_code => 1}); |
||||||
|
} |
||||||
|
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 1, key => "scan_drbd_log_0001", variables => { program => $THIS_FILE }}); |
||||||
|
|
||||||
|
if (not gather_data($anvil)) |
||||||
|
{ |
||||||
|
# DRBD not found or configured. |
||||||
|
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, 'print' => 1, level => 0, key => "scan_drbd_error_0001"}); |
||||||
|
$anvil->nice_exit({exit_code => 2}); |
||||||
|
} |
||||||
|
|
||||||
|
|
||||||
|
# Update the database |
||||||
|
$anvil->Database->insert_or_update_updated({updated_by => $THIS_FILE}); |
||||||
|
|
||||||
|
# Clean up and go away. |
||||||
|
$anvil->nice_exit({exit_code => 0}); |
||||||
|
|
||||||
|
|
||||||
|
############################################################################################################# |
||||||
|
# Functions # |
||||||
|
############################################################################################################# |
||||||
|
|
||||||
|
sub gather_data |
||||||
|
{ |
||||||
|
my ($anvil) = @_; |
||||||
|
|
||||||
|
if ((not -e $anvil->data->{path}{exe}{drbdadm}) or ($anvil->Get->host_type eq "striker")) |
||||||
|
{ |
||||||
|
# DRBD isn't installed or this is a striker node. |
||||||
|
return(0); |
||||||
|
} |
||||||
|
|
||||||
|
# Parse drbdadm |
||||||
|
my ($drbd_xml, $return_code) = $anvil->System->call({shell_call => $anvil->data->{path}{exe}{drbdadm}." dump-xml"}); |
||||||
|
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 3, list => { drbd_xml => $drbd_xml, return_code => $return_code }}); |
||||||
|
if ($return_code) |
||||||
|
{ |
||||||
|
# Failed to dump the XML. |
||||||
|
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 1, key => "scan_drbd_error_0002", variables => { return_code => $return_code }}); |
||||||
|
return(0); |
||||||
|
} |
||||||
|
else |
||||||
|
{ |
||||||
|
local $@; |
||||||
|
my $dom = eval { XML::LibXML->load_xml(string => $drbd_xml); }; |
||||||
|
if ($@) |
||||||
|
{ |
||||||
|
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 1, key => "scan_drbd_error_0003", variables => { |
||||||
|
xml => $drbd_xml, |
||||||
|
error => $@, |
||||||
|
}}); |
||||||
|
return(0); |
||||||
|
} |
||||||
|
else |
||||||
|
{ |
||||||
|
# Successful parse! |
||||||
|
### TODO: Might be best to set these default values by calling/parsing
||||||
|
### 'drbdsetup show <resource> --show-defaults'. |
||||||
|
$anvil->data->{new}{scan_drbd}{scan_drbd_host_uuid} = $anvil->Get->host_uuid; |
||||||
|
$anvil->data->{new}{scan_drbd}{scan_drbd_common_xml} = $drbd_xml; |
||||||
|
$anvil->data->{new}{scan_drbd}{scan_drbd_flush_disk} = 1; |
||||||
|
$anvil->data->{new}{scan_drbd}{scan_drbd_flush_md} = 1; |
||||||
|
$anvil->data->{new}{scan_drbd}{scan_drbd_timeout} = 6; # The DRBD default is '60' (tenths of a second), ie: 6 seconds
||||||
|
$anvil->data->{new}{scan_drbd}{scan_drbd_total_sync_speed} = 0; |
||||||
|
|
||||||
|
foreach my $name ($dom->findnodes('/config/common/section')) |
||||||
|
{ |
||||||
|
my $section = $name->{name}; |
||||||
|
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 3, list => { section => $section }}); |
||||||
|
foreach my $option_name ($name->findnodes('./option')) |
||||||
|
{ |
||||||
|
my $variable = $option_name->{name}; |
||||||
|
my $value = $option_name->{value}; |
||||||
|
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 3, list => { |
||||||
|
's1:variable' => $variable, |
||||||
|
's2:value' => $value, |
||||||
|
}}); |
||||||
|
|
||||||
|
if ($section eq "net") |
||||||
|
{ |
||||||
|
if ($variable eq "timeout") |
||||||
|
{ |
||||||
|
$anvil->data->{new}{scan_drbd}{scan_drbd_timeout} = ($value / 10);
||||||
|
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => { |
||||||
|
"new::scan_drbd::scan_drbd_timeout" => $anvil->data->{new}{scan_drbd}{scan_drbd_timeout}, |
||||||
|
}}); |
||||||
|
} |
||||||
|
} |
||||||
|
if ($section eq "disk") |
||||||
|
{ |
||||||
|
if ($variable eq "disk-flushes") |
||||||
|
{ |
||||||
|
$anvil->data->{new}{scan_drbd}{scan_drbd_flush_disk} = $value eq "no" ? 0 : 1; |
||||||
|
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => { |
||||||
|
"new::scan_drbd::scan_drbd_flush_disk" => $anvil->data->{new}{scan_drbd}{scan_drbd_flush_disk}, |
||||||
|
}}); |
||||||
|
} |
||||||
|
if ($variable eq "md-flushes") |
||||||
|
{ |
||||||
|
$anvil->data->{new}{scan_drbd}{scan_drbd_flush_md} = $value eq "no" ? 0 : 1; |
||||||
|
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => { |
||||||
|
"new::scan_drbd::scan_drbd_flush_md" => $anvil->data->{new}{scan_drbd}{scan_drbd_flush_md}, |
||||||
|
}}); |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
foreach my $name ($dom->findnodes('/config/resource')) |
||||||
|
{ |
||||||
|
my $resource = $name->{name}; |
||||||
|
my $conf_file = $name->{'conf-file-line'}; |
||||||
|
$conf_file =~ s/:\d+$//; |
||||||
|
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => { |
||||||
|
's1:resource' => $resource, |
||||||
|
's2:conf_file' => $conf_file, |
||||||
|
}}); |
||||||
|
|
||||||
|
$anvil->data->{new}{resource}{$resource}{xml} = $name; |
||||||
|
$anvil->data->{new}{resource}{$resource}{up} = 0; |
||||||
|
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 3, list => { |
||||||
|
"new::resource::${resource}::xml" => $anvil->data->{new}{resource}{$resource}{xml}, |
||||||
|
}}); |
||||||
|
|
||||||
|
foreach my $host ($name->findnodes('./host')) |
||||||
|
{ |
||||||
|
my $this_host_name = $host->{name}; |
||||||
|
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 3, list => { this_host_name => $this_host_name }}); |
||||||
|
|
||||||
|
next if (($this_host_name ne $anvil->Get->host_name) && ($this_host_name ne $anvil->Get->short_host_name)); |
||||||
|
|
||||||
|
foreach my $volume_vnr ($host->findnodes('./volume')) |
||||||
|
{ |
||||||
|
my $volume = $volume_vnr->{vnr}; |
||||||
|
my $meta_disk = $volume_vnr->findvalue('./meta-disk'); |
||||||
|
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => { |
||||||
|
's1:volume' => $volume, |
||||||
|
's2:meta_disk' => $meta_disk, |
||||||
|
}}); |
||||||
|
|
||||||
|
$anvil->data->{new}{resource}{$resource}{volume}{$volume}{device_path} = $volume_vnr->findvalue('./device'); |
||||||
|
$anvil->data->{new}{resource}{$resource}{volume}{$volume}{device_minor} = $volume_vnr->findvalue('./device/@minor'); |
||||||
|
$anvil->data->{new}{resource}{$resource}{volume}{$volume}{size} = 0; |
||||||
|
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => { |
||||||
|
"s1:new::resource::${resource}::volume::${volume}::device_path" => $anvil->data->{new}{resource}{$resource}{volume}{$volume}{device_path}, |
||||||
|
"s2:new::resource::${resource}::volume::${volume}::device_minor" => $anvil->data->{new}{resource}{$resource}{volume}{$volume}{device_minor}, |
||||||
|
}}); |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
foreach my $connection ($name->findnodes('./connection')) |
||||||
|
{ |
||||||
|
my $peer = ""; |
||||||
|
foreach my $host ($connection->findnodes('./host')) |
||||||
|
{ |
||||||
|
my $this_host_name = $host->{name}; |
||||||
|
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 3, list => { this_host_name => $this_host_name }}); |
||||||
|
|
||||||
|
next if (($this_host_name eq $anvil->Get->host_name) or ($this_host_name eq $anvil->Get->short_host_name)); |
||||||
|
|
||||||
|
$peer = $this_host_name; |
||||||
|
$anvil->data->{new}{resource}{$resource}{peer}{$peer}{peer_ip_address} = $host->findvalue('./address'); |
||||||
|
$anvil->data->{new}{resource}{$resource}{peer}{$peer}{tcp_port} = $host->findvalue('./address/@port');
||||||
|
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => { |
||||||
|
"s1:new::resource::${resource}::peer::${peer}::peer_ip_address" => $anvil->data->{new}{resource}{$resource}{peer}{$peer}{peer_ip_address}, |
||||||
|
"s2:new::resource::${resource}::peer::${peer}::tcp_port" => $anvil->data->{new}{resource}{$resource}{peer}{$peer}{tcp_port}, |
||||||
|
}}); |
||||||
|
|
||||||
|
|
||||||
|
# Setup some default values. |
||||||
|
$anvil->data->{new}{resource}{$resource}{peer}{$peer}{protocol} = "unknown"; |
||||||
|
$anvil->data->{new}{resource}{$resource}{peer}{$peer}{fencing} = "unknown"; |
||||||
|
foreach my $volume (sort {$a cmp $b} keys %{$anvil->data->{new}{resource}{$resource}{volume}}) |
||||||
|
{ |
||||||
|
$anvil->data->{new}{resource}{$resource}{volume}{$volume}{peer}{$peer}{connection_state} = "disconnected"; |
||||||
|
$anvil->data->{new}{resource}{$resource}{volume}{$volume}{peer}{$peer}{local_disk_state} = "down"; |
||||||
|
$anvil->data->{new}{resource}{$resource}{volume}{$volume}{peer}{$peer}{peer_disk_state} = "unknown"; |
||||||
|
$anvil->data->{new}{resource}{$resource}{volume}{$volume}{peer}{$peer}{local_role} = "down"; |
||||||
|
$anvil->data->{new}{resource}{$resource}{volume}{$volume}{peer}{$peer}{peer_role} = "unknown"; |
||||||
|
$anvil->data->{new}{resource}{$resource}{volume}{$volume}{peer}{$peer}{out_of_sync_size} = -1; |
||||||
|
$anvil->data->{new}{resource}{$resource}{volume}{$volume}{peer}{$peer}{replication_speed} = 0; |
||||||
|
$anvil->data->{new}{resource}{$resource}{volume}{$volume}{peer}{$peer}{estimated_time_to_sync} = 0; |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
foreach my $name ($connection->findnodes('./section')) |
||||||
|
{ |
||||||
|
my $section = $name->{name}; |
||||||
|
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => { section => $section }}); |
||||||
|
|
||||||
|
foreach my $option_name ($name->findnodes('./option')) |
||||||
|
{ |
||||||
|
my $variable = $option_name->{name}; |
||||||
|
my $value = $option_name->{value}; |
||||||
|
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => { |
||||||
|
's1:variable' => $variable, |
||||||
|
's2:value' => $value, |
||||||
|
}}); |
||||||
|
|
||||||
|
if ($section eq "net") |
||||||
|
{ |
||||||
|
if ($variable eq "protocol") |
||||||
|
{ |
||||||
|
$anvil->data->{new}{resource}{$resource}{peer}{$peer}{protocol} = $value; |
||||||
|
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => { |
||||||
|
"new::resource::${resource}::peer::${peer}::protocol" => $anvil->data->{new}{resource}{$resource}{peer}{$peer}{protocol}, |
||||||
|
}}); |
||||||
|
} |
||||||
|
if ($variable eq "fencing") |
||||||
|
{ |
||||||
|
$anvil->data->{new}{resource}{$resource}{peer}{$peer}{fencing} = $value; |
||||||
|
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => { |
||||||
|
"new::resource::${resource}::peer::${peer}::fencing" => $anvil->data->{new}{resource}{$resource}{peer}{$peer}{fencing}, |
||||||
|
}}); |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
|
||||||
|
local(*DIRECTORY); |
||||||
|
opendir(DIRECTORY, $anvil->data->{'scan-drbd'}{resource_status}); |
||||||
|
while(my $file = readdir(DIRECTORY)) |
||||||
|
{ |
||||||
|
next if $file eq "."; |
||||||
|
next if $file eq ".."; |
||||||
|
my $full_path = $anvil->data->{'scan-drbd'}{resource_status}."/".$file; |
||||||
|
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => { full_path => $full_path }}); |
||||||
|
if (-d $full_path) |
||||||
|
{ |
||||||
|
my $resource = $file; |
||||||
|
$anvil->data->{new}{resource}{$resource}{up} = 1; |
||||||
|
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => { |
||||||
|
"new::resource::${resource}::up" => $anvil->data->{new}{resource}{$resource}{up}, |
||||||
|
}}); |
||||||
|
} |
||||||
|
} |
||||||
|
closedir(DIRECTORY); |
||||||
|
|
||||||
|
#print "Sync progress:\n"; |
||||||
|
#print " ".sprintf("%-${longest_resource}s", "Res")." ".sprintf("%-${longest_connection}s", "To")." Vol\n"; |
||||||
|
foreach my $resource (sort {$a cmp $b} keys %{$anvil->data->{new}{resource}}) |
||||||
|
{ |
||||||
|
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => { |
||||||
|
"new::resource::${resource}::up" => $anvil->data->{new}{resource}{$resource}{up}, |
||||||
|
}}); |
||||||
|
|
||||||
|
# If the resource isn't up, there won't be a proc file to read.
||||||
|
next if not $anvil->data->{new}{resource}{$resource}{up}; |
||||||
|
|
||||||
|
foreach my $volume (sort {$a cmp $b} keys %{$anvil->data->{new}{resource}{$resource}{volume}}) |
||||||
|
{ |
||||||
|
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => { volume => $volume }}); |
||||||
|
|
||||||
|
foreach my $peer (sort {$a cmp $b} keys %{$anvil->data->{new}{resource}{$resource}{volume}{$volume}{peer}}) |
||||||
|
{ |
||||||
|
my $proc_file = $anvil->data->{'scan-drbd'}{resource_status}."/".$resource."/connections/".$peer."/".$volume."/proc_drbd"; |
||||||
|
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => { proc_file => $proc_file }}); |
||||||
|
|
||||||
|
my $file_body = $anvil->Storage->read_file({file => $proc_file}); |
||||||
|
my $progress = ""; |
||||||
|
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => { file_body => $file_body }}); |
||||||
|
foreach my $line (split/\n/, $file_body) |
||||||
|
{ |
||||||
|
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => { line => $line }}); |
||||||
|
|
||||||
|
if ($line =~ /cs:(.*?) /) |
||||||
|
{ |
||||||
|
$anvil->data->{new}{resource}{$resource}{volume}{$volume}{peer}{$peer}{connection_state} = $1; |
||||||
|
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => { |
||||||
|
"new::resource::${resource}::volume::${volume}::peer::${peer}::connection_state" => $anvil->data->{new}{resource}{$resource}{volume}{$volume}{peer}{$peer}{connection_state}, |
||||||
|
}}); |
||||||
|
} |
||||||
|
if ($line =~ /ro:(.*?)\/(.*?) /) |
||||||
|
{ |
||||||
|
$anvil->data->{new}{resource}{$resource}{volume}{$volume}{peer}{$peer}{local_role} = lc($1); |
||||||
|
$anvil->data->{new}{resource}{$resource}{volume}{$volume}{peer}{$peer}{peer_role} = lc($2); |
||||||
|
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => { |
||||||
|
"new::resource::${resource}::volume::${volume}::peer::${peer}::local_role" => $anvil->data->{new}{resource}{$resource}{volume}{$volume}{peer}{$peer}{local_role}, |
||||||
|
"new::resource::${resource}::volume::${volume}::peer::${peer}::peer_role" => $anvil->data->{new}{resource}{$resource}{volume}{$volume}{peer}{$peer}{peer_role}, |
||||||
|
}}); |
||||||
|
|
||||||
|
# If the peer is secondary, read the device size. |
||||||
|
if ($anvil->data->{new}{resource}{$resource}{volume}{$volume}{peer}{$peer}{peer_role} eq "secondary") |
||||||
|
{ |
||||||
|
# Get the size of the DRBD device. |
||||||
|
my ($size, $return_code) = $anvil->System->call({secure => 1, shell_call => $anvil->data->{path}{exe}{blockdev}." --getsize64 /dev/drbd".$anvil->data->{new}{resource}{$resource}{volume}{$volume}{device_minor}}); |
||||||
|
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => { |
||||||
|
size => $size, |
||||||
|
return_code => $return_code, |
||||||
|
}}); |
||||||
|
if (not $return_code) |
||||||
|
{ |
||||||
|
$anvil->data->{new}{resource}{$resource}{volume}{$volume}{size} = $size; |
||||||
|
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => { |
||||||
|
"new::resource::${resource}::volume::${volume}::size" => $anvil->data->{new}{resource}{$resource}{volume}{$volume}{size}." (".$anvil->Convert->bytes_to_human_readable({'bytes' => $anvil->data->{new}{resource}{$resource}{volume}{$volume}{size}}).")", |
||||||
|
}}); |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
if ($line =~ /ds:(.*?)\/(.*?) /) |
||||||
|
{ |
||||||
|
$anvil->data->{new}{resource}{$resource}{volume}{$volume}{peer}{$peer}{local_disk_state} = $1; |
||||||
|
$anvil->data->{new}{resource}{$resource}{volume}{$volume}{peer}{$peer}{peer_disk_state} = $2; |
||||||
|
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => { |
||||||
|
"new::resource::${resource}::volume::${volume}::peer::${peer}::local_disk_state" => $anvil->data->{new}{resource}{$resource}{volume}{$volume}{peer}{$peer}{local_disk_state}, |
||||||
|
"new::resource::${resource}::volume::${volume}::peer::${peer}::peer_disk_state" => $anvil->data->{new}{resource}{$resource}{volume}{$volume}{peer}{$peer}{peer_disk_state}, |
||||||
|
}}); |
||||||
|
} |
||||||
|
if ($line =~ /oos:(\d+)/) |
||||||
|
{ |
||||||
|
$anvil->data->{new}{resource}{$resource}{volume}{$volume}{peer}{$peer}{out_of_sync_size} = $1; |
||||||
|
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => { |
||||||
|
"new::resource::${resource}::volume::${volume}::peer::${peer}::out_of_sync_size" => $anvil->data->{new}{resource}{$resource}{volume}{$volume}{peer}{$peer}{out_of_sync_size}, |
||||||
|
}}); |
||||||
|
} |
||||||
|
=cut |
||||||
|
0: cs:Established ro:Secondary/Secondary ds:Inconsistent/Inconsistent C r----- |
||||||
|
ns:0 nr:0 dw:0 dr:0 al:0 bm:0 lo:0 pe:[0;0] ua:0 ap:[0;0] ep:1 wo:1 oos:0 |
||||||
|
resync: used:0/61 hits:0 misses:0 starving:0 locked:0 changed:0 |
||||||
|
act_log: used:0/1237 hits:0 misses:0 starving:0 locked:0 changed:0 |
||||||
|
blocked on activity log: 0/0/0 |
||||||
|
|
||||||
|
0: cs:SyncTarget ro:Secondary/Primary ds:Inconsistent/UpToDate C r----- |
||||||
|
ns:0 nr:648960 dw:648728 dr:0 al:0 bm:0 lo:4 pe:[0;1] ua:4 ap:[0;0] ep:1 wo:1 oos:20321476 |
||||||
|
[>....................] sync'ed: 3.2% (19844/20476)M |
||||||
|
finish: 0:03:39 speed: 92,672 (92,936 -- 92,672) want: 2,880 K/sec |
||||||
|
3% sector pos: 1298032/41940408 |
||||||
|
resync: used:1/61 hits:31926 misses:10 starving:0 locked:0 changed:5 |
||||||
|
act_log: used:0/1237 hits:0 misses:0 starving:0 locked:0 changed:0 |
||||||
|
blocked on activity log: 0/0/0 |
||||||
|
=cut |
||||||
|
if ($line =~ /sync'ed: (.*?\%)/) |
||||||
|
{ |
||||||
|
$progress .= $1; |
||||||
|
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => { progress => $progress }}); |
||||||
|
} |
||||||
|
if ($line =~ /speed: (.*?) \(/) |
||||||
|
{ |
||||||
|
my $speed = $1;
$speed =~ s/,//g;
$anvil->data->{new}{resource}{$resource}{volume}{$volume}{peer}{$peer}{replication_speed} = ($speed * 1024);
||||||
|
$anvil->data->{new}{scan_drbd}{scan_drbd_total_sync_speed} += $anvil->data->{new}{resource}{$resource}{volume}{$volume}{peer}{$peer}{replication_speed}; |
||||||
|
|
||||||
|
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => { |
||||||
|
"s1:new::resource::${resource}::volume::${volume}::peer::${peer}::replication_speed" => $anvil->data->{new}{resource}{$resource}{volume}{$volume}{peer}{$peer}{replication_speed}." (".$anvil->Convert->bytes_to_human_readable({'bytes' => $anvil->data->{new}{resource}{$resource}{volume}{$volume}{peer}{$peer}{replication_speed}}).")", |
||||||
|
"s2:new::scan_drbd::scan_drbd_total_sync_speed" => $anvil->data->{new}{scan_drbd}{scan_drbd_total_sync_speed}." (".$anvil->Convert->bytes_to_human_readable({'bytes' => $anvil->data->{new}{scan_drbd}{scan_drbd_total_sync_speed}}).")", |
||||||
|
}}); |
||||||
|
|
||||||
|
} |
||||||
|
if ($line =~ /finish: (\d+):(\d+):(\d+) /) |
||||||
|
{ |
||||||
|
my $hours = $1; |
||||||
|
my $minutes = $2; |
||||||
|
my $seconds = $3; |
||||||
|
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => { |
||||||
|
's1:hours' => $hours, |
||||||
|
's2:minutes' => $minutes, |
||||||
|
's3:seconds' => $seconds, |
||||||
|
}}); |
||||||
|
|
||||||
|
$anvil->data->{new}{resource}{$resource}{volume}{$volume}{peer}{$peer}{estimated_time_to_sync} = (($hours * 3600) + ($minutes * 60) + $seconds);
||||||
|
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => { |
||||||
|
"new::resource::${resource}::volume::${volume}::peer::${peer}::estimated_time_to_sync" => $anvil->data->{new}{resource}{$resource}{volume}{$volume}{peer}{$peer}{estimated_time_to_sync}." (".$anvil->Convert->time({'time' => $anvil->data->{new}{resource}{$resource}{volume}{$volume}{peer}{$peer}{estimated_time_to_sync}, long => 1}).")", |
||||||
|
}}); |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
} |
||||||
|
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => { |
||||||
|
"s2:new::scan_drbd::scan_drbd_total_sync_speed" => $anvil->data->{new}{scan_drbd}{scan_drbd_total_sync_speed}." (".$anvil->Convert->bytes_to_human_readable({'bytes' => $anvil->data->{new}{scan_drbd}{scan_drbd_total_sync_speed}}).")", |
||||||
|
}}); |
||||||
|
|
||||||
|
return(1); |
||||||
|
} |
@ -0,0 +1,357 @@ |
|||||||
|
-- This is the database schema for the 'scan-drbd' Scan Agent. |
||||||
|
|
||||||
|
CREATE TABLE scan_drbd ( |
||||||
|
scan_drbd_uuid uuid not null primary key, |
||||||
|
scan_drbd_host_uuid uuid not null, |
||||||
|
scan_drbd_common_xml text not null, -- This is the raw <common> section of 'drbdadm dump-xml'. |
||||||
|
scan_drbd_flush_disk boolean not null, -- Set to true when disk flushes are enabled (only safe to be false when FBWC is used) |
||||||
|
scan_drbd_flush_md boolean not null, -- Set to true when meta-data flushes are enabled (only safe to be false when FBWC is used) |
||||||
|
scan_drbd_timeout numeric not null, -- This is how long we'll wait for a response from a peer (in seconds) before declaring it lost. |
||||||
|
scan_drbd_total_sync_speed numeric not null, -- This is the current total sync speed across all resync'ing volumes |
||||||
|
modified_date timestamp with time zone not null, |
||||||
|
|
||||||
|
FOREIGN KEY(scan_drbd_host_uuid) REFERENCES hosts(host_uuid) |
||||||
|
); |
||||||
|
ALTER TABLE scan_drbd OWNER TO admin; |
||||||
|
|
||||||
|
CREATE TABLE history.scan_drbd ( |
||||||
|
history_id bigserial, |
||||||
|
scan_drbd_uuid uuid, |
||||||
|
scan_drbd_host_uuid uuid, |
||||||
|
scan_drbd_common_xml text, |
||||||
|
scan_drbd_flush_disk boolean, |
||||||
|
scan_drbd_flush_md boolean, |
||||||
|
scan_drbd_timeout numeric, |
||||||
|
scan_drbd_total_sync_speed numeric, |
||||||
|
modified_date timestamp with time zone not null |
||||||
|
); |
||||||
|
ALTER TABLE history.scan_drbd OWNER TO admin; |
||||||
|
|
||||||
|
CREATE FUNCTION history_scan_drbd() RETURNS trigger |
||||||
|
AS $$ |
||||||
|
DECLARE |
||||||
|
history_scan_drbd RECORD; |
||||||
|
BEGIN |
||||||
|
SELECT INTO history_scan_drbd * FROM scan_drbd WHERE scan_drbd_uuid=new.scan_drbd_uuid; |
||||||
|
INSERT INTO history.scan_drbd |
||||||
|
(scan_drbd_uuid, |
||||||
|
scan_drbd_host_uuid, |
||||||
|
scan_drbd_common_xml, |
||||||
|
scan_drbd_flush_disk, |
||||||
|
scan_drbd_flush_md, |
||||||
|
scan_drbd_timeout, |
||||||
|
scan_drbd_total_sync_speed, |
||||||
|
modified_date) |
||||||
|
VALUES |
||||||
|
(history_scan_drbd.scan_drbd_uuid, |
||||||
|
history_scan_drbd.scan_drbd_host_uuid, |
||||||
|
history_scan_drbd.scan_drbd_common_xml, |
||||||
|
history_scan_drbd.scan_drbd_flush_disk, |
||||||
|
history_scan_drbd.scan_drbd_flush_md, |
||||||
|
history_scan_drbd.scan_drbd_timeout, |
||||||
|
history_scan_drbd.scan_drbd_total_sync_speed, |
||||||
|
history_scan_drbd.modified_date); |
||||||
|
RETURN NULL; |
||||||
|
END; |
||||||
|
$$ |
||||||
|
LANGUAGE plpgsql; |
||||||
|
ALTER FUNCTION history_scan_drbd() OWNER TO admin; |
||||||
|
|
||||||
|
CREATE TRIGGER trigger_scan_drbd |
||||||
|
AFTER INSERT OR UPDATE ON scan_drbd |
||||||
|
FOR EACH ROW EXECUTE PROCEDURE history_scan_drbd(); |
||||||
|
|
||||||
|
|
||||||
|
-- This is mostly an anchor for the connections and volumes table |
||||||
|
CREATE TABLE scan_drbd_resources ( |
||||||
|
scan_drbd_resource_uuid uuid not null primary key, |
||||||
|
scan_drbd_resource_host_uuid uuid not null, |
||||||
|
scan_drbd_resource_name text not null, -- The name of the resource. |
||||||
|
scan_drbd_resource_xml text not null, -- This is the raw XML for this resource from 'drbdadm dump-xml'.
||||||
|
modified_date timestamp with time zone not null, |
||||||
|
|
||||||
|
FOREIGN KEY(scan_drbd_resource_host_uuid) REFERENCES hosts(host_uuid) |
||||||
|
); |
||||||
|
ALTER TABLE scan_drbd_resources OWNER TO admin; |
||||||
|
|
||||||
|
CREATE TABLE history.scan_drbd_resources ( |
||||||
|
history_id bigserial, |
||||||
|
scan_drbd_resource_uuid uuid, |
||||||
|
scan_drbd_resource_host_uuid uuid, |
||||||
|
scan_drbd_resource_name text, |
||||||
|
scan_drbd_resource_xml text, |
||||||
|
modified_date timestamp with time zone not null |
||||||
|
); |
||||||
|
ALTER TABLE history.scan_drbd_resources OWNER TO admin; |
||||||
|
|
||||||
|
CREATE FUNCTION history_scan_drbd_resources() RETURNS trigger |
||||||
|
AS $$ |
||||||
|
DECLARE |
||||||
|
history_scan_drbd_resources RECORD; |
||||||
|
BEGIN |
||||||
|
SELECT INTO history_scan_drbd_resources * FROM scan_drbd_resources WHERE scan_drbd_resource_uuid=new.scan_drbd_resource_uuid; |
||||||
|
INSERT INTO history.scan_drbd_resources |
||||||
|
(scan_drbd_resource_uuid, |
||||||
|
scan_drbd_resource_host_uuid, |
||||||
|
scan_drbd_resource_name, |
||||||
|
scan_drbd_resource_xml, |
||||||
|
modified_date) |
||||||
|
VALUES |
||||||
|
(history_scan_drbd_resources.scan_drbd_resource_uuid, |
||||||
|
history_scan_drbd_resources.scan_drbd_resource_host_uuid, |
||||||
|
history_scan_drbd_resources.scan_drbd_resource_name, |
||||||
|
history_scan_drbd_resources.scan_drbd_resource_xml, |
||||||
|
history_scan_drbd_resources.modified_date); |
||||||
|
RETURN NULL; |
||||||
|
END; |
||||||
|
$$ |
||||||
|
LANGUAGE plpgsql; |
||||||
|
ALTER FUNCTION history_scan_drbd_resources() OWNER TO admin; |
||||||
|
|
||||||
|
CREATE TRIGGER trigger_scan_drbd_resources |
||||||
|
AFTER INSERT OR UPDATE ON scan_drbd_resources |
||||||
|
FOR EACH ROW EXECUTE PROCEDURE history_scan_drbd_resources(); |
||||||
|
|
||||||
|
|
||||||
|
-- Volumes under resources. |
||||||
|
-- |
||||||
|
-- Disk States; |
||||||
|
-- Diskless - No local block device has been assigned to the DRBD driver. This may mean that the resource |
||||||
|
-- has never attached to its backing device, that it has been manually detached using drbdadm |
||||||
|
-- detach, or that it automatically detached after a lower-level I/O error. |
||||||
|
-- Inconsistent - The data is inconsistent. This status occurs immediately upon creation of a new resource, |
||||||
|
-- on both nodes (before the initial full sync). Also, this status is found in one node (the |
||||||
|
-- synchronization target) during synchronization. |
||||||
|
-- Outdated - Resource data is consistent, but outdated. |
||||||
|
-- DUnknown - This state is used for the peer disk if no network connection is available. |
||||||
|
-- Consistent - Consistent data of a node without connection. When the connection is established, it is |
||||||
|
-- decided whether the data is UpToDate or Outdated. |
||||||
|
-- UpToDate - Consistent, up-to-date state of the data. This is the normal state |
||||||
|
-- |
||||||
|
-- NOTE: Transient states are not recorded, but are below for completeness' sake
||||||
|
-- Attaching - Transient state while reading meta data. |
||||||
|
-- Detaching - Transient state while detaching and waiting for ongoing IOs to complete. |
||||||
|
-- Failed - Transient state following an I/O failure report by the local block device. Next state: |
||||||
|
-- Diskless. |
||||||
|
-- Negotiating - Transient state when an Attach is carried out on an already-Connected DRBD device. |
||||||
|
-- |
||||||
|
-- Resource Roles ; |
||||||
|
-- Primary - The resource is currently in the primary role, and may be read from and written to. This role |
||||||
|
-- only occurs on one of the two nodes, unless dual-primary mode is enabled. |
||||||
|
-- Secondary - The resource is currently in the secondary role. It normally receives updates from its peer |
||||||
|
-- (unless running in disconnected mode), but may neither be read from nor written to. This role |
||||||
|
-- may occur on one or both nodes. |
||||||
|
-- Unknown - The resource’s role is currently unknown. The local resource role never has this status. It is |
||||||
|
-- only displayed for the peer’s resource role, and only in disconnected mode. |
||||||
|
-- |
||||||
|
-- Replication states; |
||||||
|
-- Off - The volume is not replicated over this connection, since the connection is not Connected. |
||||||
|
-- Established - All writes to that volume are replicated online. This is the normal state. |
||||||
|
-- StartingSyncS - Full synchronization, initiated by the administrator, is just starting. The next possible |
||||||
|
-- states are: SyncSource or PausedSyncS. |
||||||
|
-- StartingSyncT - Full synchronization, initiated by the administrator, is just starting. Next state: |
||||||
|
-- WFSyncUUID. |
||||||
|
-- WFBitMapS - Partial synchronization is just starting. Next possible states: SyncSource or PausedSyncS. |
||||||
|
-- WFBitMapT - Partial synchronization is just starting. Next possible state: WFSyncUUID. |
||||||
|
-- WFSyncUUID - Synchronization is about to begin. Next possible states: SyncTarget or PausedSyncT. |
||||||
|
-- SyncSource - Synchronization is currently running, with the local node being the source of |
||||||
|
-- synchronization. |
||||||
|
-- SyncTarget - Synchronization is currently running, with the local node being the target of |
||||||
|
-- synchronization. |
||||||
|
-- PausedSyncS - The local node is the source of an ongoing synchronization, but synchronization is |
||||||
|
-- currently paused. This may be due to a dependency on the completion of another |
||||||
|
-- synchronization process, or due to synchronization having been manually interrupted by |
||||||
|
-- drbdadm pause-sync. |
||||||
|
-- PausedSyncT - The local node is the target of an ongoing synchronization, but synchronization is |
||||||
|
-- currently paused. This may be due to a dependency on the completion of another |
||||||
|
-- synchronization process, or due to synchronization having been manually interrupted by |
||||||
|
-- drbdadm pause-sync. |
||||||
|
-- VerifyS - On-line device verification is currently running, with the local node being the source of |
||||||
|
-- verification. |
||||||
|
-- VerifyT - On-line device verification is currently running, with the local node being the target of |
||||||
|
-- verification. |
||||||
|
-- Ahead - Data replication was suspended, since the link can not cope with the load. This state is |
||||||
|
-- enabled by the configuration on-congestion option (see Configuring congestion policies and |
||||||
|
-- suspended replication). |
||||||
|
-- Behind - Data replication was suspended by the peer, since the link can not cope with the load. |
||||||
|
-- This state is enabled by the configuration on-congestion option on the peer node (see |
||||||
|
-- Configuring congestion policies and suspended replication). |
||||||
|
-- |
||||||
|
-- Connection States; |
||||||
|
-- |
||||||
|
-- StandAlone - No network configuration available. The resource has not yet been connected, or has been administratively disconnected (using drbdadm disconnect), or has dropped its connection due to failed authentication or split brain. |
||||||
|
-- Connecting - This node is waiting until the peer node becomes visible on the network. |
||||||
|
-- Connected - A DRBD connection has been established, data mirroring is now active. This is the normal state. |
||||||
|
-- |
||||||
|
-- NOTE: Temporary states are not recorded, but are below for completeness' sake
||||||
|
-- Disconnecting - Temporary state during disconnection. The next state is StandAlone. |
||||||
|
-- Unconnected - Temporary state, prior to a connection attempt. Possible next states: Connecting. |
||||||
|
-- Timeout - Temporary state following a timeout in the communication with the peer. Next state: |
||||||
|
-- Unconnected. |
||||||
|
-- BrokenPipe - Temporary state after the connection to the peer was lost. Next state: Unconnected. |
||||||
|
-- NetworkFailure - Temporary state after the connection to the partner was lost. Next state: Unconnected. |
||||||
|
-- ProtocolError - Temporary state after the connection to the partner was lost. Next state: Unconnected. |
||||||
|
-- TearDown - Temporary state. The peer is closing the connection. Next state: Unconnected. |
||||||
|
|
||||||
|
-- NOTE: This table stores the information about this volume on the local host. |
||||||
|
CREATE TABLE scan_drbd_volumes ( |
||||||
|
scan_drbd_volume_uuid uuid not null primary key, |
||||||
|
scan_drbd_volume_host_uuid uuid not null, |
||||||
|
scan_drbd_volume_scan_drbd_resource_uuid uuid not null, |
||||||
|
scan_drbd_volume_number numeric not null, -- The volume number.
||||||
|
scan_drbd_volume_device_path text not null, -- This is the device path to the DRBD resource |
||||||
|
scan_drbd_volume_device_minor numeric not null, -- This is the device minor number, which translates to '/dev/drbd<minor>' |
||||||
|
scan_drbd_volume_size numeric not null, -- This is size of the DRBD device (in bytes) |
||||||
|
modified_date timestamp with time zone not null, |
||||||
|
|
||||||
|
FOREIGN KEY(scan_drbd_volume_scan_drbd_resource_uuid) REFERENCES scan_drbd_resources(scan_drbd_resource_uuid), |
||||||
|
FOREIGN KEY(scan_drbd_volume_host_uuid) REFERENCES hosts(host_uuid) |
||||||
|
); |
||||||
|
ALTER TABLE scan_drbd_volumes OWNER TO admin; |
||||||
|
|
||||||
|
CREATE TABLE history.scan_drbd_volumes ( |
||||||
|
history_id bigserial, |
||||||
|
scan_drbd_volume_uuid uuid, |
||||||
|
scan_drbd_volume_host_uuid uuid, |
||||||
|
scan_drbd_volume_scan_drbd_resource_uuid uuid, |
||||||
|
scan_drbd_volume_number numeric, |
||||||
|
scan_drbd_volume_device_path text, |
||||||
|
scan_drbd_volume_device_minor numeric, |
||||||
|
scan_drbd_volume_size numeric, |
||||||
|
modified_date timestamp with time zone not null |
||||||
|
); |
||||||
|
ALTER TABLE history.scan_drbd_volumes OWNER TO admin; |
||||||
|
|
||||||
|
CREATE FUNCTION history_scan_drbd_volumes() RETURNS trigger |
||||||
|
AS $$ |
||||||
|
DECLARE |
||||||
|
history_scan_drbd_volumes RECORD; |
||||||
|
BEGIN |
||||||
|
SELECT INTO history_scan_drbd_volumes * FROM scan_drbd_volumes WHERE scan_drbd_volume_uuid=new.scan_drbd_volume_uuid; |
||||||
|
INSERT INTO history.scan_drbd_volumes |
||||||
|
(scan_drbd_volume_uuid, |
||||||
|
scan_drbd_volume_host_uuid, |
||||||
|
scan_drbd_volume_scan_drbd_resource_uuid, |
||||||
|
scan_drbd_volume_number, |
||||||
|
scan_drbd_volume_device_path, |
||||||
|
scan_drbd_volume_device_minor, |
||||||
|
scan_drbd_volume_size, |
||||||
|
modified_date) |
||||||
|
VALUES |
||||||
|
(history_scan_drbd_volumes.scan_drbd_volume_uuid, |
||||||
|
history_scan_drbd_volumes.scan_drbd_volume_host_uuid, |
||||||
|
history_scan_drbd_volumes.scan_drbd_volume_scan_drbd_resource_uuid, |
||||||
|
history_scan_drbd_volumes.scan_drbd_volume_number, |
||||||
|
history_scan_drbd_volumes.scan_drbd_volume_device_path, |
||||||
|
history_scan_drbd_volumes.scan_drbd_volume_device_minor, |
||||||
|
history_scan_drbd_volumes.scan_drbd_volume_size, |
||||||
|
history_scan_drbd_volumes.modified_date); |
||||||
|
RETURN NULL; |
||||||
|
END; |
||||||
|
$$ |
||||||
|
LANGUAGE plpgsql; |
||||||
|
ALTER FUNCTION history_scan_drbd_volumes() OWNER TO admin; |
||||||
|
|
||||||
|
CREATE TRIGGER trigger_scan_drbd_volumes |
||||||
|
AFTER INSERT OR UPDATE ON scan_drbd_volumes |
||||||
|
FOR EACH ROW EXECUTE PROCEDURE history_scan_drbd_volumes(); |
||||||
|
|
||||||
|
|
||||||
|
-- This is the peer information for a given volume |
||||||
|
CREATE TABLE scan_drbd_peers ( |
||||||
|
scan_drbd_peer_uuid uuid not null primary key, |
||||||
|
scan_drbd_peer_host_uuid uuid not null, |
||||||
|
scan_drbd_peer_scan_drbd_volume_uuid uuid not null, |
||||||
|
scan_drbd_peer_peer_host_name text not null, -- The host name for this peer, as recorded in the config |
||||||
|
scan_drbd_peer_connection_state text not null, -- The connection state to the peer. See "Connection States" and "Replication States" above. |
||||||
|
scan_drbd_peer_local_disk_state text not null, -- The local disk state, see "Disk States" above.
scan_drbd_peer_peer_disk_state text not null, -- The peer's disk state, see "Disk States" above.
scan_drbd_peer_local_role text not null, -- The current local role, see "Resource Roles" above.
scan_drbd_peer_peer_role text not null, -- The current role of the peer, see "Resource Roles" above.
||||||
|
scan_drbd_peer_out_of_sync_size numeric not null, -- This is the number of "out of sync" bytes. Set to '0' when both sides are UpToDate. |
||||||
|
scan_drbd_peer_replication_speed numeric not null, -- This is how many bytes per second are being copied. Set to '0' when not synchronizing. |
||||||
|
scan_drbd_peer_estimated_time_to_sync numeric not null, -- This is the number of seconds *estimated* to remain in the resync. Set to '0' when both sides are UpToDate.
||||||
|
scan_drbd_peer_peer_ip_address text not null, -- The (SN) IP address used for this peer. |
||||||
|
scan_drbd_peer_tcp_port numeric not null, -- This is the port number used for this peer. |
||||||
|
scan_drbd_peer_protocol text not null, -- This is 'A' for async peers (to DR, usually) or 'C' for sync peers (node peer and sometimes DR)
||||||
|
scan_drbd_peer_fencing text not null, -- Set to 'resource-and-stonith' for node peers and 'dont-care' for DR hosts. |
||||||
|
modified_date timestamp with time zone not null, |
||||||
|
|
||||||
|
FOREIGN KEY(scan_drbd_peer_scan_drbd_volume_uuid) REFERENCES scan_drbd_volumes(scan_drbd_volume_uuid),
||||||
|
FOREIGN KEY(scan_drbd_peer_host_uuid) REFERENCES hosts(host_uuid) |
||||||
|
); |
||||||
|
ALTER TABLE scan_drbd_peers OWNER TO admin; |
||||||
|
|
||||||
|
CREATE TABLE history.scan_drbd_peers ( |
||||||
|
history_id bigserial, |
||||||
|
scan_drbd_peer_uuid uuid, |
||||||
|
scan_drbd_peer_host_uuid uuid, |
||||||
|
scan_drbd_peer_scan_drbd_volume_uuid uuid, |
||||||
|
scan_drbd_peer_peer_host_name text, |
||||||
|
scan_drbd_peer_connection_state text, |
||||||
|
scan_drbd_peer_local_disk_state text, |
||||||
|
scan_drbd_peer_peer_disk_state text, |
||||||
|
scan_drbd_peer_local_role text, |
||||||
|
scan_drbd_peer_peer_role text, |
||||||
|
scan_drbd_peer_out_of_sync_size numeric, |
||||||
|
scan_drbd_peer_replication_speed numeric, |
||||||
|
scan_drbd_peer_estimated_time_to_sync numeric, |
||||||
|
scan_drbd_peer_peer_ip_address text, |
||||||
|
scan_drbd_peer_tcp_port numeric, |
||||||
|
scan_drbd_peer_protocol text, |
||||||
|
scan_drbd_peer_fencing text, |
||||||
|
modified_date timestamp with time zone not null |
||||||
|
); |
||||||
|
ALTER TABLE history.scan_drbd_peers OWNER TO admin; |
||||||
|
|
||||||
|
CREATE FUNCTION history_scan_drbd_peers() RETURNS trigger |
||||||
|
AS $$ |
||||||
|
DECLARE |
||||||
|
history_scan_drbd_peers RECORD; |
||||||
|
BEGIN |
||||||
|
SELECT INTO history_scan_drbd_peers * FROM scan_drbd_peers WHERE scan_drbd_peer_uuid=new.scan_drbd_peer_uuid; |
||||||
|
INSERT INTO history.scan_drbd_peers |
||||||
|
(scan_drbd_peer_uuid, |
||||||
|
scan_drbd_peer_host_uuid, |
||||||
|
scan_drbd_peer_scan_drbd_volume_uuid, |
||||||
|
scan_drbd_peer_peer_host_name, |
||||||
|
scan_drbd_peer_connection_state, |
||||||
|
scan_drbd_peer_local_disk_state, |
||||||
|
scan_drbd_peer_peer_disk_state, |
||||||
|
scan_drbd_peer_local_role, |
||||||
|
scan_drbd_peer_peer_role, |
||||||
|
scan_drbd_peer_out_of_sync_size, |
||||||
|
scan_drbd_peer_replication_speed, |
||||||
|
scan_drbd_peer_estimated_time_to_sync, |
||||||
|
scan_drbd_peer_peer_ip_address, |
||||||
|
scan_drbd_peer_tcp_port, |
||||||
|
scan_drbd_peer_protocol, |
||||||
|
scan_drbd_peer_fencing, |
||||||
|
modified_date) |
||||||
|
VALUES |
||||||
|
(history_scan_drbd_peers.scan_drbd_peer_uuid, |
||||||
|
history_scan_drbd_peers.scan_drbd_peer_host_uuid, |
||||||
|
history_scan_drbd_peers.scan_drbd_peer_scan_drbd_volume_uuid, |
||||||
|
history_scan_drbd_peers.scan_drbd_peer_peer_host_name, |
||||||
|
history_scan_drbd_peers.scan_drbd_peer_connection_state, |
||||||
|
history_scan_drbd_peers.scan_drbd_peer_local_disk_state, |
||||||
|
history_scan_drbd_peers.scan_drbd_peer_peer_disk_state, |
||||||
|
history_scan_drbd_peers.scan_drbd_peer_local_role, |
||||||
|
history_scan_drbd_peers.scan_drbd_peer_peer_role, |
||||||
|
history_scan_drbd_peers.scan_drbd_peer_out_of_sync_size, |
||||||
|
history_scan_drbd_peers.scan_drbd_peer_replication_speed, |
||||||
|
history_scan_drbd_peers.scan_drbd_peer_estimated_time_to_sync, |
||||||
|
history_scan_drbd_peers.scan_drbd_peer_peer_ip_address, |
||||||
|
history_scan_drbd_peers.scan_drbd_peer_tcp_port, |
||||||
|
history_scan_drbd_peers.scan_drbd_peer_protocol, |
||||||
|
history_scan_drbd_peers.scan_drbd_peer_fencing, |
||||||
|
history_scan_drbd_peers.modified_date); |
||||||
|
RETURN NULL; |
||||||
|
END; |
||||||
|
$$ |
||||||
|
LANGUAGE plpgsql; |
||||||
|
ALTER FUNCTION history_scan_drbd_peers() OWNER TO admin; |
||||||
|
|
||||||
|
CREATE TRIGGER trigger_scan_drbd_peers |
||||||
|
AFTER INSERT OR UPDATE ON scan_drbd_peers |
||||||
|
FOR EACH ROW EXECUTE PROCEDURE history_scan_drbd_peers(); |
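
(As an illustration of how this schema could be queried once the agent starts recording data — this query is not part of the commit — the columns above make it easy to list peers that still have data out of sync:)

-- Illustrative example only; not part of this schema file.
SELECT scan_drbd_peer_peer_host_name,
       scan_drbd_peer_connection_state,
       scan_drbd_peer_out_of_sync_size,
       scan_drbd_peer_replication_speed
  FROM scan_drbd_peers
 WHERE scan_drbd_peer_out_of_sync_size > 0;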
@ -0,0 +1,39 @@ |
|||||||
|
<?xml version="1.0" encoding="UTF-8"?> |
||||||
|
|
||||||
|
<!-- |
||||||
|
Company: Alteeve's Niche, Inc. |
||||||
|
License: GPL v2+ |
||||||
|
Author: Madison Kelly <mkelly@alteeve.ca> |
||||||
|
|
||||||
|
NOTE: All string keys MUST be prefixed with the agent name! ie: 'scan_drbd_log_0001'.
||||||
|
--> |
||||||
|
|
||||||
|
<words> |
||||||
|
<meta version="3.0.0" languages="en_CA,jp"/> |
||||||
|
<!-- Canadian English --> |
||||||
|
<language name="en_CA" long_name="Canadian English" description="ScanCore scan agent that monitors hardware, like RAM modules, CSS LED status, CPU information, etc."> |
||||||
|
|
||||||
|
<!-- Alert entries --> |
||||||
|
<key name="scan_drbd_alert_0001"></key> |
||||||
|
|
||||||
|
<!-- Error entries --> |
||||||
|
<key name="scan_drbd_error_0001">DRBD is not configured on this host, exiting.</key> |
||||||
|
<key name="scan_drbd_error_0002">The call to 'drbdadm dump-xml' returned the exit code: [#!variable!return_code!#].</key> |
||||||
|
<key name="scan_drbd_error_0003">[ Warning ] - Failed to parse the DRBD XML. The XML read was: |
||||||
|
======== |
||||||
|
#!variable!xml!# |
||||||
|
======== |
||||||
|
|
||||||
|
The error was: |
||||||
|
|
||||||
|
======== |
||||||
|
#!variable!error!# |
||||||
|
======== |
||||||
|
</key> |
||||||
|
|
||||||
|
<!-- Log entries -->
||||||
|
<key name="scan_drbd_log_0001">Starting The: [#!variable!program!#] DRBD resource agent.</key> |
||||||
|
|
||||||
|
|
||||||
|
</language> |
||||||
|
</words> |
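
(For context, and not part of this commit's changes: these keys are consumed the same way 'error_0005' is printed near the top of the agent above. A minimal, illustrative example using the existing Words->string() call; '$return_code' stands in for the real value from the failed call:)

# Illustrative only; mirrors how 'error_0005' is printed by the agent.
print $anvil->Words->string({
	key       => "scan_drbd_error_0002",
	variables => { return_code => $return_code },
})."\n";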
@ -1,37 +1,29 @@
 #!/usr/bin/perl
 # 
 
-use warnings;
 use strict;
+use warnings;
+use Anvil::Tools;
+use Data::Dumper;
 
-my $sysstat_directory = "/var/log/sa/";
-my $hostname          = `hostname | cut -f 1 -d .`;
+my $THIS_FILE           = ($0 =~ /^.*\/(.*)$/)[0];
+my $running_directory   = ($0 =~ /^(.*?)\/$THIS_FILE$/)[0];
+if (($running_directory =~ /^\./) && ($ENV{PWD}))
+{
+	$running_directory =~ s/^\./$ENV{PWD}/;
+}
 
-opendir(my $directory_handle, $sysstat_directory) || die "Can't locate ".$sysstat_directory."\n";
+# Turn off buffering so that the pinwheel will display while waiting for the SSH call(s) to complete.
+$| = 1;
 
-my @file_list = grep { /^sa[0-9]./ } readdir($directory_handle);
+my $anvil = Anvil::Tools->new();
+$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 2, key => "log_0115", variables => { program => $THIS_FILE }});
 
-printf "Hostname is ... ".$hostname."\n";
-foreach my $filename (sort {$a cmp $b} @file_list)
-{
-	#printf "Filepath: ....".$sysstat_directory.$filepath."\n"
-	my $shell_call = "sadf -dht ".$sysstat_directory.$filename." -- -S -u -r -p -q -n DEV";
-	printf "Shell Call - ... ".$shell_call."\n";
-	open(my $file_handle, "$shell_call 2>&1 |") || die "Failed to parse output of [".$shell_call."].\n";
-	while (<$file_handle>)
-	{
-		chomp;
-		my $csv_line = $_;
+# Read switches (target ([user@]host[:port]) and the file with the target's password.
+$anvil->Get->switches;
 
-		if ($csv_line =~ /$hostname/)
-		{
-			#printf "CSV Line... ".$csv_line."\n";
-			printf "Variable Match!\n";
-		}
-		if ($csv_line =~ 'thinkpad-06HCV0')
-		{
-			printf "String Match!\n";
-		}
-	}
-	
-}
+# Connect to the database(s).
+$anvil->Database->connect;
+$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 2, key => "log_0132"});
 
+$anvil->nice_exit({exit_code => 0});