* Finished the automatic archiving of the database! New in M3: archiving happens on dashboards only, and whichever database triggers an archive will archive all data in any connected databases. This is a departure from the old behaviour, where each host archived its own data, but it avoids putting disk or CPU load on server hosts. (A minimal sketch of the dashboard-only gate follows the changed-file list below.)

Signed-off-by: Digimer <digimer@alteeve.ca>
Branch: main
Author: Digimer (5 years ago)
Parent: 650dc6eb80
Commit: 197f5ed90e
1. Anvil/Tools.pm (3 lines changed)
2. Anvil/Tools/Database.pm (86 lines changed)
3. Anvil/Tools/Storage.pm (8 lines changed)
4. share/words.xml (7 lines changed)
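
A minimal sketch of the dashboard-only gate described in the commit message, assuming $anvil->Get->host_type() reports "striker" on dashboards (the real check lives in Database->archive_database(); this is an illustration, not the actual implementation):

# Sketch only: skip archiving on anything that is not a Striker dashboard,
# which is what the new log_0452 string below reports.
if ($anvil->Get->host_type() ne "striker")
{
	$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 2, key => "log_0452"});
	return(0);
}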

@@ -865,11 +865,14 @@ sub _set_defaults
"network_interfaces",
"bonds",
"bridges",
"bridge_interfaces",
"ip_addresses",
"files",
"file_locations",
"servers",
"definitions",
"oui",
"mac_to_ip",
"updated",
"alert_sent",
"states",

@@ -7320,7 +7320,6 @@ sub resync_databases
# Archive old data before resync'ing
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => $debug, key => "log_0451"});
$anvil->Database->archive_database({debug => $debug});
die;
### NOTE: Don't sort this array; we need to resync in the order that the user passed the tables to us
### to avoid trouble with primary/foreign keys.
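
To illustrate the ordering constraint in the note above (the table names come from the defaults list earlier in this diff; the exact parent/child relationships are assumptions for illustration):

# Hypothetical: 'hosts' rows must be resync'ed before tables whose foreign
# keys reference a host, so parent tables come first in the caller's list.
my @tables = ("hosts", "network_interfaces", "bonds", "bridges", "ip_addresses");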
@@ -7975,10 +7974,17 @@ sub _archive_table
if (not $table)
{
# ...
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 0, priority => "err", key => "log_0020", variables => { method => "Convert->_archive_table()", parameter => "table" }});
return("!!error!!");
}
# We don't archive the OUI table; it generally has more entries than the threshold that triggers an archive, but all of them are still needed.
if ($table eq "oui")
{
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 0, priority => "err", key => "log_0459", variables => { table => $table }});
return(0);
}
# These values are sanity checked before this method is called.
my $compress = $anvil->data->{sys}{database}{archive}{compress};
my $directory = $anvil->data->{sys}{database}{archive}{directory};
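
As a sketch of what those sanity-checked values might look like (the path and the 'division' and 'count' key names are assumptions for illustration; only 'compress' and 'directory' appear in this hunk):

# Illustrative values only; the real defaults are set elsewhere.
$anvil->data->{sys}{database}{archive}{compress}  = 1;                          # compress archive files after writing
$anvil->data->{sys}{database}{archive}{directory} = "/usr/local/anvil/archive"; # hypothetical path
$anvil->data->{sys}{database}{archive}{division}  = 25000;                      # assumed key; rows per archive segment
$anvil->data->{sys}{database}{archive}{count}     = 10000;                      # assumed key; rows to keep ($drop_to)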
@@ -8013,10 +8019,10 @@ sub _archive_table
# Before we do any real analysis, do we have enough entries in the history schema to trigger an archive?
$query = "SELECT COUNT(*) FROM history.".$table.";";
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => { query => $query }});
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { query => $query }});
$count = $anvil->Database->query({debug => $debug, uuid => $uuid, query => $query, source => $THIS_FILE, line => __LINE__})->[0]->[0];
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => {
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => {
"s1:uuid" => $uuid,
"s2:count" => $count,
}});
@@ -8030,7 +8036,7 @@ sub _archive_table
my $to_remove = $count - $drop_to;
my $loops = (int($to_remove / $division) + 1);
my $records_per_loop = $anvil->Convert->round({number => ($to_remove / $loops)});
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => {
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => {
"s1:to_remove" => $to_remove,
"s2:loops" => $loops,
"s3:records_per_loop" => $records_per_loop,
@@ -8045,11 +8051,11 @@ sub _archive_table
# There is enough data to trigger an archive, so let's get started with a list of columns in
# this table.
$query = "SELECT column_name FROM information_schema.columns WHERE table_schema = 'history' AND table_name = ".$anvil->Database->quote($table)." AND column_name != 'history_id' AND column_name != 'modified_date';";
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 2, key => "log_0124", variables => { query => $query }});
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => $debug, key => "log_0124", variables => { query => $query }});
my $columns = $anvil->Database->query({debug => $debug, uuid => $uuid, query => $query, source => $THIS_FILE, line => __LINE__});
my $column_count = @{$columns};
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => {
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => {
columns => $columns,
column_count => $column_count
}});
@@ -8060,16 +8066,28 @@ sub _archive_table
{
# We need the date stamp from the record closest to the offset.
$loop++;
my $sql_file = "COPY ".$table." (";
my $query = "SELECT modified_date FROM history.".$table." OFFSET ".$offset." LIMIT 1";
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => {
my $sql_file = "
-- Dump created at: [".$anvil->Get->date_and_time()."]
SET statement_timeout = 0;
SET lock_timeout = 0;
SET idle_in_transaction_session_timeout = 0;
SET client_encoding = 'UTF8';
SET standard_conforming_strings = on;
SELECT pg_catalog.set_config('search_path', '', false);
SET check_function_bodies = false;
SET client_min_messages = warning;
SET row_security = off;
COPY history.".$table." (";
my $query = "SELECT modified_date FROM history.".$table." ORDER BY modified_date ASC OFFSET ".$offset." LIMIT 1;";
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => {
"s1:loop" => $loop,
"s2:query" => $query,
"s3:sql_file" => $sql_file,
}});
my $modified_date = $anvil->Database->query({debug => $debug, uuid => $uuid, query => $query, source => $THIS_FILE, line => __LINE__})->[0]->[0];
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => { modified_date => $modified_date }});
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { modified_date => $modified_date }});
# Build the query.
$query = "SELECT ";
@@ -8079,7 +8097,11 @@ sub _archive_table
$query .= $column->[0].", ";
}
$sql_file .= "modified_date) FROM stdin;\n";
$query .= "modified_date FROM history.".$table." WHERE modified_date >= '".$modified_date."' ORDER BY modified_date ASC OFFSET ".$offset.";";
$query .= "modified_date FROM history.".$table." WHERE modified_date >= '".$modified_date."' ORDER BY modified_date ASC;";
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => {
sql_file => $sql_file,
query => $query,
}});
my $results = $anvil->Database->query({debug => $debug, uuid => $uuid, query => $query, source => $THIS_FILE, line => __LINE__});
my $count = @{$results};
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => {
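
For a hypothetical table with two data columns, the query assembled above would come out as:

# Illustrative result of the string building; 'example' and its columns are
# hypothetical, and $modified_date comes from the OFFSET query earlier.
my $query = "SELECT example_uuid, example_note, modified_date FROM history.example WHERE modified_date >= '".$modified_date."' ORDER BY modified_date ASC;";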
@@ -8109,17 +8131,18 @@ sub _archive_table
$line .= $value."\t";
}
$sql_file .= $line."\n";
# The 'history_id' is NOT consistent between databases, so we don't record it here.
# Add the modified_date column.
$line .= $row->[$i]."\n";
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { line => $line }});
$sql_file .= $line;
}
$sql_file .= "\\.\n\n";;
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { sql_file => $sql_file }});
my $archive_file = $directory."/".$table.".".$anvil->Database->get_host_from_uuid({short => 1, host_uuid => $uuid}).".".$time_stamp.".".$loop.".out";
my $archive_file = $directory."/".$anvil->Database->get_host_from_uuid({short => 1, host_uuid => $uuid}).".".$table.".".$time_stamp.".".$loop.".out";
$archive_file =~ s/\/\//\//g;
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => { archive_file => $archive_file }});
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { archive_file => $archive_file }});
# The data may not be sensitive, but we play it safe and write the file with restrictive permissions.
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 1, key => "log_0454", variables => {
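
Putting the file-name pieces together, with a hypothetical host, time stamp and loop counter (note the reordering above puts the host name ahead of the table name):

# Hypothetical archive file name and abridged content; the file is a
# pg_dump-style COPY dump, as built above:
#   /usr/local/anvil/archive/striker01.ip_addresses.2020-03-01_14-30-00.1.out
#
#   -- Dump created at: [2020/03/01 14:30:00]
#   SET statement_timeout = 0;
#   ...
#   COPY history.ip_addresses (ip_address_uuid, ..., modified_date) FROM stdin;
#   <tab-separated row values>
#   \.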
@@ -8135,7 +8158,8 @@ sub _archive_table
mode => "0600",
secure => 1,
});
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => { failed => $failed }});
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { failed => $failed }});
if ($failed)
{
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 0, priority => "err", key => "error_0099", variables => {
@@ -8146,28 +8170,38 @@ sub _archive_table
}
else
{
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 2, key => "log_0283"});
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 1, key => "log_0283"});
$vacuum = 1;
$query = "DELETE FROM history.".$table." WHERE modified_date >= '".$modified_date."';";
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => { query => $query }});
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { query => $query }});
if ($compress)
{
# If the compression fails, it doesn't break archiving, so we don't
# treat a failure here as fatal.
my ($failed) = $anvil->Storage->compress({
debug => $debug,
file => $archive_file,
});
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { failed => $failed }});
}
# Now actually remove the data.
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 1, key => "log_0457"});
my $query = "DELETE FROM history.".$table." WHERE modified_date >= '".$modified_date."';";
$anvil->Database->write({debug => $debug, uuid => $uuid, query => $query, source => $THIS_FILE, line => __LINE__});
}
$offset -= $records_per_loop;
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => { offset => $offset }});
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { offset => $offset }});
}
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => { vacuum => $vacuum }});
$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => $debug, list => { vacuum => $vacuum }});
if ($vacuum)
{
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 1, key => "log_0458"});
my $query = "VACUUM FULL;";
$anvil->Database->write({debug => 2, uuid => $uuid, query => $query, source => $THIS_FILE, line => __LINE__});
$anvil->Database->write({debug => $debug, uuid => $uuid, query => $query, source => $THIS_FILE, line => __LINE__});
}
die;
}
return(0);
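
Because the archive files are written in pg_dump's COPY format, restoring one should, in principle, just be a matter of feeding it back to psql. A hedged sketch, assuming the database is named 'anvil' and the file was not compressed:

# Hypothetical restore; the database name and file path are assumptions.
system("psql", "anvil", "-f", "/usr/local/anvil/archive/striker01.ip_addresses.2020-03-01_14-30-00.1.out");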

@@ -766,7 +766,7 @@ sub compress
# Let's see how much it shrinks. What's the starting size?
my ($start_size) = (stat($file))[7];
my $start_time = time;
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => $debug, key => "log_0455", variables => {
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 1, key => "log_0455", variables => {
file => $file,
size => $anvil->Convert->add_commas({number => $start_size})." (".$anvil->Convert->bytes_to_human_readable({'bytes' => $start_size}).")",
}});
@@ -800,7 +800,7 @@ sub compress
my ($out_size) = (stat($out_file))[7];
my $took = time - $start_time;
my $difference = $start_size - $out_size;
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => $debug, key => "log_0456", variables => {
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 1, key => "log_0456", variables => {
file => $out_file,
size => $anvil->Convert->add_commas({number => $out_size})." (".$anvil->Convert->bytes_to_human_readable({'bytes' => $out_size}).")",
difference => $anvil->Convert->add_commas({number => $difference})." (".$anvil->Convert->bytes_to_human_readable({'bytes' => $difference}).")",
@@ -871,7 +871,7 @@ fi
{
# Compress!
my $start_time = time;
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => $debug, key => "log_0455", variables => {
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 1, key => "log_0455", variables => {
file => $file,
size => $anvil->Convert->add_commas({number => $start_size})." (".$anvil->Convert->bytes_to_human_readable({'bytes' => $start_size}).")",
}});
@@ -971,7 +971,7 @@ fi
{
my $took = time - $start_time;
my $difference = $start_size - $out_size;
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => $debug, key => "log_0456", variables => {
$anvil->Log->entry({source => $THIS_FILE, line => __LINE__, level => 1, key => "log_0456", variables => {
file => $out_file,
size => $anvil->Convert->add_commas({number => $out_size})." (".$anvil->Convert->bytes_to_human_readable({'bytes' => $out_size}).")",
difference => $anvil->Convert->add_commas({number => $difference})." (".$anvil->Convert->bytes_to_human_readable({'bytes' => $difference}).")",
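
The before/after bookkeeping that compress() logs boils down to something like this standalone sketch (the use of bzip2 is an assumption; Storage->compress may invoke a different tool):

# Self-contained sketch of the size and time measurements logged above.
use strict;
use warnings;

my $file = "/tmp/example.out";     # hypothetical input file
my ($start_size) = (stat($file))[7];
my $start_time   = time;

system("bzip2", "--keep", $file);  # assumed compressor; writes $file.bz2

my ($out_size)  = (stat($file.".bz2"))[7];
my $took        = time - $start_time;
my $difference  = $start_size - $out_size;
printf("Compressed %s: %d -> %d bytes (saved %d bytes) in %d seconds.\n",
       $file, $start_size, $out_size, $difference, $took);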

@@ -784,10 +784,13 @@ Failed to promote the DRBD resource: [#!variable!resource!#] primary. Expected a
<key name="log_0450">Skipping the network scan. The next scheduled scan will be done in: [#!variable!next_scan!#]. Override with '--force'.</key>
<key name="log_0451">Checking to see if any data needs to be archived before starting the resync.</key>
<key name="log_0452">Skipping archiving, not a Striker dashboard.</key>
<key name="log_0453">Archiving: [#!variable!records!#] over: [#!variable!loops!#] segments from the table: [#!variable!table!#] from the database on: [#!variable!host!#].</key>
<key name="log_0453">Archiving: [#!variable!records!#] over: [#!variable!loops!#] segments from the table: [#!variable!table!#] from the database on: [#!variable!host!#]. This might take a bit, please be patient.</key>
<key name="log_0454">Writing: [#!variable!records!#] to the file: [#!variable!file!#].</key>
<key name="log_0455">The file to be compressed: [#!variable!file!#] has a current size of: [#!variable!size!#].</key>
<key name="log_0455">The file to be compressed: [#!variable!file!#] has a current size of: [#!variable!size!#]. Please be patient, this can take a bit of time.</key>
<key name="log_0456">The compressed file is: [#!variable!file!#] is: [#!variable!size!#], a reduction of: [#!variable!difference!#]. The compression took: [#!variable!took!#].</key>
<key name="log_0457">Removing archived records.</key>
<key name="log_0458">Vacuuming the database to purge the removed records.</key>
<key name="log_0459">Skipping the table: [#!variable!table!#], it is excluded from archiving.</key>
<!-- Test words. Do NOT change unless you update 't/Words.t' or tests will needlessly fail. -->
<key name="t_0000">Test</key>
