- First, check if either node is SyncSource; if so, use that node.
- Second, check which node hosts the most servers by RAM count; use that node. (A sketch of this selection and provision flow follows the list.)
2. Provision;
- Create DRBD resource, force primary on install target
- Create pacemaker resource in stopped state
- Set location constraint to prefer target node
- Boot server
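
A minimal Perl sketch of the selection and provision flow above. The helper subs (get_drbd_role, get_allocated_ram) are hypothetical stand-ins for the real DRBD and database lookups, and the exact drbdadm/pcs arguments (resource agent parameters, the '=200' constraint score) are assumptions, not finalized commands:

#!/usr/bin/perl
# Sketch only; get_drbd_role() and get_allocated_ram() are hypothetical
# stand-ins for the real DRBD and database lookups.
use strict;
use warnings;

# Run a shell command, dying on failure.
sub run
{
	my ($command) = @_;
	system($command) == 0 or die "Command failed: [".$command."]\n";
	return(0);
}

# 1. Selection; prefer a SyncSource node, otherwise the node hosting the
#    most servers by RAM count.
sub pick_install_target
{
	my ($node1, $node2) = @_;
	return($node1) if get_drbd_role($node1) eq "SyncSource";
	return($node2) if get_drbd_role($node2) eq "SyncSource";
	return(get_allocated_ram($node1) >= get_allocated_ram($node2) ? $node1 : $node2);
}

# 2. Provision.
sub provision_server
{
	my ($server, $resource, $target) = @_;
	
	# Create the DRBD resource and force primary on the install target.
	run("drbdadm create-md ".$resource);
	run("drbdadm up ".$resource);
	run("drbdadm primary --force ".$resource);
	
	# Create the pacemaker resource in a stopped state ('--disabled').
	run("pcs resource create ".$server." ocf:alteeve:server name=".$server." --disabled");
	
	# Set a location constraint to prefer the target node.
	run("pcs constraint location ".$server." prefers ".$target."=200");
	
	# Boot the server.
	run("pcs resource enable ".$server);
	return(0);
}
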
Create "Node status" which returns "degraded" if the peer is gone
Common queries;
* SELECT a.job_uuid, b.host_name, a.job_command, a.job_data, a.job_progress, a.job_status FROM jobs a, hosts b WHERE a.job_host_uuid = b.host_uuid AND a.job_progress != 100;
* SELECT a.host_name, b.file_name, c.file_location_active FROM hosts a, files b, file_locations c WHERE a.host_uuid = c.file_location_host_uuid AND b.file_uuid = c.file_location_file_uuid ORDER BY b.file_name ASC, a.host_name ASC;
* SELECT a.dr_link_uuid, b.host_name, c.anvil_name, a.dr_link_note FROM dr_links a, hosts b, anvils c WHERE a.dr_link_host_uuid = b.host_uuid AND a.dr_link_anvil_uuid = c.anvil_uuid ORDER BY c.anvil_name ASC, b.host_name ASC;
@@ -37,10 +37,22 @@ In Maintenance Mode: ..... [#!variable!maintenance_mode!#]
#!variable!difference!#
====
</key>
<keyname="scan_cluster_alert_0013">The server: [#!variable!server!#] was found to be failed in pacemaker, but it was successfully recovered. This does NOT mean the server rebooted, but it may have. Checking the server is advised.</key>
<keyname="scan_cluster_alert_0014">The server: [#!variable!server!#] was found to be failed in pacemaker. The attempt to recover it appears to have failed. The server might well still be running ok, checking the server is advised.</key>
<keyname="scan_cluster_alert_0015">The server: [#!variable!server!#] had been found to be failed in pacemaker. It's now recovered. This does NOT mean the server rebooted, but it may have. Checking the server is advised.</key>
<keyname="scan_cluster_log_0002">This host is a: [#!variable!host_type!#], this agent is only useful on nodes. Exiting.</key>
<keyname="scan_cluster_log_0003">[ Warning ] - The server: [#!variable!server!#] is in a FAILED state! Checking to see if it's safe to attempt recovery.</key>
<keyname="scan_cluster_log_0004">Searching node: [#!variable!node_name!# (#!variable!host_uuid!#] which is in ready state: [#!variable!node_ready!#].</key>
<keyname="scan_cluster_log_0005">Searching for the server on the local system.</key>
<keyname="scan_cluster_log_0006">Searching for the server on the peer using IP: [#!variable!target_ip!#].</key>
<keyname="scan_cluster_log_0007">The server is running locally and we're a full cluster member. Will attempt recover.</key>
<keyname="scan_cluster_log_0008">Both nodes are up and the server wasn't found anywhere. Attempting recovery.</key>
<keyname="scan_cluster_log_0009">The server was found to be running, but not here (or this node is not fully in the cluster). NOT attempting recovery yet.</key>
# Walk this cachevault's recorded variables, sorted by name.
foreach my $variable (sort {$a cmp $b} keys %{$anvil->data->{sql}{scan_storcli_variables}{scan_storcli_variable_uuid}{source_table}{'scan_storcli_cachevaults'}{source_uuid}{$cachevault_uuid}{$type}})
{
	# This variable has vanished
	next if not defined $anvil->data->{sql}{scan_storcli_variables}{scan_storcli_variable_uuid}{source_table}{'scan_storcli_cachevaults'}{source_uuid}{$cachevault_uuid}{$type}{$variable}{scan_storcli_variable_value};
	my $old_variable_value = $anvil->data->{sql}{scan_storcli_variables}{scan_storcli_variable_uuid}{source_table}{'scan_storcli_cachevaults'}{source_uuid}{$cachevault_uuid}{$type}{$variable}{scan_storcli_variable_value};
	my $variable_uuid      = $anvil->data->{sql}{scan_storcli_variables}{scan_storcli_variable_uuid}{source_table}{'scan_storcli_cachevaults'}{source_uuid}{$cachevault_uuid}{$type}{$variable}{scan_storcli_variable_uuid};
	$anvil->Log->variables({source => $THIS_FILE, line => __LINE__, level => 2, list => { 
		old_variable_value => $old_variable_value, 
		variable_uuid      => $variable_uuid, 
	}});
}
<keyname="warning_0147">[ Warning ] - The interface: [#!variable!interface!#] appears to be down (state: [#!variable!state!#]). The system uptime is: [#!variable!uptime!#], so it might be a problem where the interface didn't start on boot as it should have. So we're going to bring the interface up.</key>
<keyname="warning_0148">[ Warning ] - The IPMI stonith resource: [#!variable!resource!#] is in the role: [#!variable!role!#] (should be 'Started'). Will check the IPMI config now.</key>
<keyname="warning_0149">[ Warning ] - Failed to find a valid IP address or password to be used to setup the DR host's IPMI.</key>
<keyname="warning_0150">[ Warning ] - The test "fail file": [#!variable!fail_file!#] was found. So long as this file exists, the ocf:alteeve:server RA will return 'OCF_ERR_GENERIC' (exit code 1). Delete the file to resume normal operation.</key>
</language>
<!-- 日本語 -->
<languagename="jp"long_name="日本語"description="Anvil! language file.">