    SELECT INTO history_health * FROM health WHERE health_uuid = new.health_uuid;
    INSERT INTO history.health
        (health_uuid,
         health_host_uuid,
         health_agent_name,
         health_source_name,
         health_source_weight,
         modified_date)
    VALUES
        (history_health.health_uuid,
         history_health.health_host_uuid,
         history_health.health_agent_name,
         history_health.health_source_name,
         history_health.health_source_weight,
         history_health.modified_date);
    RETURN NULL;
END;
$$
LANGUAGE plpgsql;
ALTER FUNCTION history_health() OWNER TO admin;
CREATE TRIGGER trigger_health
AFTER INSERT OR UPDATE ON health
FOR EACH ROW EXECUTE PROCEDURE history_health();
CREATE TABLE power (
    power_uuid                 uuid     primary key,
    power_host_uuid            uuid     not null,    -- The node or dashboard that this power data came from.
    power_ups_uuid             uuid     not null,    -- This is the 'upses' -> 'ups_uuid' of the UPS. This is used to map which UPSes are powering a given node.
    power_agent_name           text     not null,    -- This is the name of the scan agent that wrote a given entry.
    power_on_battery           boolean  not null,    -- TRUE == use "time_remaining" to determine if a graceful power off is needed. FALSE == power loss NOT imminent, do not power off the node.
    power_seconds_left         numeric,              -- Should always be set, but it is only required when 'power_on_battery' is TRUE.
    power_charge_percentage    numeric,              -- Percentage charge in the UPS. Used to determine when the dashboard should boot the node after AC restores.
    SELECT INTO history_power * FROM power WHERE power_uuid = new.power_uuid;
    INSERT INTO history.power
        (power_uuid,
         power_host_uuid,
         power_ups_uuid,
         power_agent_name,
         power_on_battery,
         power_seconds_left,
         power_charge_percentage,
         modified_date)
    VALUES
        (history_power.power_uuid,
         history_power.power_host_uuid,
         history_power.power_ups_uuid,
         history_power.power_agent_name,
         history_power.power_on_battery,
         history_power.power_seconds_left,
         history_power.power_charge_percentage,
         history_power.modified_date);
    RETURN NULL;
END;
$$
LANGUAGE plpgsql;
ALTER FUNCTION history_power() OWNER TO admin;
CREATE TRIGGER trigger_power
AFTER INSERT OR UPDATE ON power
FOR EACH ROW EXECUTE PROCEDURE history_power();
-- This stores temperature information for a given host. ScanCore checks this data to decide if action needs
-- to be taken during a thermal event. On nodes, this is used to decide if a node should be shed or if an
-- Anvil! needs to be stopped entirely. On dashboards, this is used to check if/when it is safe to restart a
-- node that shut down because of a thermal event.
CREATE TABLE temperature (
    temperature_uuid           uuid     primary key,
    temperature_host_uuid      uuid     not null,    -- The node or dashboard that this temperature came from.
    temperature_agent_name     text     not null,    -- This is the name of the agent that set the alert.
    temperature_sensor_host    text     not null,    -- This is the host (uuid) that the sensor was read from. This is important as ScanCore on a Striker will read available thermal data from a node using its IPMI data.
    temperature_sensor_name    text     not null,    -- This is the name of the sensor reporting the temperature.
    temperature_celsius        numeric  not null,    -- This is the actual temperature, in Celsius of course.
    temperature_state          text     not null,    -- This is a string representing the state of the sensor. Valid values are 'ok', 'warning', and 'critical'.
    temperature_is             text     not null,    -- This indicates whether the temperature is 'nominal', 'high' or 'low'.
    server_user_stop                   boolean  not null  default FALSE,    -- When set, the server was stopped by a user. The Anvil! will not start a server that has been cleanly stopped.
    server_start_after_server_uuid     uuid     not null,                   -- This can be the server_uuid of another server. If set, this server will boot 'server_start_delay' seconds after the referenced server boots. A value of '00000000-0000-0000-0000-000000000000' will tell 'anvil-safe-start' to not boot the server at all. If a server is set not to start, any dependent servers will also stay off.
    server_start_delay                 integer  not null  default 0,        -- See above.
    server_host_uuid                   uuid     not null,                   -- This is the current hosts -> host_uuid for this server. If the server is off, this will be blank.
    server_state                       text     not null,                   -- This is the current state of this server, as reported by 'virsh list --all' (see: man virsh -> GENERIC COMMANDS -> --list).
    server_live_migration              boolean  not null  default TRUE,     -- When false, the server will be frozen during migration instead of being migrated while it is running. During a cold migration, the server will be unresponsive, so connections to it could time out. However, a frozen migration will complete faster.
    server_pre_migration_file_uuid     uuid     not null,                   -- This is set to the files -> file_uuid of a script to run BEFORE migrating a server. If the file isn't found or can't run, the script is ignored.
    server_pre_migration_arguments     text     not null,                   -- These are arguments to pass to the pre-migration script.
    server_post_migration_file_uuid    uuid     not null,                   -- This is set to the files -> file_uuid of a script to run AFTER migrating a server. If the file isn't found or can't run, the script is ignored.
    server_post_migration_arguments    text     not null,                   -- These are arguments to pass to the post-migration script.
    server_ram_in_use                  numeric  not null,                   -- This is the amount of RAM currently used by the server. If the server is off, this is the amount of RAM last used when the server was running.
    server_configured_ram              numeric  not null,                   -- This is the amount of RAM allocated to the server in the on-disk definition file. This should always match the value above, but it lets us track when a user manually updated the allocated RAM in the on-disk definition and that change hasn't yet been picked up by the server.
    server_updated_by_user             numeric  not null,                   -- This is set to a unix timestamp when the user last updated the definition (via Striker). When set, scan-server will check this value against the age of the definition file on disk. If this is newer, the on-disk definition will be updated. On the host with the server (if any), the new definition will be loaded into virsh as well.
    server_boot_time                   numeric  not null,                   -- This is the unix time (since epoch) when the server booted. It is calculated by checking 'ps -p <pid> -o etimes=' when a server is seen to be running after it had last been seen as off. If a server that had been running is seen to be off, this is set back to 0.
- How to write a NetworkManager dispatcher script to apply ethtool commands? - https://access.redhat.com/solutions/2841131
- Setup nodes to log to striker? - https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/7/html/networking_guide/sec-configuring_netconsole
- Pacemaker can be monitored via SNMP - https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/7/html/high_availability_add-on_reference/s1-snmpandpacemaker-HAAR
Changes made using tools such as nmcli do not require a reload but do require the associated interface to be put down and then up again. That can be done by using commands in the following format:
* nmcli dev disconnect interface-name
Followed by:
* nmcli con up interface-name
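For example, with a hypothetical interface named 'bcn1_link1' (the name is just an illustration):
  nmcli dev disconnect bcn1_link1    # put the interface down
  nmcli con up bcn1_link1            # bring the connection (and interface) back up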
NOTE: RHEL doesn't support direct-cabled bonds - https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/7/html/networking_guide/ch-configure_network_bonding
ifcfg-X config Notes - /usr/share/doc/initscripts-*/sysconfig.txt (Look for the sections describing the files /etc/sysconfig/network and /etc/sysconfig/network-scripts/ifcfg-<interface-name>).
* PREFIXx overrules NETMASKx. Use PREFIXx, not NETMASKx (see the example after this list).
* The 'x' suffix for PREFIX, NETMASK, etc. starts at 0 and must increment by 1 at a time.
* ZONE will be useful for the firewall stuff later.
* ETHTOOL_OPTS is deprecated, replaced by using udev rules
* initscripts interpret PEERDNS=no to mean "never touch resolv.conf". NetworkManager interprets it to say "never add automatic (DHCP, PPP, VPN, etc.) nameservers to resolv.conf".
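For example, a minimal ifcfg file following the notes above; a sketch only, where the device name, IP, and zone are made-up values:
  # /etc/sysconfig/network-scripts/ifcfg-bcn1_link1
  NAME=bcn1_link1
  DEVICE=bcn1_link1
  ONBOOT=yes
  BOOTPROTO=none
  IPADDR0=10.201.4.1
  PREFIX0=16        # PREFIX0 overrules NETMASK0, so NETMASK0 is not set
  ZONE=BCN          # hypothetical firewalld zone, for the firewall stuff later
  PEERDNS=no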
Bond
* resend_igmp & num_unsol_na={1~255} may help if a switch is slow to notice traffic has moved to the new interface. The default is 1. Each update is sent 200ms apart.
* Bridged interfaces should use BRIDGE_UUID="", _not_ BRIDGE="". The former causes the latter to be ignored, and the latter is only kept for compatibility reasons.
* Resource names can contain any US-ASCII character, except for spaces.
* A resource groups 1 or more volumes that share a single replication stream; max 65,535 volumes per resource.
* DRBD does, however, ship with an LVM integration facility that automates the creation of LVM snapshots immediately before synchronization. This ensures that a consistent copy of the data is always available on the peer, even while synchronization is running. See Using automated LVM snapshots during DRBD synchronization for details on using this facility.
* Checksum-based synchronization computes a block's hash on source and target and skips if matching, possibly making resync much faster for blocks rewritten with the same data, but at the cost of CPU. Make this a user-configurable option under the advanced tab.
* Suspended replication allows congested replication links to suspend replication, leaving the peer in a consistent state, but allowing the primary to "pull ahead". When the congestion passes, the delta resyncs. Make this a user-configurable option with scary warnings.
* Online verification can (should?) be run periodically on the server host (the verification source will overwrite deltas on the verification target). Perhaps schedule it to run once/month? Do resources sequentially, as this places a CPU load on the nodes (see the sketch after this list).
* Replication traffic integrity checking uses one of the available kernel crypto algorithms to verify data integrity on transmission to the peer. If a replicated block can not be verified against the digest, the connection is dropped and immediately re-established; because of the bitmap, the typical result is a retransmission.
** Make this an option in the advanced tab. Test to see how much overhead this adds. Choose the lowest-overhead algorithm (within reason).
* Support for disk flushes might be something we want to disable, as it seems to force write-through even with a functioning FBWC/BBU. Need to test.
* Note: "Inconsistent" is almost always useless. "Consistent" and "Outdated" data can be used safely, just without whatever happened on the peer afterwards.
* Truck based replication, also known as disk shipping, is a means of preseeding a remote site with data to be replicated, by physically shipping storage media to the remote site.
* Make sure that SELinux doesn't block DRBD comms over the SN.
* See "5.15.1. Growing on-line" for growing a DRBD resource
** Shrinking online is ONLY possible if the metadata is external. Worth creating *_md LVs? Offline requires backing up and restoring the MD
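A minimal sketch of the periodic online-verify idea above, run from the verification source; it assumes DRBD 9's 'drbdadm status' output and is not a tested implementation:
  #!/bin/bash
  # Run online verification on each DRBD resource, one at a time, so that
  # only one verify is placing CPU load on the nodes at any given moment.
  for res in $(drbdadm sh-resources); do
      drbdadm verify "${res}"
      # Poll until this resource's verify finishes before starting the next.
      while drbdadm status "${res}" | grep -q VerifyS; do
          sleep 60
      done
  done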
* A resource agent receives all configuration information about the resource it manages via environment variables. The names of these environment variables are always the name of the resource parameter, prefixed with OCF_RESKEY_. For example, if the resource has an ip parameter set to 192.168.1.1, then the resource agent will have access to an environment variable OCF_RESKEY_ip holding that value.
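A minimal sketch of that contract from inside an agent; the 'ip' parameter matches the example above, and the monitor logic is made up:
  #!/bin/bash
  # Pacemaker exports each resource parameter as OCF_RESKEY_<name>.
  # With 'ip=192.168.1.1' configured, OCF_RESKEY_ip holds that value.
  ip="${OCF_RESKEY_ip}"

  monitor() {
      # A trivial check: is the address answering pings?
      ping -c 1 -W 1 "${ip}" >/dev/null 2>&1 && return 0   # OCF_SUCCESS
      return 7                                             # OCF_NOT_RUNNING
  }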
14:08 < lge> anyways, you can also "on-fail: retry"
OK, set the stop timeout to 60, set 'on-fail: block' and set the failure-timeout to 60 and see how pacemaker reacts.
failure-timeout
===
Migrate servers;
- Let ScanCore set 'node-health' attribute (http://clusterlabs.org/pacemaker/doc/en-US/Pacemaker/1.1/html-single/Pacemaker_Explained/index.html#s-node-health)
- Set 'migration-limit' to '1' to enforce serial live migration (http://clusterlabs.org/pacemaker/doc/en-US/Pacemaker/1.1/html-single/Pacemaker_Explained/index.html#s-cluster-options).
Migrate a single server by setting a location constraint against the node we want the VM off of.
- If anything goes wrong, the server will enter a blocked state in pacemaker.
- Recovery needs to be 'unmanage -> clean' to avoid a stop call.
11:57 <@kgaillot> for your design, that sounds right. between cleanup and manage, i'd make sure there was a PE run without any pending actions blocked by the unmanaging -- you can either look at the logs on the DC, run "crm_simulate -SL", or just check the status for a bit
11:58 <@kgaillot> you can play around with it by putting a higher preference on the to-be-cleaned node, to make sure it *does* move when you re-manage. that way you can see what logs/simulate/status look like
12:07 <@kgaillot> i'm thinking if you do crm_resource --reprobe instead of cleanup in the above sequence, that should prevent anything unexpected
12:07 <@kgaillot> unmanage -> adjust preferences if needed -> reprobe resource -> wait for probe results to come back in, and if status looks good -> re-manage
12:08 <@kgaillot> the reprobe will wipe the entire resource history and fail counts for the resource, causing pacemaker to recheck the current status on all nodes. if the status then shows the resource running where you expect/want it, with no errors, then it's not going to do anything further
12:09 <@kgaillot> (in 2.0, cleanup only erases the history where the resource has failed, while reprobe erases the history regardless)
12:13 <@kgaillot> if there are no failures in the resource history, there should be no risk of a full stop. if there is no resource history at all, then after reprobe, there should be no risk of any actions (assuming you've set up location preferences and stickiness how you want them)
Recover from a failed migration;
reset location to prefer current host -> unmanage resource -> cleanup resource -> manage resource
(running on node 2, so re-add location constraint - basically, make sure location constraint favours current host)
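A hedged sketch of that sequence using pcs and crm_resource; the resource and node names are placeholders, and per-resource reprobe needs a reasonably recent crm_resource (older versions reprobe all resources):
  pcs resource unmanage srv01                    # stop pacemaker acting on the resource
  pcs constraint location srv01 prefers node2    # make sure the current host is favoured
  crm_resource --reprobe --resource srv01        # wipe history/fail counts, re-detect state
  crm_simulate -SL                               # confirm no pending actions are queued
  pcs resource manage srv01                      # hand control back to pacemaker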
# Command section – Refer to Chapter 2 for a list of kickstart options. You must include the required options.
### NOTE: The %packages, %pre, %pre-install, %post, %onerror, and %traceback sections are all required to be closed with %end
# Section 2
# The %packages section – Refer to Chapter 3 for details.
# Section 3:
# The %pre, %pre-install, %post, %onerror, and %traceback sections – These sections can be in any order and are not required. Refer to Chapter 4, Chapter 5, and Chapter 6 for details.
====
0 root@pulsar:/var/lib/libvirt/images# dev_PATH=$(udevadm info /dev/sdb | grep -e ID_PATH=)
0 root@pulsar:/var/lib/libvirt/images# if [[ $dev_PATH == *"usb"* ]]; then echo "USB drive"; elif [[ $dev_PATH == *"nvme"* ]]; then echo "NVMe drive"; elif [[ $dev_PATH == *"ata"* ]]; then echo "SATA drive"; elif [[ $dev_PATH == *"scsi"* ]]; then echo "SCSI drive"; fi
USB drive
0 root@pulsar:/var/lib/libvirt/images# dev_PATH=$(udevadm info /dev/sda | grep -e ID_PATH=)
0 root@pulsar:/var/lib/libvirt/images# if [[ $dev_PATH == *"usb"* ]]; then echo "USB drive"; elif [[ $dev_PATH == *"nvme"* ]]; then echo "NVMe drive"; elif [[ $dev_PATH == *"ata"* ]]; then echo "SATA drive"; elif [[ $dev_PATH == *"scsi"* ]]; then echo "SCSI drive"; fi
SATA drive
0 root@pulsar:/var/lib/libvirt/images# dev_PATH=$(udevadm info /dev/nvme0n1 | grep -e ID_PATH=)
0 root@pulsar:/var/lib/libvirt/images# if [[ $dev_PATH == *"usb"* ]]; then echo "USB drive"; elif [[ $dev_PATH == *"nvme"* ]]; then echo "NVMe drive"; elif [[ $dev_PATH == *"ata"* ]]; then echo "SATA drive"; elif [[ $dev_PATH == *"scsi"* ]]; then echo "SCSI drive"; fi
NVMe drive
[root@localhost ~]# dev_PATH=$(udevadm info /dev/sda | grep -e ID_PATH=)
[root@localhost ~]# if [[ $dev_PATH == *"usb"* ]]; then echo "USB drive"; elif [[ $dev_PATH == *"nvme"* ]]; then echo "NVMe drive"; elif [[ $dev_PATH == *"ata"* ]]; then echo "SATA drive"; elif [[ $dev_PATH == *"scsi"* ]]; then echo "SCSI drive"; fi
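The repeated test above could be wrapped in a small helper; the function name is ours, and the fallback branch is an assumption (e.g. virtio disks appear to report no matching ID_PATH, as in the last session above):
  # Print the bus type of a block device, based on udev's ID_PATH value.
  drive_bus() {
      local dev_path
      dev_path=$(udevadm info "$1" | grep -e ID_PATH=)
      case "${dev_path}" in
          *usb*)  echo "USB drive"  ;;
          *nvme*) echo "NVMe drive" ;;
          *ata*)  echo "SATA drive" ;;
          *scsi*) echo "SCSI drive" ;;
          *)      echo "unknown"    ;;   # no ID_PATH match, e.g. virtio
      esac
  }
  drive_bus /dev/sda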
7 Miscellaneous (including macro packages and conventions), e.g. man(7), groff(7)
8 System administration commands (usually only for root)
A manual page consists of several sections.
Conventional section names include NAME, SYNOPSIS, CONFIGURATION, DESCRIPTION, OPTIONS, EXIT STATUS, RETURN VALUE, ERRORS, ENVIRONMENT, FILES, VERSIONS, CONFORMING TO, NOTES, BUGS, EXAMPLE, AUTHORS, and SEE ALSO.
The following conventions apply to the SYNOPSIS section and can be used as a guide in other sections.
bold text          type exactly as shown.
italic text        replace with appropriate argument.
[-abc]             any or all arguments within [ ] are optional.
-a|-b              options delimited by | cannot be used together.
argument ...       argument is repeatable.
[expression] ...   entire expression within [ ] is repeatable.
====
BEGIN TRANSACTION;
DROP FUNCTION history_alerts() CASCADE;
DROP TABLE history.alerts;
DROP TABLE alerts;
CREATE TABLE alerts (
    alert_uuid             uuid     not null  primary key,
    alert_host_uuid        uuid     not null,                 -- The node or dashboard that this alert came from.
    alert_set_by           text     not null,
    alert_level            integer  not null,                 -- 1 (critical), 2 (warning), 3 (notice) or 4 (info)
    alert_title            text     not null,                 -- ScanCore will read in the agent's <name>.xml words file and look for this message key.
    alert_message          text     not null,                 -- ScanCore will read in the agent's <name>.xml words file and look for this message key.
    alert_sort_position    integer  not null  default 9999,   -- The alerts will sort on this column. It allows for an optional sorting of the messages in the alert.
    alert_show_header      integer  not null  default 1,      -- This can be set to '0' to have the alert printed with only the contents of the string, no headers.
            <!-- IPMI data comes from hosts -> host_ipmi. If it is found, it is always used as the first fence device. -->
            <!-- PDU shows how to reference devices. -->
            <method name="pdu" type="pdu" order="1">
                <!-- The 'name' parameter has to match an entry under devices -> pdu's name. -->
                <device name="xx-pdu01" port="1" />
                <device name="xx-pdu02" port="2" />
            </method>
            <!-- This would only be used on its own, but it is here as an example. The 'server_name' is the name of the VM on the host. -->
            <method name="kvm" type="kvm" order="1">
                <device name="host1" server_name="xx-a01n01" />
            </method>
        </fence>
        <power>
            <!-- The 'name' parameter has to match an entry under devices -> ups's name. -->
            <ups name="xx-ups01" />
            <ups name="xx-ups02" />
        </power>
    </node>
    <dr name="xx-a01dr01.digimer.ca" uuid="xxx">
        <!-- IPMI is used to power on/off for scheduled, periodic resyncs. -->
    </dr>
</machines>
<!-- These devices need to reference entries in the 'fences' database table. -->
<fences>
    <!-- When a machine references these, the 'type="x"' references the child element and the contained 'name="x"' references the child's child element by name. -->