From fd4cf3bf21325e485258910b928a87373cc66dc5 Mon Sep 17 00:00:00 2001 From: f0o Date: Mon, 18 May 2015 10:57:45 +0000 Subject: [PATCH 001/497] Initial commit --- README.md | 122 +++- agent-local/README | 5 + agent-local/apache | 92 +++ agent-local/bind | 21 + agent-local/dmi | 9 + agent-local/dpkg | 20 + agent-local/drbd | 372 ++++++++++++ agent-local/hddtemp | 10 + agent-local/memcached | 13 + agent-local/munin | 15 + agent-local/mysql | 1255 +++++++++++++++++++++++++++++++++++++++ agent-local/nginx | 38 ++ agent-local/rpm | 19 + agent-local/temperature | 4 + agent-local/tinydns | 18 + check_mk_COPYING | 341 +++++++++++ check_mk_agent | 659 ++++++++++++++++++++ check_mk_xinetd | 49 ++ 18 files changed, 3060 insertions(+), 2 deletions(-) create mode 100644 agent-local/README create mode 100755 agent-local/apache create mode 100755 agent-local/bind create mode 100755 agent-local/dmi create mode 100755 agent-local/dpkg create mode 100755 agent-local/drbd create mode 100755 agent-local/hddtemp create mode 100755 agent-local/memcached create mode 100755 agent-local/munin create mode 100755 agent-local/mysql create mode 100755 agent-local/nginx create mode 100755 agent-local/rpm create mode 100755 agent-local/temperature create mode 100755 agent-local/tinydns create mode 100644 check_mk_COPYING create mode 100755 check_mk_agent create mode 100644 check_mk_xinetd diff --git a/README.md b/README.md index bd326dcbd..2340b52e6 100644 --- a/README.md +++ b/README.md @@ -1,2 +1,120 @@ -# agent -LibreNMS Agent & Scripts +Agent setup +----------- + +To gather data from remote systems you can use LibreNMS in combination with check_mk (included in the librenms-agent repository). + +The agent uses TCP-Port 6556, please allow access from the LibreNMS-Host and Poller-Nodes if you're using the Distributed Polling setup. 
+ +On each of the hosts you would like to use the agent on then you need to do the following: + +* Copy the `check_mk_agent` script into `/usr/bin` and make it executable. + +```shell +cp check_mk_agent /usr/bin/check_mk_agent +chmod +x /usr/bin/check_mk_agent +``` + +* Copy the xinetd config file into place. + +```shell +cp check_mk_xinetd /etc/xinetd.d/check_mk +``` + +* Create the relevant directories. + +```shell +mkdir -p /usr/lib/check_mk_agent/plugins /usr/lib/check_mk_agent/local +``` + +* Copy each of the scripts from `agent-local/` into `/usr/lib/check_mk_agent/local` +* And restart xinetd. + +```shell +/etc/init.d/xinetd restart +``` + +* Login to the LibreNMS web interface and edit the device you want to monitor. Under the modules section, ensure that unix-agent is enabled. +* Then under Applications, enable the apps that you plan to monitor. +* Wait, in around 10 minutes you should start seeing data in your graphs under Apps for the device. + +## Application Specific Configuration + +### BIND9/named + +Create stats file with appropriate permissions: +```shell +~$ touch /etc/bind/named.stats +~$ chown bind:bind /etc/bind/named.stats +``` +Change `user:group` to the user and group that's running bind/named. + +Bind/named configuration: +```text +options { + ... + statistics-file "/etc/bind/named.stats"; + zone-statistics yes; + ... +}; +``` +Restart your bind9/named after changing the configuration. + +Verify that everything works by executing `rndc stats && cat /etc/bind/named.stats`. +In case you get a `Permission Denied` error, make sure you chown'ed correctly. + +Note: if you change the path you will need to change the path in `agent-local/bind`. + +### TinyDNS/djbdns + +__Installation__: + +1. Get tinystats sources from http://www.morettoni.net/tinystats.en.html +2. Compile like as advised. + _Note_: In case you get `Makefile:9: *** missing separator. 
Stop.`, compile manually using: + * With IPv6: `gcc -Wall -O2 -fstack-protector -DWITH_IPV6 -o tinystats tinystats.c` + * Without IPv6: `gcc -Wall -O2 -fstack-protector -o tinystats tinystats.c` +3. Install into prefered path, like `/usr/bin/`. + +__Configuration__: + +_Note_: In this part we assume that you use DJB's [Daemontools](http://cr.yp.to/daemontools.html) to start/stop tinydns. +And that your tinydns-instance is located in `/service/dns`, adjust this path if necesary. + +1. Replace your _log_'s `run` file, typically located in `/service/dns/log/run` with: + ``` + #!/bin/sh + + exec setuidgid dnslog tinystats ./main/tinystats/ multilog t n3 s250000 ./main/ + ``` +2. Create tinystats directory and chown: + `mkdir /service/dns/log/main/tinystats && chown dnslog:nofiles /service/dns/log/main/tinystats` +3. Restart TinyDNS and Daemontools: `/etc/init.d/svscan restart` + _Note_: Some say `svc -t /service/dns` is enough, on my install (Gentoo) it doesnt rehook the logging and I'm forced to restart it entirely. + +### MySQL + +Unlike most other scripts, the MySQL script requires a configuration file `/usr/lib/check_mk_agent/local/mysql.cnf` with following content: + +```php +; + close INFILE; +} else { + # grab the status URL (fresh data) + @data = split /(\n)/, LWP::Simple::get( 'http://localhost/server-status?auto' ) + or die "Data fetch failure.\n"; + + # write file + $tmpfile = "$CACHEFILE.TMP.$PID"; + open (OUTFILE, ">$tmpfile") + or die "File open failure: $tmpfile\n"; + print OUTFILE @data; + close OUTFILE; + rename ( $tmpfile, $CACHEFILE ); +} + +print "<<>>\n"; + +# dice up the data +@scoreboardkey = ( '_', 'S', 'R', 'W', 'K', 'D', 'C', 'L', 'G', 'I', '.' 
); +%params = {}; +foreach $line (@data) { + chomp $line; + @fields = split( /: /, $line); + if ($fields[0] eq 'Scoreboard') { + # count up the scoreboard into states + %states = {}; + foreach $state (@scoreboardkey) { + $states{$state} = 0; + } + foreach $state ( split(//, $fields[1]) ) { + $states{$state}++; + } + } elsif ($fields[0] eq 'Total kBytes') { + # turn into base (byte) value + $params{$fields[0]} = int($fields[1])*1024; + } else { + # just store everything else + $params{$fields[0]} = $fields[1]; + } +} + +# output the data in order (this is because some platforms don't have them all) +@dataorder = ( + 'Total Accesses', + 'Total kBytes', + 'CPULoad', + 'Uptime', + 'ReqPerSec', + 'BytesPerSec', + 'BytesPerReq', + 'BusyServers', + 'IdleServers' +); +foreach $param (@dataorder) { + if (exists $params{$param}) { + print $params{$param}."\n"; + } else { + # not all Apache's have all stats + print "U\n"; + } +} + +# print the scoreboard +foreach $state (@scoreboardkey) { + print $states{$state}."\n"; +} + diff --git a/agent-local/bind b/agent-local/bind new file mode 100755 index 000000000..75110d982 --- /dev/null +++ b/agent-local/bind @@ -0,0 +1,21 @@ +#!/bin/bash +# (c) 2015, f0o@devilcode.org +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . 
+ +stats="/etc/bind/named.stats" + +echo "<<>>" +> $stats +rndc stats && cat $stats + diff --git a/agent-local/dmi b/agent-local/dmi new file mode 100755 index 000000000..f040c89dd --- /dev/null +++ b/agent-local/dmi @@ -0,0 +1,9 @@ +#!/bin/bash + +echo '<<>>' + +# requires dmidecode +for FIELD in bios-vendor bios-version bios-release-date system-manufacturer system-product-name system-version system-serial-number system-uuid baseboard-manufacturer baseboard-product-name baseboard-version baseboard-serial-number baseboard-asset-tag chassis-manufacturer chassis-type chassis-version chassis-serial-number chassis-asset-tag processor-family processor-manufacturer processor-version processor-frequency +do + echo $FIELD=$(dmidecode -s $FIELD) +done diff --git a/agent-local/dpkg b/agent-local/dpkg new file mode 100755 index 000000000..1c3d08011 --- /dev/null +++ b/agent-local/dpkg @@ -0,0 +1,20 @@ +#!/bin/bash +# Cache the file for 30 minutes +# If you want to override this, put the command in cron. +# We cache because it is a 1sec delay, which is painful for the poller +if [ -x /usr/bin/dpkg-query ]; then + DATE=$(date +%s) + FILE=/tmp/agent-local-dpkg + + if [ ! -e $FILE ]; then + dpkg-query -W --showformat='${Status} ${Package} ${Version} ${Architecture} ${Installed-Size}\n'|grep " installed "|cut -d\ -f4- > $FILE + fi + FILEMTIME=$(stat -c %Y $FILE) + FILEAGE=$(($DATE-$FILEMTIME)) + if [ $FILEAGE -gt 1800 ]; then + dpkg-query -W --showformat='${Status} ${Package} ${Version} ${Architecture} ${Installed-Size}\n'|grep " installed "|cut -d\ -f4- > $FILE + fi + echo "<<>>" + cat $FILE +fi + diff --git a/agent-local/drbd b/agent-local/drbd new file mode 100755 index 000000000..49e62b1f5 --- /dev/null +++ b/agent-local/drbd @@ -0,0 +1,372 @@ +#!/usr/bin/perl + +=head1 NAME + +check_drbd - Nagios plugin for DRBD + +=head1 SYNOPSIS + +B [B<--verbose> | B<-v>] + +=head1 DESCRIPTION + +B is a Nagios plugin for DRBD. 
It checks the connection state, +resource roles and disk states for every configured DRBD resource, and +produces a WARNING or CRITICAL alert if anything is amiss. The states +of both the local and remote sides of each connection are monitored. + +=head2 Nagios status information + +The status information emitted by this plugin is similar to the information +in F: + + drbd0: Connected Primary/Secondary UpToDate/UpToDate + | | | | | | + | | | | | Remote disk state + | | | | Local disk state + | | | Remote resource role + | | Local resource role + | Connection state + DRBD device + +If more than one device is present, and all devices are OK, the output is +summarised: + + drbd0: PriConUpT, drbd1: SecConUpT + +If any devices are not OK, the output contains their statuses in full. + +=head2 Nagios performance data + +Complete performance data is emitted for all configured DRBD resources: + +=over + +=item drbdI<*>_ns + +=item drbdI<*>_nr + +The volume of network data sent to and received from the peer, in kiB. + +=item drbdI<*>_dw + +=item drbdI<*>_dr + +The volume of network data written to and read from the local disk, in kiB. + +=item drbdI<*>_al + +The number of updates of the activity log area of the metadata. + +=item drbdI<*>_lo + +The number of open requests to the local I/O subsystem issued by DRBD. + +=item drbdI<*>_pe + +The number of requests sent to the peer but not yet been answered by the latter. + +=item drbdI<*>_ua + +The number of requests received by the peer but not yet been answered by the latter. + +=item drbdI<*>_ap + +The number of block I/O requests forwarded by DRBD, but not yet answered by DRBD. + +=item drbdI<*>_ep + +The number of epoch objects. + +=item drbdI<*>_oos + +The amount of storage currently out-of-sync, in kiB. + +=back + +=head1 OPTIONS + +=over + +=item B<-v>, B<--verbose> + +Increase the verbosity of the output messages. 
This disables the Nagios status +information summarisation described above: all resources' statuses are printed +in full. + +=back + +=head1 EXIT STATUS + +=over + +=item 0 + +All resources are OK. + +=item 1 + +Some resources are not OK, but do not need immediate attention. + +=item 2 + +Some resources are not OK and need immediate attention. + +=item 3 + +An error occurred while collecting the resources' statuses. + +=back + +=head1 FILES + +F + +=head1 SEE ALSO + +L + +=cut + +use strict; +use warnings; + +use constant BASENAME => ($0 =~ m{.*/([^/]+)})[0] || 'check_drbd'; + +use constant STATE_FILE => '/proc/drbd'; + +use constant { + OK => 0, + WARNING => 1, + CRITICAL => 2, + UNKNOWN => 3, +}; + +use Getopt::Long; +use IO::File; + +sub help; +sub usage; + +sub perfdata; +sub ok; +sub warning; +sub critical; +sub unknown; + +sub get_state; + +$SIG{__DIE__} = sub { + die @_ if $^S; + print @_; + exit UNKNOWN; +}; + +my $verbose; + +Getopt::Long::Configure('bundling', 'no_ignore_case'); +GetOptions( + 'verbose|v+' => \$verbose, + 'help|?' => sub { help; exit 0 }, + 'usage' => sub { usage; exit 0 }, +) and @ARGV == 0 + or do { usage; exit UNKNOWN }; + +my @state = get_state; +my $status = OK; + +print "<<>>\n"; + +foreach my $id (0 .. 
$#state) { + my $device = $state[$id] + or next; + + # Assume CRITICAL by default + + foreach (qw( cs )) { + $device->{"${_}_level"} = { + Connected => OK, + Unconfigured => OK, + StandAlone => WARNING, + SyncingAll => WARNING, + SyncingQuick => WARNING, + SyncSource => WARNING, + SyncTarget => WARNING, + VerifyS => WARNING, + VerifyT => WARNING, + Disconnecting => WARNING, + TearDown => WARNING, + StartingSyncS => WARNING, + StartingSyncT => WARNING, + WFSyncUUID => WARNING, + }->{$device->{$_}}; + $device->{"${_}_level"} = CRITICAL unless defined $device->{"${_}_level"}; + + if ($device->{oos}) { + $device->{oos_level} = { + StartingSyncS => OK, + StartingSyncT => OK, + SyncSource => OK, + SyncTarget => OK, + PausedSyncS => OK, + PausedSyncT => OK, + }->{$device->{$_}}; + $device->{oos_level} = CRITICAL unless defined $device->{oos_level}; + } + } + + foreach (qw( ro pro )) { + $device->{"${_}_level"} = { + Primary => OK, + Secondary => OK, + }->{$device->{$_}}; + $device->{"${_}_level"} = CRITICAL unless defined $device->{"${_}_level"}; + } + + foreach (qw( ds pds )) { + $device->{"${_}_level"} = { + UpToDate => OK, + Consistent => OK, + Negotiating => WARNING, + Attaching => WARNING, + }->{$device->{$_}}; + $device->{"${_}_level"} = CRITICAL unless defined $device->{"${_}_level"}; + } + + my @extra; + if ($device->{oos}) { + push @extra, sprintf '%d kiB out-of-sync', $device->{oos}; + } + if ($device->{iof} !~ /^r.--(.(-)?)?$/) { + $device->{iof_level} = CRITICAL; + push @extra, sprintf 'I/O flags: %s', $device->{iof}; + } + my $extra = @extra ? 
sprintf(' (%s)', join ', ', @extra) : ''; + + my $level = OK; + foreach (grep /_level$/, keys %$device) { + $level = $device->{$_} if $level < $device->{$_}; + } + $status = $level if $status < $level; + + $device->{level} = $level; + $device->{info} = sprintf 'drbd%d:cs=%s|ro=%s|pro=%s|ds=%s|pds=%s|extra=%s', $id, $device->{cs}, $device->{ro}, $device->{pro}, $device->{ds}, $device->{pds}, $extra; + $device->{short} = sprintf 'drbd%d: %0.3s%0.3s%0.3s%s', $id, $device->{ro}, $device->{cs}, $device->{ds}, $extra; # Role and connstate reversed, like old check_drbd + + foreach (qw( ns nr dw dr al bm )) { + my $value = $device->{$_}; + defined $value + or next; + perfdata "${_}=${value}"; + } + + foreach (qw( lo pe ua ap oos )) { + my $value = $device->{$_}; + defined $value + or next; + perfdata "${_}=${value}"; + } +} + +@state + or critical 'No DRBD volumes present'; + +if ($status) { + my $message = join ', ', map $_->{info}, grep { defined and $_->{level} } @state; + if ($status == WARNING) { + warning $message; + } else { + critical $message; + } +} else { + my $message = join ', ', map { ($verbose || @state == 1) ? 
$_->{info} : $_->{short} } grep defined, @state; + ok $message; +} + +die; + +########################################################################### + +sub help { + print <) { + if (m(^ \s* (\d+): \s* cs:(\w+) \s+ (?:ro|st):(\w+)/(\w+) \s+ ds:(\w+)/(\w+) \s+ \S+ \s+ (\S+))x) { + $device = $state[$1] = { + cs => $2, + ro => $3, + pro => $4, + ds => $5, + pds => $6, + iof => $7, + }; + next; + }; + + $device or next; + $device->{$1} = $2 while /(\w+):(\S+)/g; + } + + @state; +} diff --git a/agent-local/hddtemp b/agent-local/hddtemp new file mode 100755 index 000000000..b6780396d --- /dev/null +++ b/agent-local/hddtemp @@ -0,0 +1,10 @@ +#!/bin/bash + +# hddtemp sensor readings +# needs hddtemp daemon listening on (at least) localhost +# requires netcat to be installed and in the path +# (c) 2012, Tom Laermans for Observium + +echo '<<>>' +nc localhost 7634 +echo diff --git a/agent-local/memcached b/agent-local/memcached new file mode 100755 index 000000000..b4ed626f8 --- /dev/null +++ b/agent-local/memcached @@ -0,0 +1,13 @@ +#!/usr/bin/php +addServer('localhost', 11211); +$stats = $m->getStats(); + +if(is_array($stats)) +{ + echo("<<>>\n"); + echo(serialize($m->getStats())); + echo("\n"); +} +?> diff --git a/agent-local/munin b/agent-local/munin new file mode 100755 index 000000000..47e513fa5 --- /dev/null +++ b/agent-local/munin @@ -0,0 +1,15 @@ +# Lokale Einzelchecks +export MUNIN_LIBDIR=/usr/share/munin +if cd munin-scripts +then + for skript in $(ls) + do + if [ -x "$skript" ] ; then + echo "<<>>" + ./$skript + ./$skript config + fi + done + cd .. +fi + diff --git a/agent-local/mysql b/agent-local/mysql new file mode 100755 index 000000000..c5077c859 --- /dev/null +++ b/agent-local/mysql @@ -0,0 +1,1255 @@ +#!/usr/bin/php + true, # Do you want to check InnoDB statistics? + 'master' => true, # Do you want to check binary logging? + 'slave' => true, # Do you want to check slave status? + 'procs' => true, # Do you want to check SHOW PROCESSLIST? 
+); + +$use_ss = FALSE; # Whether to use the script server or not +$debug = FALSE; # Define whether you want debugging behavior. +$debug_log = FALSE; # If $debug_log is a filename, it'll be used. + +# ============================================================================ +# You should not need to change anything below this line. +# ============================================================================ +$version = "1.1.7"; + +# ============================================================================ +# Include settings from an external config file (issue 39). +# ============================================================================ +if (file_exists(__FILE__ . '.cnf' ) ) { + require(__FILE__ . '.cnf'); +} else { + echo("No ".__FILE__ . '.cnf found!'); + exit(); +} + +# Make this a happy little script even when there are errors. +$no_http_headers = true; +ini_set('implicit_flush', false); # No output, ever. +if ($debug ) { + ini_set('display_errors', true); + ini_set('display_startup_errors', true); + ini_set('error_reporting', 2147483647); +} +else { + ini_set('error_reporting', E_ERROR); +} +ob_start(); # Catch all output such as notices of undefined array indexes. +function error_handler($errno, $errstr, $errfile, $errline) { + print("$errstr at $errfile line $errline\n"); + debug("$errstr at $errfile line $errline"); +} +# ============================================================================ +# Set up the stuff we need to be called by the script server. +# ============================================================================ +#if ($use_ss ) { +# if (file_exists( dirname(__FILE__) . "/../include/global.php") ) { +# # See issue 5 for the reasoning behind this. +# debug("including " . dirname(__FILE__) . "/../include/global.php"); +# include_once(dirname(__FILE__) . "/../include/global.php"); +# } +# elseif (file_exists( dirname(__FILE__) . "/../include/config.php" ) ) { +# # Some Cacti installations don't have global.php. 
+# debug("including " . dirname(__FILE__) . "/../include/config.php"); +# include_once(dirname(__FILE__) . "/../include/config.php"); +# } +#} + +# ============================================================================ +# Make sure we can also be called as a script. +# ============================================================================ +if (!isset($called_by_script_server)) { + debug($_SERVER["argv"]); + array_shift($_SERVER["argv"]); # Strip off this script's filename + $options = parse_cmdline($_SERVER["argv"]); + validate_options($options); + $result = ss_get_mysql_stats($options); + + debug($result); + if (!$debug ) { + # Throw away the buffer, which ought to contain only errors. + ob_end_clean(); + } + else { + ob_end_flush(); # In debugging mode, print out the errors. + } + + echo("<<>>\n"); + + # Split the result up and extract only the desired parts of it. + $options['items'] = ""; + $wanted = explode(',', $options['items']); + $output = array(); + foreach ( explode(' ', $result) as $item ) { + if (in_array(substr($item, 0, 2), $wanted) ) { + $output[] = $item; + } + list($short, $val) = explode(":", $item); + echo(strtolower($short).":".strtolower($val)."\n"); + } + debug(array("Final result", $output)); + print(implode(' ', $output)); +} + +# ============================================================================ +# End "if file was not included" section. +# ============================================================================ +} + +# ============================================================================ +# Work around the lack of array_change_key_case in older PHP. 
+# ============================================================================ +if (!function_exists('array_change_key_case') ) { + function array_change_key_case($arr) { + $res = array(); + foreach ( $arr as $key => $val ) { + $res[strtolower($key)] = $val; + } + return $res; + } +} + +# ============================================================================ +# Validate that the command-line options are here and correct +# ============================================================================ +function validate_options($options) { + debug($options); + $opts = array('items', 'user', 'pass', 'heartbeat', 'nocache', 'port'); + # Required command-line options + foreach ( array() as $option ) { + if (!isset($options[$option]) || !$options[$option] ) { + usage("Required option --$option is missing"); + } + } + foreach ( $options as $key => $val ) { + if (!in_array($key, $opts) ) { + usage("Unknown option --$key"); + } + } +} + +# ============================================================================ +# Print out a brief usage summary +# ============================================================================ +function usage($message) { + global $mysql_host, $mysql_user, $mysql_pass, $mysql_port, $heartbeat; + + $usage = << --items [OPTION] + + --host Hostname to connect to; use host:port syntax to specify a port + Use :/path/to/socket if you want to connect via a UNIX socket + --items Comma-separated list of the items whose data you want + --user MySQL username; defaults to $mysql_user if not given + --pass MySQL password; defaults to $mysql_pass if not given + --heartbeat MySQL heartbeat table; defaults to '$heartbeat' (see mk-heartbeat) + --nocache Do not cache results in a file + --port MySQL port; defaults to $mysql_port if not given + --mysql_ssl Add the MYSQL_CLIENT_SSL flag to mysql_connect() call + +EOF; + die($usage); +} + +# ============================================================================ +# Parse command-line arguments, in 
the format --arg value --arg value, and +# return them as an array ( arg => value ) +# ============================================================================ +function parse_cmdline( $args ) { + $result = array(); + $cur_arg = ''; + foreach ($args as $val) { + if (strpos($val, '--') === 0 ) { + if (strpos($val, '--no') === 0 ) { + # It's an option without an argument, but it's a --nosomething so + # it's OK. + $result[substr($val, 2)] = 1; + $cur_arg = ''; + } + elseif ($cur_arg ) { # Maybe the last --arg was an option with no arg + if ($cur_arg == '--user' || $cur_arg == '--pass' || $cur_arg == '--port' ) { + # Special case because Cacti will pass these without an arg + $cur_arg = ''; + } + else { + die("No arg: $cur_arg\n"); + } + } + else { + $cur_arg = $val; + } + } + else { + $result[substr($cur_arg, 2)] = $val; + $cur_arg = ''; + } + } + if ($cur_arg && ($cur_arg != '--user' && $cur_arg != '--pass' && $cur_arg != '--port') ) { + die("No arg: $cur_arg\n"); + } + debug($result); + return $result; +} + +# ============================================================================ +# This is the main function. Some parameters are filled in from defaults at the +# top of this file. +# ============================================================================ +function ss_get_mysql_stats( $options ) { + # Process connection options and connect to MySQL. + global $debug, $mysql_user, $mysql_pass, $heartbeat, $cache_dir, $cache_time, + $chk_options, $mysql_host, $mysql_port, $mysql_ssl; + + # Connect to MySQL. + $user = isset($options['user']) ? $options['user'] : $mysql_user; + $pass = isset($options['pass']) ? $options['pass'] : $mysql_pass; + $port = isset($options['port']) ? $options['port'] : $mysql_port; + $host = isset($options['host']) ? $options['host'] : $mysql_host; + + $heartbeat = isset($options['heartbeat']) ? $options['heartbeat'] : $heartbeat; + # If there is a port, or if it's a non-standard port, we add ":$port" to the + # hostname. 
+ $host_str = $host + . $port != 3306 ? ":$port" : ''; + debug(array('connecting to', $host_str, $user, $pass)); + if (!extension_loaded('mysql') ) { + debug("The MySQL extension is not loaded"); + die("The MySQL extension is not loaded"); + } + if ($mysql_ssl || (isset($options['mysql_ssl']) && $options['mysql_ssl']) ) { + $conn = mysql_connect($host_str, $user, $pass, true, MYSQL_CLIENT_SSL); + } + else { + $conn = mysql_connect($host_str, $user, $pass); + } + if (!$conn ) { + die("MySQL: " . mysql_error()); + } + + $sanitized_host = str_replace(array(":", "/"), array("", "_"), $host); + $cache_file = "$cache_dir/agent-local-mysql"; + debug("Cache file is $cache_file"); + + # First, check the cache. + $fp = null; + if (!isset($options['nocache']) ) { + if ($fp = fopen($cache_file, 'a+') ) { + $locked = flock($fp, 1); # LOCK_SH + if ($locked ) { + if (filesize($cache_file) > 0 + && filectime($cache_file) + ($cache_time) > time() + && ($arr = file($cache_file)) + ) {# The cache file is good to use. + debug("Using the cache file"); + fclose($fp); + return $arr[0]; + } + else { + debug("The cache file seems too small or stale"); + # Escalate the lock to exclusive, so we can write to it. + if (flock($fp, 2) ) { # LOCK_EX + # We might have blocked while waiting for that LOCK_EX, and + # another process ran and updated it. Let's see if we can just + # return the data now: + if (filesize($cache_file) > 0 + && filectime($cache_file) + ($cache_time) > time() + && ($arr = file($cache_file)) + ) {# The cache file is good to use. + debug("Using the cache file"); + fclose($fp); + return $arr[0]; + } + ftruncate($fp, 0); # Now it's ready for writing later. + } + } + } + else { + debug("Couldn't lock the cache file, ignoring it."); + $fp = null; + } + } + } + else { + $fp = null; + debug("Couldn't open the cache file"); + } + + # Set up variables. 
+ $status = array( # Holds the result of SHOW STATUS, SHOW INNODB STATUS, etc + # Define some indexes so they don't cause errors with += operations. + 'relay_log_space' => null, + 'binary_log_space' => null, + 'current_transactions' => null, + 'locked_transactions' => null, + 'active_transactions' => null, + 'innodb_locked_tables' => null, + 'innodb_tables_in_use' => null, + 'innodb_lock_structs' => null, + 'innodb_lock_wait_secs' => null, + 'innodb_sem_waits' => null, + 'innodb_sem_wait_time_ms'=> null, + # Values for the 'state' column from SHOW PROCESSLIST (converted to + # lowercase, with spaces replaced by underscores) + 'State_closing_tables' => null, + 'State_copying_to_tmp_table' => null, + 'State_end' => null, + 'State_freeing_items' => null, + 'State_init' => null, + 'State_locked' => null, + 'State_login' => null, + 'State_preparing' => null, + 'State_reading_from_net' => null, + 'State_sending_data' => null, + 'State_sorting_result' => null, + 'State_statistics' => null, + 'State_updating' => null, + 'State_writing_to_net' => null, + 'State_none' => null, + 'State_other' => null, # Everything not listed above + ); + + # Get SHOW STATUS and convert the name-value array into a simple + # associative array. + $result = run_query("SHOW /*!50002 GLOBAL */ STATUS", $conn); + foreach ( $result as $row ) { + $status[$row[0]] = $row[1]; + } + + # Get SHOW VARIABLES and do the same thing, adding it to the $status array. + $result = run_query("SHOW VARIABLES", $conn); + foreach ( $result as $row ) { + $status[$row[0]] = $row[1]; + } + + # Get SHOW SLAVE STATUS, and add it to the $status array. + if ($chk_options['slave'] ) { + $result = run_query("SHOW SLAVE STATUS", $conn); + $slave_status_rows_gotten = 0; + foreach ( $result as $row ) { + $slave_status_rows_gotten++; + # Must lowercase keys because different MySQL versions have different + # lettercase. 
+ $row = array_change_key_case($row, CASE_LOWER); + $status['relay_log_space'] = $row['relay_log_space']; + $status['slave_lag'] = $row['seconds_behind_master']; + + # Check replication heartbeat, if present. + if ($heartbeat ) { + $result2 = run_query( + "SELECT GREATEST(0, UNIX_TIMESTAMP() - UNIX_TIMESTAMP(ts) - 1)" + . " AS delay FROM $heartbeat WHERE id = 1", $conn); + $slave_delay_rows_gotten = 0; + foreach ( $result2 as $row2 ) { + $slave_delay_rows_gotten++; + if ($row2 && is_array($row2) + && array_key_exists('delay', $row2) ) + { + $status['slave_lag'] = $row2['delay']; + } + else { + debug("Couldn't get slave lag from $heartbeat"); + } + } + if ($slave_delay_rows_gotten == 0 ) { + debug("Got nothing from heartbeat query"); + } + } + + # Scale slave_running and slave_stopped relative to the slave lag. + $status['slave_running'] = ($row['slave_sql_running'] == 'Yes') + ? $status['slave_lag'] : 0; + $status['slave_stopped'] = ($row['slave_sql_running'] == 'Yes') + ? 0 : $status['slave_lag']; + } + if ($slave_status_rows_gotten == 0 ) { + debug("Got nothing from SHOW SLAVE STATUS"); + } + } + + # Get SHOW MASTER STATUS, and add it to the $status array. + if ($chk_options['master'] + && array_key_exists('log_bin', $status) + && $status['log_bin'] == 'ON' + ) { # See issue #8 + $binlogs = array(0); + $result = run_query("SHOW MASTER LOGS", $conn); + foreach ( $result as $row ) { + $row = array_change_key_case($row, CASE_LOWER); + # Older versions of MySQL may not have the File_size column in the + # results of the command. Zero-size files indicate the user is + # deleting binlogs manually from disk (bad user! bad!). + if (array_key_exists('file_size', $row) && $row['file_size'] > 0 ) { + $binlogs[] = $row['file_size']; + } + } + if (count($binlogs)) { + $status['binary_log_space'] = to_int(array_sum($binlogs)); + } + } + + # Get SHOW PROCESSLIST and aggregate it by state, then add it to the array + # too. 
+ if ($chk_options['procs'] ) { + $result = run_query('SHOW PROCESSLIST', $conn); + foreach ( $result as $row ) { + $state = $row['State']; + if (is_null($state) ) { + $state = 'NULL'; + } + if ($state == '' ) { + $state = 'none'; + } + $state = str_replace(' ', '_', strtolower($state)); + if (array_key_exists("State_$state", $status) ) { + increment($status, "State_$state", 1); + } + else { + increment($status, "State_other", 1); + } + } + } + + # Get SHOW INNODB STATUS and extract the desired metrics from it, then add + # those to the array too. + if ($chk_options['innodb'] + && array_key_exists('have_innodb', $status) + && $status['have_innodb'] == 'YES' + ) { + $result = run_query("SHOW /*!50000 ENGINE*/ INNODB STATUS", $conn); + $istatus_text = $result[0]['Status']; + $istatus_vals = get_innodb_array($istatus_text); + + # Override values from InnoDB parsing with values from SHOW STATUS, + # because InnoDB status might not have everything and the SHOW STATUS is + # to be preferred where possible. + $overrides = array( + 'Innodb_buffer_pool_pages_data' => 'database_pages', + 'Innodb_buffer_pool_pages_dirty' => 'modified_pages', + 'Innodb_buffer_pool_pages_free' => 'free_pages', + 'Innodb_buffer_pool_pages_total' => 'pool_size', + 'Innodb_data_fsyncs' => 'file_fsyncs', + 'Innodb_data_pending_reads' => 'pending_normal_aio_reads', + 'Innodb_data_pending_writes' => 'pending_normal_aio_writes', + 'Innodb_os_log_pending_fsyncs' => 'pending_log_flushes', + 'Innodb_pages_created' => 'pages_created', + 'Innodb_pages_read' => 'pages_read', + 'Innodb_pages_written' => 'pages_written', + 'Innodb_rows_deleted' => 'rows_deleted', + 'Innodb_rows_inserted' => 'rows_inserted', + 'Innodb_rows_read' => 'rows_read', + 'Innodb_rows_updated' => 'rows_updated', + ); + + # If the SHOW STATUS value exists, override... 
+ foreach ( $overrides as $key => $val ) { + if (array_key_exists($key, $status) ) { + debug("Override $key"); + $istatus_vals[$val] = $status[$key]; + } + } + + # Now copy the values into $status. + foreach ( $istatus_vals as $key => $val ) { + $status[$key] = $istatus_vals[$key]; + } + } + + # Make table_open_cache backwards-compatible (issue 63). + if (array_key_exists('table_open_cache', $status) ) { + $status['table_cache'] = $status['table_open_cache']; + } + + # Compute how much of the key buffer is used and unflushed (issue 127). + $status['Key_buf_bytes_used'] + = big_sub($status['key_buffer_size'], + big_multiply($status['Key_blocks_unused'], + $status['key_cache_block_size'])); + $status['Key_buf_bytes_unflushed'] + = big_multiply($status['Key_blocks_not_flushed'], + $status['key_cache_block_size']); + + if (array_key_exists('unflushed_log', $status) + && $status['unflushed_log'] + ) { + # TODO: I'm not sure what the deal is here; need to debug this. But the + # unflushed log bytes spikes a lot sometimes and it's impossible for it to + # be more than the log buffer. + debug("Unflushed log: $status[unflushed_log]"); + $status['unflushed_log'] + = max($status['unflushed_log'], $status['innodb_log_buffer_size']); + } + + # Define the variables to output. I use shortened variable names so maybe + # it'll all fit in 1024 bytes for Cactid and Spine's benefit. This list must + # come right after the word MAGIC_VARS_DEFINITIONS. The Perl script parses + # it and uses it as a Perl variable. 
+ $keys = array( + 'Key_read_requests' => 'a0', + 'Key_reads' => 'a1', + 'Key_write_requests' => 'a2', + 'Key_writes' => 'a3', + 'history_list' => 'a4', + 'innodb_transactions' => 'a5', + 'read_views' => 'a6', + 'current_transactions' => 'a7', + 'locked_transactions' => 'a8', + 'active_transactions' => 'a9', + 'pool_size' => 'aa', + 'free_pages' => 'ab', + 'database_pages' => 'ac', + 'modified_pages' => 'ad', + 'pages_read' => 'ae', + 'pages_created' => 'af', + 'pages_written' => 'ag', + 'file_fsyncs' => 'ah', + 'file_reads' => 'ai', + 'file_writes' => 'aj', + 'log_writes' => 'ak', + 'pending_aio_log_ios' => 'al', + 'pending_aio_sync_ios' => 'am', + 'pending_buf_pool_flushes' => 'an', + 'pending_chkp_writes' => 'ao', + 'pending_ibuf_aio_reads' => 'ap', + 'pending_log_flushes' => 'aq', + 'pending_log_writes' => 'ar', + 'pending_normal_aio_reads' => 'as', + 'pending_normal_aio_writes' => 'at', + 'ibuf_inserts' => 'au', + 'ibuf_merged' => 'av', + 'ibuf_merges' => 'aw', + 'spin_waits' => 'ax', + 'spin_rounds' => 'ay', + 'os_waits' => 'az', + 'rows_inserted' => 'b0', + 'rows_updated' => 'b1', + 'rows_deleted' => 'b2', + 'rows_read' => 'b3', + 'Table_locks_waited' => 'b4', + 'Table_locks_immediate' => 'b5', + 'Slow_queries' => 'b6', + 'Open_files' => 'b7', + 'Open_tables' => 'b8', + 'Opened_tables' => 'b9', + 'innodb_open_files' => 'ba', + 'open_files_limit' => 'bb', + 'table_cache' => 'bc', + 'Aborted_clients' => 'bd', + 'Aborted_connects' => 'be', + 'Max_used_connections' => 'bf', + 'Slow_launch_threads' => 'bg', + 'Threads_cached' => 'bh', + 'Threads_connected' => 'bi', + 'Threads_created' => 'bj', + 'Threads_running' => 'bk', + 'max_connections' => 'bl', + 'thread_cache_size' => 'bm', + 'Connections' => 'bn', + 'slave_running' => 'bo', + 'slave_stopped' => 'bp', + 'Slave_retried_transactions' => 'bq', + 'slave_lag' => 'br', + 'Slave_open_temp_tables' => 'bs', + 'Qcache_free_blocks' => 'bt', + 'Qcache_free_memory' => 'bu', + 'Qcache_hits' => 'bv', + 'Qcache_inserts' 
=> 'bw', + 'Qcache_lowmem_prunes' => 'bx', + 'Qcache_not_cached' => 'by', + 'Qcache_queries_in_cache' => 'bz', + 'Qcache_total_blocks' => 'c0', + 'query_cache_size' => 'c1', + 'Questions' => 'c2', + 'Com_update' => 'c3', + 'Com_insert' => 'c4', + 'Com_select' => 'c5', + 'Com_delete' => 'c6', + 'Com_replace' => 'c7', + 'Com_load' => 'c8', + 'Com_update_multi' => 'c9', + 'Com_insert_select' => 'ca', + 'Com_delete_multi' => 'cb', + 'Com_replace_select' => 'cc', + 'Select_full_join' => 'cd', + 'Select_full_range_join' => 'ce', + 'Select_range' => 'cf', + 'Select_range_check' => 'cg', + 'Select_scan' => 'ch', + 'Sort_merge_passes' => 'ci', + 'Sort_range' => 'cj', + 'Sort_rows' => 'ck', + 'Sort_scan' => 'cl', + 'Created_tmp_tables' => 'cm', + 'Created_tmp_disk_tables' => 'cn', + 'Created_tmp_files' => 'co', + 'Bytes_sent' => 'cp', + 'Bytes_received' => 'cq', + 'innodb_log_buffer_size' => 'cr', + 'unflushed_log' => 'cs', + 'log_bytes_flushed' => 'ct', + 'log_bytes_written' => 'cu', + 'relay_log_space' => 'cv', + 'binlog_cache_size' => 'cw', + 'Binlog_cache_disk_use' => 'cx', + 'Binlog_cache_use' => 'cy', + 'binary_log_space' => 'cz', + 'innodb_locked_tables' => 'd0', + 'innodb_lock_structs' => 'd1', + + 'State_closing_tables' => 'd2', + 'State_copying_to_tmp_table' => 'd3', + 'State_end' => 'd4', + 'State_freeing_items' => 'd5', + 'State_init' => 'd6', + 'State_locked' => 'd7', + 'State_login' => 'd8', + 'State_preparing' => 'd9', + 'State_reading_from_net' => 'da', + 'State_sending_data' => 'db', + 'State_sorting_result' => 'dc', + 'State_statistics' => 'dd', + 'State_updating' => 'de', + 'State_writing_to_net' => 'df', + 'State_none' => 'dg', + 'State_other' => 'dh', + + 'Handler_commit' => 'di', + 'Handler_delete' => 'dj', + 'Handler_discover' => 'dk', + 'Handler_prepare' => 'dl', + 'Handler_read_first' => 'dm', + 'Handler_read_key' => 'dn', + 'Handler_read_next' => 'do', + 'Handler_read_prev' => 'dp', + 'Handler_read_rnd' => 'dq', + 'Handler_read_rnd_next' => 'dr', + 
'Handler_rollback' => 'ds', + 'Handler_savepoint' => 'dt', + 'Handler_savepoint_rollback' => 'du', + 'Handler_update' => 'dv', + 'Handler_write' => 'dw', + + # Some InnoDB stats added later... + 'innodb_tables_in_use' => 'dx', + 'innodb_lock_wait_secs' => 'dy', + 'hash_index_cells_total' => 'dz', + 'hash_index_cells_used' => 'e0', + 'total_mem_alloc' => 'e1', + 'additional_pool_alloc' => 'e2', + 'uncheckpointed_bytes' => 'e3', + 'ibuf_used_cells' => 'e4', + 'ibuf_free_cells' => 'e5', + 'ibuf_cell_count' => 'e6', + 'adaptive_hash_memory' => 'e7', + 'page_hash_memory' => 'e8', + 'dictionary_cache_memory' => 'e9', + 'file_system_memory' => 'ea', + 'lock_system_memory' => 'eb', + 'recovery_system_memory' => 'ec', + 'thread_hash_memory' => 'ed', + 'innodb_sem_waits' => 'ee', + 'innodb_sem_wait_time_ms' => 'ef', + 'Key_buf_bytes_unflushed' => 'eg', + 'Key_buf_bytes_used' => 'eh', + 'key_buffer_size' => 'ei', + 'Innodb_row_lock_time' => 'ej', + 'Innodb_row_lock_waits' => 'ek', + ); + + # Return the output. + $output = array(); + foreach ($keys as $key => $short ) { + # If the value isn't defined, return -1 which is lower than (most graphs') + # minimum value of 0, so it'll be regarded as a missing value. + $val = isset($status[$key]) ? $status[$key] : -1; + $output[] = "$short:$val"; + } + $result = implode(' ', $output); + if ($fp ) { + if (fwrite($fp, $result) === FALSE ) { + die("Can't write '$cache_file'"); + } + fclose($fp); + } + + return $result; + +} + +# ============================================================================ +# Given INNODB STATUS text, returns a key-value array of the parsed text. Each +# line shows a sample of the input for both standard InnoDB as you would find in +# MySQL 5.0, and XtraDB or enhanced InnoDB from Percona if applicable. Note +# that extra leading spaces are ignored due to trim(). 
+# ============================================================================ +function get_innodb_array($text) { + $results = array( + 'spin_waits' => array(), + 'spin_rounds' => array(), + 'os_waits' => array(), + 'pending_normal_aio_reads' => null, + 'pending_normal_aio_writes' => null, + 'pending_ibuf_aio_reads' => null, + 'pending_aio_log_ios' => null, + 'pending_aio_sync_ios' => null, + 'pending_log_flushes' => null, + 'pending_buf_pool_flushes' => null, + 'file_reads' => null, + 'file_writes' => null, + 'file_fsyncs' => null, + 'ibuf_inserts' => null, + 'ibuf_merged' => null, + 'ibuf_merges' => null, + 'log_bytes_written' => null, + 'unflushed_log' => null, + 'log_bytes_flushed' => null, + 'pending_log_writes' => null, + 'pending_chkp_writes' => null, + 'log_writes' => null, + 'pool_size' => null, + 'free_pages' => null, + 'database_pages' => null, + 'modified_pages' => null, + 'pages_read' => null, + 'pages_created' => null, + 'pages_written' => null, + 'queries_inside' => null, + 'queries_queued' => null, + 'read_views' => null, + 'rows_inserted' => null, + 'rows_updated' => null, + 'rows_deleted' => null, + 'rows_read' => null, + 'innodb_transactions' => null, + 'unpurged_txns' => null, + 'history_list' => null, + 'current_transactions' => null, + 'hash_index_cells_total' => null, + 'hash_index_cells_used' => null, + 'total_mem_alloc' => null, + 'additional_pool_alloc' => null, + 'last_checkpoint' => null, + 'uncheckpointed_bytes' => null, + 'ibuf_used_cells' => null, + 'ibuf_free_cells' => null, + 'ibuf_cell_count' => null, + 'adaptive_hash_memory' => null, + 'page_hash_memory' => null, + 'dictionary_cache_memory' => null, + 'file_system_memory' => null, + 'lock_system_memory' => null, + 'recovery_system_memory' => null, + 'thread_hash_memory' => null, + 'innodb_sem_waits' => null, + 'innodb_sem_wait_time_ms' => null, + ); + $txn_seen = FALSE; + foreach ( explode("\n", $text) as $line ) { + $line = trim($line); + $row = preg_split('/ +/', $line); + + # 
SEMAPHORES + if (strpos($line, 'Mutex spin waits') === 0 ) { + # Mutex spin waits 79626940, rounds 157459864, OS waits 698719 + # Mutex spin waits 0, rounds 247280272495, OS waits 316513438 + $results['spin_waits'][] = to_int($row[3]); + $results['spin_rounds'][] = to_int($row[5]); + $results['os_waits'][] = to_int($row[8]); + } + elseif (strpos($line, 'RW-shared spins') === 0 ) { + # RW-shared spins 3859028, OS waits 2100750; RW-excl spins 4641946, OS waits 1530310 + $results['spin_waits'][] = to_int($row[2]); + $results['spin_waits'][] = to_int($row[8]); + $results['os_waits'][] = to_int($row[5]); + $results['os_waits'][] = to_int($row[11]); + } + elseif (strpos($line, 'seconds the semaphore:') > 0) { + # --Thread 907205 has waited at handler/ha_innodb.cc line 7156 for 1.00 seconds the semaphore: + increment($results, 'innodb_sem_waits', 1); + increment($results, + 'innodb_sem_wait_time_ms', to_int($row[9]) * 1000); + } + + # TRANSACTIONS + elseif (strpos($line, 'Trx id counter') === 0 ) { + # The beginning of the TRANSACTIONS section: start counting + # transactions + # Trx id counter 0 1170664159 + # Trx id counter 861B144C + $results['innodb_transactions'] = make_bigint($row[3], $row[4]); + $txn_seen = TRUE; + } + elseif (strpos($line, 'Purge done for trx') === 0 ) { + # Purge done for trx's n:o < 0 1170663853 undo n:o < 0 0 + # Purge done for trx's n:o < 861B135D undo n:o < 0 + $purged_to = make_bigint($row[6], $row[7] == 'undo' ? 
null : $row[7]); + $results['unpurged_txns'] + = big_sub($results['innodb_transactions'], $purged_to); + } + elseif (strpos($line, 'History list length') === 0 ) { + # History list length 132 + $results['history_list'] = to_int($row[3]); + } + elseif ($txn_seen && strpos($line, '---TRANSACTION') === 0 ) { + # ---TRANSACTION 0, not started, process no 13510, OS thread id 1170446656 + increment($results, 'current_transactions', 1); + if (strpos($line, 'ACTIVE') > 0 ) { + increment($results, 'active_transactions', 1); + } + } + elseif ($txn_seen && strpos($line, '------- TRX HAS BEEN') === 0 ) { + # ------- TRX HAS BEEN WAITING 32 SEC FOR THIS LOCK TO BE GRANTED: + increment($results, 'innodb_lock_wait_secs', to_int($row[5])); + } + elseif (strpos($line, 'read views open inside InnoDB') > 0 ) { + # 1 read views open inside InnoDB + $results['read_views'] = to_int($row[0]); + } + elseif (strpos($line, 'mysql tables in use') === 0 ) { + # mysql tables in use 2, locked 2 + increment($results, 'innodb_tables_in_use', to_int($row[4])); + increment($results, 'innodb_locked_tables', to_int($row[6])); + } + elseif ($txn_seen && strpos($line, 'lock struct(s)') > 0 ) { + # 23 lock struct(s), heap size 3024, undo log entries 27 + # LOCK WAIT 12 lock struct(s), heap size 3024, undo log entries 5 + # LOCK WAIT 2 lock struct(s), heap size 368 + if (strpos($line, 'LOCK WAIT') === 0 ) { + increment($results, 'innodb_lock_structs', to_int($row[2])); + increment($results, 'locked_transactions', 1); + } + else { + increment($results, 'innodb_lock_structs', to_int($row[0])); + } + } + + # FILE I/O + elseif (strpos($line, ' OS file reads, ') > 0 ) { + # 8782182 OS file reads, 15635445 OS file writes, 947800 OS fsyncs + $results['file_reads'] = to_int($row[0]); + $results['file_writes'] = to_int($row[4]); + $results['file_fsyncs'] = to_int($row[8]); + } + elseif (strpos($line, 'Pending normal aio reads:') === 0 ) { + # Pending normal aio reads: 0, aio writes: 0, + 
$results['pending_normal_aio_reads'] = to_int($row[4]); + $results['pending_normal_aio_writes'] = to_int($row[7]); + } + elseif (strpos($line, 'ibuf aio reads') === 0 ) { + # ibuf aio reads: 0, log i/o's: 0, sync i/o's: 0 + $results['pending_ibuf_aio_reads'] = to_int($row[3]); + $results['pending_aio_log_ios'] = to_int($row[6]); + $results['pending_aio_sync_ios'] = to_int($row[9]); + } + elseif (strpos($line, 'Pending flushes (fsync)') === 0 ) { + # Pending flushes (fsync) log: 0; buffer pool: 0 + $results['pending_log_flushes'] = to_int($row[4]); + $results['pending_buf_pool_flushes'] = to_int($row[7]); + } + + # INSERT BUFFER AND ADAPTIVE HASH INDEX + elseif (strpos($line, 'Ibuf for space 0: size ') === 0 ) { + # Older InnoDB code seemed to be ready for an ibuf per tablespace. It + # had two lines in the output. Newer has just one line, see below. + # Ibuf for space 0: size 1, free list len 887, seg size 889, is not empty + # Ibuf for space 0: size 1, free list len 887, seg size 889, + $results['ibuf_used_cells'] = to_int($row[5]); + $results['ibuf_free_cells'] = to_int($row[9]); + $results['ibuf_cell_count'] = to_int($row[12]); + } + elseif (strpos($line, 'Ibuf: size ') === 0 ) { + # Ibuf: size 1, free list len 4634, seg size 4636, + $results['ibuf_used_cells'] = to_int($row[2]); + $results['ibuf_free_cells'] = to_int($row[6]); + $results['ibuf_cell_count'] = to_int($row[9]); + } + elseif (strpos($line, ' merged recs, ') > 0 ) { + # 19817685 inserts, 19817684 merged recs, 3552620 merges + $results['ibuf_inserts'] = to_int($row[0]); + $results['ibuf_merged'] = to_int($row[2]); + $results['ibuf_merges'] = to_int($row[5]); + } + elseif (strpos($line, 'Hash table size ') === 0 ) { + # In some versions of InnoDB, the used cells is omitted. + # Hash table size 4425293, used cells 4229064, .... 
+ # Hash table size 57374437, node heap has 72964 buffer(s) <-- no used cells + $results['hash_index_cells_total'] = to_int($row[3]); + $results['hash_index_cells_used'] + = strpos($line, 'used cells') > 0 ? to_int($row[6]) : '0'; + } + + # LOG + elseif (strpos($line, " log i/o's done, ") > 0 ) { + # 3430041 log i/o's done, 17.44 log i/o's/second + # 520835887 log i/o's done, 17.28 log i/o's/second, 518724686 syncs, 2980893 checkpoints + # TODO: graph syncs and checkpoints + $results['log_writes'] = to_int($row[0]); + } + elseif (strpos($line, " pending log writes, ") > 0 ) { + # 0 pending log writes, 0 pending chkp writes + $results['pending_log_writes'] = to_int($row[0]); + $results['pending_chkp_writes'] = to_int($row[4]); + } + elseif (strpos($line, "Log sequence number") === 0 ) { + # This number is NOT printed in hex in InnoDB plugin. + # Log sequence number 13093949495856 //plugin + # Log sequence number 125 3934414864 //normal + $results['log_bytes_written'] + = isset($row[4]) + ? make_bigint($row[3], $row[4]) + : to_int($row[3]); + } + elseif (strpos($line, "Log flushed up to") === 0 ) { + # This number is NOT printed in hex in InnoDB plugin. + # Log flushed up to 13093948219327 + # Log flushed up to 125 3934414864 + $results['log_bytes_flushed'] + = isset($row[5]) + ? make_bigint($row[4], $row[5]) + : to_int($row[4]); + } + elseif (strpos($line, "Last checkpoint at") === 0 ) { + # Last checkpoint at 125 3934293461 + $results['last_checkpoint'] + = isset($row[4]) + ? 
make_bigint($row[3], $row[4]) + : to_int($row[3]); + } + + # BUFFER POOL AND MEMORY + elseif (strpos($line, "Total memory allocated") === 0 ) { + # Total memory allocated 29642194944; in additional pool allocated 0 + $results['total_mem_alloc'] = to_int($row[3]); + $results['additional_pool_alloc'] = to_int($row[8]); + } + elseif (strpos($line, 'Adaptive hash index ') === 0 ) { + # Adaptive hash index 1538240664 (186998824 + 1351241840) + $results['adaptive_hash_memory'] = to_int($row[3]); + } + elseif (strpos($line, 'Page hash ') === 0 ) { + # Page hash 11688584 + $results['page_hash_memory'] = to_int($row[2]); + } + elseif (strpos($line, 'Dictionary cache ') === 0 ) { + # Dictionary cache 145525560 (140250984 + 5274576) + $results['dictionary_cache_memory'] = to_int($row[2]); + } + elseif (strpos($line, 'File system ') === 0 ) { + # File system 313848 (82672 + 231176) + $results['file_system_memory'] = to_int($row[2]); + } + elseif (strpos($line, 'Lock system ') === 0 ) { + # Lock system 29232616 (29219368 + 13248) + $results['lock_system_memory'] = to_int($row[2]); + } + elseif (strpos($line, 'Recovery system ') === 0 ) { + # Recovery system 0 (0 + 0) + $results['recovery_system_memory'] = to_int($row[2]); + } + elseif (strpos($line, 'Threads ') === 0 ) { + # Threads 409336 (406936 + 2400) + $results['thread_hash_memory'] = to_int($row[1]); + } + elseif (strpos($line, 'innodb_io_pattern ') === 0 ) { + # innodb_io_pattern 0 (0 + 0) + $results['innodb_io_pattern_memory'] = to_int($row[1]); + } + elseif (strpos($line, "Buffer pool size ") === 0 ) { + # The " " after size is necessary to avoid matching the wrong line: + # Buffer pool size 1769471 + # Buffer pool size, bytes 28991012864 + $results['pool_size'] = to_int($row[3]); + } + elseif (strpos($line, "Free buffers") === 0 ) { + # Free buffers 0 + $results['free_pages'] = to_int($row[2]); + } + elseif (strpos($line, "Database pages") === 0 ) { + # Database pages 1696503 + $results['database_pages'] = 
to_int($row[2]); + } + elseif (strpos($line, "Modified db pages") === 0 ) { + # Modified db pages 160602 + $results['modified_pages'] = to_int($row[3]); + } + elseif (strpos($line, "Pages read ahead") === 0 ) { + # Must do this BEFORE the next test, otherwise it'll get fooled by this + # line from the new plugin (see samples/innodb-015.txt): + # Pages read ahead 0.00/s, evicted without access 0.06/s + # TODO: No-op for now, see issue 134. + } + elseif (strpos($line, "Pages read") === 0 ) { + # Pages read 15240822, created 1770238, written 21705836 + $results['pages_read'] = to_int($row[2]); + $results['pages_created'] = to_int($row[4]); + $results['pages_written'] = to_int($row[6]); + } + + # ROW OPERATIONS + elseif (strpos($line, 'Number of rows inserted') === 0 ) { + # Number of rows inserted 50678311, updated 66425915, deleted 20605903, read 454561562 + $results['rows_inserted'] = to_int($row[4]); + $results['rows_updated'] = to_int($row[6]); + $results['rows_deleted'] = to_int($row[8]); + $results['rows_read'] = to_int($row[10]); + } + elseif (strpos($line, " queries inside InnoDB, ") > 0 ) { + # 0 queries inside InnoDB, 0 queries in queue + $results['queries_inside'] = to_int($row[0]); + $results['queries_queued'] = to_int($row[4]); + } + } + + foreach ( array('spin_waits', 'spin_rounds', 'os_waits') as $key ) { + $results[$key] = to_int(array_sum($results[$key])); + } + $results['unflushed_log'] + = big_sub($results['log_bytes_written'], $results['log_bytes_flushed']); + $results['uncheckpointed_bytes'] + = big_sub($results['log_bytes_written'], $results['last_checkpoint']); + + +# foreach ($results as $key => $value) { +# echo(strtolower($key).":".strtolower($value)."\n"); +# } + + + return $results; +} + + +# ============================================================================ +# Returns a bigint from two ulint or a single hex number. This is tested in +# t/mysql_stats.php and copied, without tests, to ss_get_by_ssh.php. 
+# ============================================================================ +function make_bigint ($hi, $lo = null) { + debug(array($hi, $lo)); + if (is_null($lo) ) { + # Assume it is a hex string representation. + return base_convert($hi, 16, 10); + } + else { + $hi = $hi ? $hi : '0'; # Handle empty-string or whatnot + $lo = $lo ? $lo : '0'; + return big_add(big_multiply($hi, 4294967296), $lo); + } +} + +# ============================================================================ +# Extracts the numbers from a string. You can't reliably do this by casting to +# an int, because numbers that are bigger than PHP's int (varies by platform) +# will be truncated. And you can't use sprintf(%u) either, because the maximum +# value that will return on some platforms is 4022289582. So this just handles +# them as a string instead. It extracts digits until it finds a non-digit and +# quits. This is tested in t/mysql_stats.php and copied, without tests, to +# ss_get_by_ssh.php. +# ============================================================================ +function to_int ( $str ) { + debug($str); + global $debug; + preg_match('{(\d+)}', $str, $m); + if (isset($m[1]) ) { + return $m[1]; + } + elseif ($debug ) { + print_r(debug_backtrace()); + } + else { + return 0; + } +} + +# ============================================================================ +# Wrap mysql_query in error-handling, and instead of returning the result, +# return an array of arrays in the result. 
+# ============================================================================ +function run_query($sql, $conn) { + global $debug; + debug($sql); + $result = @mysql_query($sql, $conn); + if ($debug ) { + $error = @mysql_error($conn); + if ($error ) { + debug(array($sql, $error)); + die("SQLERR $error in $sql"); + } + } + $array = array(); + while ( $row = @mysql_fetch_array($result) ) { + $array[] = $row; + } + debug(array($sql, $array)); + return $array; +} + +# ============================================================================ +# Safely increments a value that might be null. +# ============================================================================ +function increment(&$arr, $key, $howmuch) { + debug(array($key, $howmuch)); + if (array_key_exists($key, $arr) && isset($arr[$key]) ) { + $arr[$key] = big_add($arr[$key], $howmuch); + } + else { + $arr[$key] = $howmuch; + } +} + +# ============================================================================ +# Multiply two big integers together as accurately as possible with reasonable +# effort. This is tested in t/mysql_stats.php and copied, without tests, to +# ss_get_by_ssh.php. $force is for testability. +# ============================================================================ +function big_multiply ($left, $right, $force = null) { + if (function_exists("gmp_mul") && (is_null($force) || $force == 'gmp') ) { + debug(array('gmp_mul', $left, $right)); + return gmp_strval( gmp_mul( $left, $right )); + } + elseif (function_exists("bcmul") && (is_null($force) || $force == 'bc') ) { + debug(array('bcmul', $left, $right)); + return bcmul( $left, $right ); + } + else { # Or $force == 'something else' + debug(array('sprintf', $left, $right)); + return sprintf("%.0f", $left * $right); + } +} + +# ============================================================================ +# Subtract two big integers as accurately as possible with reasonable effort. 
+# This is tested in t/mysql_stats.php and copied, without tests, to +# ss_get_by_ssh.php. $force is for testability. +# ============================================================================ +function big_sub ($left, $right, $force = null) { + debug(array($left, $right)); + if (is_null($left) ) { $left = 0; } + if (is_null($right) ) { $right = 0; } + if (function_exists("gmp_sub") && (is_null($force) || $force == 'gmp')) { + debug(array('gmp_sub', $left, $right)); + return gmp_strval( gmp_sub( $left, $right )); + } + elseif (function_exists("bcsub") && (is_null($force) || $force == 'bc')) { + debug(array('bcsub', $left, $right)); + return bcsub( $left, $right ); + } + else { # Or $force == 'something else' + debug(array('to_int', $left, $right)); + return to_int($left - $right); + } +} + +# ============================================================================ +# Add two big integers together as accurately as possible with reasonable +# effort. This is tested in t/mysql_stats.php and copied, without tests, to +# ss_get_by_ssh.php. $force is for testability. +# ============================================================================ +function big_add ($left, $right, $force = null) { + if (is_null($left) ) { $left = 0; } + if (is_null($right) ) { $right = 0; } + if (function_exists("gmp_add") && (is_null($force) || $force == 'gmp')) { + debug(array('gmp_add', $left, $right)); + return gmp_strval( gmp_add( $left, $right )); + } + elseif (function_exists("bcadd") && (is_null($force) || $force == 'bc')) { + debug(array('bcadd', $left, $right)); + return bcadd( $left, $right ); + } + else { # Or $force == 'something else' + debug(array('to_int', $left, $right)); + return to_int($left + $right); + } +} + +# ============================================================================ +# Writes to a debugging log. 
+# ============================================================================ +function debug($val) { + global $debug_log; + if (!$debug_log ) { + return; + } + if ($fp = fopen($debug_log, 'a+') ) { + $trace = debug_backtrace(); + $calls = array(); + $i = 0; + $line = 0; + $file = ''; + foreach ( debug_backtrace() as $arr ) { + if ($i++ ) { + $calls[] = "$arr[function]() at $file:$line"; + } + $line = array_key_exists('line', $arr) ? $arr['line'] : '?'; + $file = array_key_exists('file', $arr) ? $arr['file'] : '?'; + } + if (!count($calls) ) { + $calls[] = "at $file:$line"; + } + fwrite($fp, date('Y-m-d h:i:s') . ' ' . implode(' <- ', $calls)); + fwrite($fp, "\n" . var_export($val, TRUE) . "\n"); + fclose($fp); + } + else { # Disable logging + print("Warning: disabling debug logging to $debug_log\n"); + $debug_log = FALSE; + } +} + +?> diff --git a/agent-local/nginx b/agent-local/nginx new file mode 100755 index 000000000..d6319f1b2 --- /dev/null +++ b/agent-local/nginx @@ -0,0 +1,38 @@ +#!/usr/bin/env python +import urllib2 +import re + + +data = urllib2.urlopen('http://127.0.0.1/nginx-status').read() + +params = {} + +for line in data.split("\n"): + smallstat = re.match(r"\s?Reading:\s(.*)\sWriting:\s(.*)\sWaiting:\s(.*)$", line) + req = re.match(r"\s+(\d+)\s+(\d+)\s+(\d+)", line) + if smallstat: + params["Reading"] = smallstat.group(1) + params["Writing"] = smallstat.group(2) + params["Waiting"] = smallstat.group(3) + elif req: + params["Requests"] = req.group(3) + else: + pass + + +dataorder = [ + "Active", + "Reading", + "Writing", + "Waiting", + "Requests" + ] + +print "<<>>\n"; + +for param in dataorder: + if param == "Active": + Active = int(params["Reading"]) + int(params["Writing"]) + int(params["Waiting"]) + print Active + else: + print params[param] diff --git a/agent-local/rpm b/agent-local/rpm new file mode 100755 index 000000000..88483be1d --- /dev/null +++ b/agent-local/rpm @@ -0,0 +1,19 @@ +#!/bin/bash +# Cache the file for 30 minutes +# If you 
want to override this, put the command in cron. +# We cache because it is a 1sec delay, which is painful for the poller +if [ -x /bin/rpm ]; then + DATE=$(date +%s) + FILE=/tmp/agent-local-rpm + if [ ! -e $FILE ]; then + /bin/rpm -q --all --queryformat '%{N} %{V} %{R} %{ARCH} %{SIZE}\n' > $FILE + fi + FILEMTIME=$(stat -c %Y $FILE) + FILEAGE=$(($DATE-$FILEMTIME)) + if [ $FILEAGE -gt 1800 ]; then + /bin/rpm -q --all --queryformat '%{N} %{V} %{R} %{ARCH} %{SIZE}\n' > $FILE + fi + echo "<<>>" + cat $FILE +fi + diff --git a/agent-local/temperature b/agent-local/temperature new file mode 100755 index 000000000..d00589751 --- /dev/null +++ b/agent-local/temperature @@ -0,0 +1,4 @@ +#!/bin/bash +# example to output some temperatures +#echo "<<>>" +#echo "/dev/sda:" `hddtemp /dev/hda -n` diff --git a/agent-local/tinydns b/agent-local/tinydns new file mode 100755 index 000000000..9c980a80c --- /dev/null +++ b/agent-local/tinydns @@ -0,0 +1,18 @@ +#!/bin/bash +# (c) 2015, f0o@devilcode.org +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +echo "<<>>" +head -n1 /service/dns/log/main/tinystats/tinystats.out + diff --git a/check_mk_COPYING b/check_mk_COPYING new file mode 100644 index 000000000..1942c4334 --- /dev/null +++ b/check_mk_COPYING @@ -0,0 +1,341 @@ + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc. 
+ 59 Temple Place - Suite 330 + Boston, MA 02111-1307, USA. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Library General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. 
+ + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. 
You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. 
If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. 
(This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. 
Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + + 7. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. 
+ +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. 
If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. 
+ + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) 19yy + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License + along with this program; see the file COPYING. If not, write to + the Free Software Foundation, Inc., 59 Temple Place - Suite 330, + Boston, MA 02111-1307, USA. + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this +when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) 19yy name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. 
Of course, the commands you use may +be called something other than `show w' and `show c'; they could even be +mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the program + `Gnomovision' (which makes passes at compilers) written by James Hacker. + + , 1 April 1989 + Ty Coon, President of Vice + +This General Public License does not permit incorporating your program into +proprietary programs. If your program is a subroutine library, you may +consider it more useful to permit linking proprietary applications with the +library. If this is what you want to do, use the GNU Library General +Public License instead of this License. diff --git a/check_mk_agent b/check_mk_agent new file mode 100755 index 000000000..6b6e03a42 --- /dev/null +++ b/check_mk_agent @@ -0,0 +1,659 @@ +#!/bin/bash +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. 
See the GNU General Public License for more de-
+# tails. You should have received a copy of the GNU General Public
+# License along with GNU Make; see the file COPYING. If not, write
+# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
+# Boston, MA 02110-1301 USA.
+
+# Remove locale settings to eliminate localized outputs where possible
+export LC_ALL=C
+unset LANG
+
+export MK_LIBDIR="/usr/lib/check_mk_agent"
+export MK_CONFDIR="/etc/check_mk"
+export MK_VARDIR="/var/lib/check_mk_agent"
+
+# Provide information about the remote host. That helps when data
+# is being sent only once to each remote host.
+if [ "$REMOTE_HOST" ] ; then
+    export REMOTE=$REMOTE_HOST
+elif [ "$SSH_CLIENT" ] ; then
+    export REMOTE=${SSH_CLIENT%% *}
+fi
+
+# Make sure, locally installed binaries are found
+PATH=$PATH:/usr/local/bin
+
+# All executables in PLUGINSDIR will simply be executed and their
+# output appended to the output of the agent. Plugins define their own
+# sections and must output headers with '<<<' and '>>>'
+PLUGINSDIR=$MK_LIBDIR/plugins
+
+# All executables in LOCALDIR will be executed and their
+# output inserted into the section <<<local>>>. Please
+# refer to online documentation for details about local checks.
+LOCALDIR=$MK_LIBDIR/local
+
+# All files in SPOOLDIR will simply be appended to the agent
+# output if they are not outdated (see below)
+SPOOLDIR=$MK_VARDIR/spool
+
+# close standard input (for security reasons) and stderr
+if [ "$1" = -d ]
+then
+    set -xv
+else
+    exec </dev/null 2>/dev/null
+fi
+
+# Runs a command asynchronously by use of a cache file
+function run_cached () {
+    local section=
+    if [ "$1" = -s ] ; then local section="echo '<<<$2>>>' ; " ; shift ; fi
+    local NAME=$1
+    local MAXAGE=$2
+    shift 2
+    local CMDLINE="$section$@"
+
+    if [ ! 
-d $MK_VARDIR/cache ]; then mkdir -p $MK_VARDIR/cache ; fi + CACHEFILE="$MK_VARDIR/cache/$NAME.cache" + + # Check if the creation of the cache takes suspiciously long and return + # nothing if the age (access time) of $CACHEFILE.new is twice the MAXAGE + local NOW=$(date +%s) + if [ -e "$CACHEFILE.new" ] ; then + local CF_ATIME=$(stat -c %X "$CACHEFILE.new") + if [ $((NOW - CF_ATIME)) -ge $((MAXAGE * 2)) ] ; then + # Kill the process still accessing that file in case + # it is still running. This avoids overlapping processes! + fuser -k -9 "$CACHEFILE.new" >/dev/null 2>&1 + rm -f "$CACHEFILE.new" + return + fi + fi + + # Check if cache file exists and is recent enough + if [ -s "$CACHEFILE" ] ; then + local MTIME=$(stat -c %Y "$CACHEFILE") + if [ $((NOW - MTIME)) -le $MAXAGE ] ; then local USE_CACHEFILE=1 ; fi + # Output the file in any case, even if it is + # outdated. The new file will not yet be available + cat "$CACHEFILE" + fi + + # Cache file outdated and new job not yet running? Start it + if [ -z "$USE_CACHEFILE" -a ! -e "$CACHEFILE.new" ] ; then + echo "set -o noclobber ; exec > \"$CACHEFILE.new\" || exit 1 ; $CMDLINE && mv \"$CACHEFILE.new\" \"$CACHEFILE\" || rm -f \"$CACHEFILE\" \"$CACHEFILE.new\"" | nohup bash >/dev/null 2>&1 & + fi +} + +# Make run_cached available for subshells (plugins, local checks, etc.) +export -f run_cached + +echo '<<>>' +echo Version: 1.2.6b5 +echo AgentOS: linux +echo AgentDirectory: $MK_CONFDIR +echo DataDirectory: $MK_VARDIR +echo SpoolDirectory: $SPOOLDIR +echo PluginsDirectory: $PLUGINSDIR +echo LocalDirectory: $LOCALDIR + +# If we are called via xinetd, try to find only_from configuration +if [ -n "$REMOTE_HOST" ] +then + echo -n 'OnlyFrom: ' + echo $(sed -n '/^service[[:space:]]*check_mk/,/}/s/^[[:space:]]*only_from[[:space:]]*=[[:space:]]*\(.*\)/\1/p' /etc/xinetd.d/* | head -n1) +fi + +# Print out Partitions / Filesystems. 
(-P gives non-wrapped POSIXed output) +# Heads up: NFS-mounts are generally supressed to avoid agent hangs. +# If hard NFS mounts are configured or you have too large nfs retry/timeout +# settings, accessing those mounts from the agent would leave you with +# thousands of agent processes and, ultimately, a dead monitored system. +# These should generally be monitored on the NFS server, not on the clients. + +echo '<<>>' +# The exclusion list is getting a bit of a problem. -l should hide any remote FS but seems +# to be all but working. +excludefs="-x smbfs -x cifs -x iso9660 -x udf -x nfsv4 -x nfs -x mvfs -x zfs" +df -PTlk $excludefs | sed 1d + +# df inodes information +echo '<<>>' +echo '[df_inodes_start]' +df -PTli $excludefs | sed 1d +echo '[df_inodes_end]' + +# Filesystem usage for ZFS +if type zfs > /dev/null 2>&1 ; then + echo '<<>>' + zfs get -Hp name,quota,used,avail,mountpoint,type -t filesystem,volume || \ + zfs get -Hp name,quota,used,avail,mountpoint,type + echo '[df]' + df -PTlk -t zfs | sed 1d +fi + +# Check NFS mounts by accessing them with stat -f (System +# call statfs()). If this lasts more then 2 seconds we +# consider it as hanging. We need waitmax. +if type waitmax >/dev/null +then + STAT_VERSION=$(stat --version | head -1 | cut -d" " -f4) + STAT_BROKE="5.3.0" + + echo '<<>>' + sed -n '/ nfs4\? /s/[^ ]* \([^ ]*\) .*/\1/p' < /proc/mounts | + sed 's/\\040/ /g' | + while read MP + do + if [ $STAT_VERSION != $STAT_BROKE ]; then + waitmax -s 9 2 stat -f -c "$MP ok %b %f %a %s" "$MP" || \ + echo "$MP hanging 0 0 0 0" + else + waitmax -s 9 2 stat -f -c "$MP ok %b %f %a %s" "$MP" && \ + printf '\n'|| echo "$MP hanging 0 0 0 0" + fi + done + + echo '<<>>' + sed -n '/ cifs\? 
/s/[^ ]* \([^ ]*\) .*/\1/p' < /proc/mounts | + sed 's/\\040/ /g' | + while read MP + do + if [ $STAT_VERSION != $STAT_BROKE ]; then + waitmax -s 9 2 stat -f -c "$MP ok %b %f %a %s" "$MP" || \ + echo "$MP hanging 0 0 0 0" + else + waitmax -s 9 2 stat -f -c "$MP ok %b %f %a %s" "$MP" && \ + printf '\n'|| echo "$MP hanging 0 0 0 0" + fi + done +fi + +# Check mount options. Filesystems may switch to 'ro' in case +# of a read error. +echo '<<>>' +grep ^/dev < /proc/mounts + +# processes including username, without kernel processes +echo '<<>>' +ps ax -o user,vsz,rss,cputime,pid,command --columns 10000 | sed -e 1d -e 's/ *\([^ ]*\) *\([^ ]*\) *\([^ ]*\) *\([^ ]*\) *\([^ ]*\) */(\1,\2,\3,\4,\5) /' + +# Memory usage +echo '<<>>' +egrep -v '^Swap:|^Mem:|total:' < /proc/meminfo + +# Load and number of processes +echo '<<>>' +echo "$(cat /proc/loadavg) $(grep -E '^CPU|^processor' < /proc/cpuinfo | wc -l)" + +# Uptime +echo '<<>>' +cat /proc/uptime + + +# New variant: Information about speed and state in one section +echo '<<>>' +sed 1,2d /proc/net/dev +if type ethtool > /dev/null +then + for eth in $(sed -e 1,2d < /proc/net/dev | cut -d':' -f1 | sort) + do + echo "[$eth]" + ethtool $eth | egrep '(Speed|Duplex|Link detected|Auto-negotiation):' + echo -en "\tAddress: " ; cat /sys/class/net/$eth/address ; echo + done +fi + + +# Current state of bonding interfaces +if [ -e /proc/net/bonding ] ; then + echo '<<>>' + pushd /proc/net/bonding > /dev/null ; head -v -n 1000 * ; popd +fi + +# Same for Open vSwitch bonding +if type ovs-appctl > /dev/null ; then + echo '<<>>' + for bond in $(ovs-appctl bond/list | sed -e 1d | cut -f2) ; do + echo "[$bond]" + ovs-appctl bond/show $bond + done +fi + + +# Number of TCP connections in the various states +echo '<<>>' +# waitmax 10 netstat -nt | awk ' /^tcp/ { c[$6]++; } END { for (x in c) { print x, c[x]; } }' +# New implementation: netstat is very slow for large TCP tables +cat /proc/net/tcp /proc/net/tcp6 2>/dev/null | awk ' /:/ { c[$4]++; } 
END { for (x in c) { print x, c[x]; } }' + +# Linux Multipathing +if type multipath >/dev/null ; then + echo '<<>>' + multipath -l +fi + +# Performancecounter Platten +echo '<<>>' +date +%s +egrep ' (x?[shv]d[a-z]*|cciss/c[0-9]+d[0-9]+|emcpower[a-z]+|dm-[0-9]+|VxVM.*|mmcblk.*) ' < /proc/diskstats +if type dmsetup >/dev/null ; then + echo '[dmsetup_info]' + dmsetup info -c --noheadings --separator ' ' -o name,devno,vg_name,lv_name +fi +if [ -d /dev/vx/dsk ] ; then + echo '[vx_dsk]' + stat -c "%t %T %n" /dev/vx/dsk/*/* +fi + + +# Performancecounter Kernel +echo '<<>>' +date +%s +cat /proc/vmstat /proc/stat + +# Hardware sensors via IPMI (need ipmitool) +if type ipmitool > /dev/null +then + run_cached -s ipmi 300 "ipmitool sensor list | grep -v 'command failed' | sed -e 's/ *| */|/g' -e 's/ /_/g' -e 's/_*"'$'"//' -e 's/|/ /g' | egrep -v '^[^ ]+ na ' | grep -v ' discrete '" +fi + + +# IPMI data via ipmi-sensors (of freeipmi). Please make sure, that if you +# have installed freeipmi that IPMI is really support by your hardware. +if type ipmi-sensors >/dev/null +then + echo '<<>>' + # Newer ipmi-sensors version have new output format; Legacy format can be used + if ipmi-sensors --help | grep -q legacy-output; then + IPMI_FORMAT="--legacy-output" + else + IPMI_FORMAT="" + fi + # At least with ipmi-sensoirs 0.7.16 this group is Power_Unit instead of "Power Unit" + run_cached -s ipmi_sensors 300 "for class in Temperature Power_Unit Fan + do + ipmi-sensors $IPMI_FORMAT --sdr-cache-directory /var/cache -g "$class" | sed -e 's/ /_/g' -e 's/:_\?/ /g' -e 's@ \([^(]*\)_(\([^)]*\))@ \2_\1@' + # In case of a timeout immediately leave loop. + if [ $? 
= 255 ] ; then break ; fi + done" +fi + +# RAID status of Linux software RAID +echo '<<>>' +cat /proc/mdstat + +# RAID status of Linux RAID via device mapper +if type dmraid >/dev/null && DMSTATUS=$(dmraid -r) +then + echo '<<>>' + + # Output name and status + dmraid -s | grep -e ^name -e ^status + + # Output disk names of the RAID disks + DISKS=$(echo "$DMSTATUS" | cut -f1 -d\:) + + for disk in $DISKS ; do + device=$(cat /sys/block/$(basename $disk)/device/model ) + status=$(echo "$DMSTATUS" | grep ^${disk}) + echo "$status Model: $device" + done +fi + +# RAID status of LSI controllers via cfggen +if type cfggen > /dev/null ; then + echo '<<>>' + cfggen 0 DISPLAY | egrep '(Target ID|State|Volume ID|Status of volume)[[:space:]]*:' | sed -e 's/ *//g' -e 's/:/ /' +fi + +# RAID status of LSI MegaRAID controller via MegaCli. You can download that tool from: +# http://www.lsi.com/downloads/Public/MegaRAID%20Common%20Files/8.02.16_MegaCLI.zip +if type MegaCli >/dev/null ; then + MegaCli_bin="MegaCli" +elif type MegaCli64 >/dev/null ; then + MegaCli_bin="MegaCli64" +elif type megacli >/dev/null ; then + MegaCli_bin="megacli" +else + MegaCli_bin="unknown" +fi + +if [ "$MegaCli_bin" != "unknown" ]; then + echo '<<>>' + for part in $($MegaCli_bin -EncInfo -aALL -NoLog < /dev/null \ + | sed -rn 's/:/ /g; s/[[:space:]]+/ /g; s/^ //; s/ $//; s/Number of enclosures on adapter ([0-9]+).*/adapter \1/g; /^(Enclosure|Device ID|adapter) [0-9]+$/ p'); do + [ $part = adapter ] && echo "" + [ $part = 'Enclosure' ] && echo -ne "\ndev2enc" + echo -n " $part" + done + echo + $MegaCli_bin -PDList -aALL -NoLog < /dev/null | egrep 'Enclosure|Raw Size|Slot Number|Device Id|Firmware state|Inquiry|Adapter' + echo '<<>>' + $MegaCli_bin -LDInfo -Lall -aALL -NoLog < /dev/null | egrep 'Size|State|Number|Adapter|Virtual' + echo '<<>>' + $MegaCli_bin -AdpBbuCmd -GetBbuStatus -aALL -NoLog < /dev/null | grep -v Exit +fi + +# RAID status of 3WARE disk controller (by Radoslaw Bak) +if type tw_cli > 
/dev/null ; then + for C in $(tw_cli show | awk 'NR < 4 { next } { print $1 }'); do + echo '<<<3ware_info>>>' + tw_cli /$C show all | egrep 'Model =|Firmware|Serial' + echo '<<<3ware_disks>>>' + tw_cli /$C show drivestatus | egrep 'p[0-9]' | sed "s/^/$C\//" + echo '<<<3ware_units>>>' + tw_cli /$C show unitstatus | egrep 'u[0-9]' | sed "s/^/$C\//" + done +fi + +# RAID controllers from areca (Taiwan) +# cli64 can be found at ftp://ftp.areca.com.tw/RaidCards/AP_Drivers/Linux/CLI/ +if type cli64 >/dev/null ; then + run_cached -s arc_raid_status 300 "cli64 rsf info | tail -n +3 | head -n -2" +fi + +# VirtualBox Guests. Section must always been output. Otherwise the +# check would not be executed in case no guest additions are installed. +# And that is something the check wants to detect +echo '<<>>' +if type VBoxControl >/dev/null 2>&1 ; then + VBoxControl -nologo guestproperty enumerate | cut -d, -f1,2 + [ ${PIPESTATUS[0]} = 0 ] || echo "ERROR" +fi + +# OpenVPN Clients. Currently we assume that the configuration # is in +# /etc/openvpn. We might find a safer way to find the configuration later. +if [ -e /etc/openvpn/openvpn-status.log ] ; then + echo '<<>>' + sed -n -e '/CLIENT LIST/,/ROUTING TABLE/p' < /etc/openvpn/openvpn-status.log | sed -e 1,3d -e '$d' +fi + +# Time synchronization with NTP +if type ntpq > /dev/null 2>&1 ; then + # remove heading, make first column space separated + run_cached -s ntp 30 "waitmax 5 ntpq -np | sed -e 1,2d -e 's/^\(.\)/\1 /' -e 's/^ /%/'" +fi + +# Time synchronization with Chrony +if type chronyc > /dev/null 2>&1 ; then + # Force successful exit code. 
Otherwise section will be missing if daemon not running
+    run_cached -s chrony 30 "waitmax 5 chronyc tracking || true"
+fi
+
+if type nvidia-settings >/dev/null && [ -S /tmp/.X11-unix/X0 ]
+then
+    echo '<<<nvidia>>>'
+    for var in GPUErrors GPUCoreTemp
+    do
+        DISPLAY=:0 waitmax 2 nvidia-settings -t -q $var | sed "s/^/$var: /"
+    done
+fi
+
+if [ -e /proc/drbd ]; then
+    echo '<<<drbd>>>'
+    cat /proc/drbd
+fi
+
+# Status of CUPS printer queues
+if type lpstat > /dev/null 2>&1; then
+    if pgrep cups > /dev/null 2>&1; then
+        echo '<<<cups_queues>>>'
+        CPRINTCONF=/etc/cups/printers.conf
+        if [ -r "$CPRINTCONF" ] ; then
+            LOCAL_PRINTERS=$(grep -E "<(Default)?Printer .*>" $CPRINTCONF | awk '{print $2}' | sed -e 's/>//')
+            lpstat -p | while read LINE
+            do
+                PRINTER=$(echo $LINE | awk '{print $2}')
+                if echo "$LOCAL_PRINTERS" | grep -q "$PRINTER"; then
+                    echo $LINE
+                fi
+            done
+            echo '---'
+            lpstat -o | while read LINE
+            do
+                PRINTER=${LINE%%-*}
+                if echo "$LOCAL_PRINTERS" | grep -q "$PRINTER"; then
+                    echo $LINE
+                fi
+            done
+        else
+            lpstat -p
+            echo '---'
+            lpstat -o | sort
+        fi
+    fi
+fi
+
+# Heartbeat monitoring
+# Different handling for heartbeat clusters with and without CRM
+# for the resource state
+if [ -S /var/run/heartbeat/crm/cib_ro -o -S /var/run/crm/cib_ro ] || pgrep crmd > /dev/null 2>&1; then
+    echo '<<<heartbeat_crm>>>'
+    crm_mon -1 -r | grep -v ^$ | sed 's/^ //; /^\sResource Group:/,$ s/^\s//; s/^\s/_/g'
+fi
+if type cl_status > /dev/null 2>&1; then
+    echo '<<<heartbeat_rscstatus>>>'
+    cl_status rscstatus
+
+    echo '<<<heartbeat_nodes>>>'
+    for NODE in $(cl_status listnodes); do
+        if [ $NODE != $(echo $HOSTNAME | tr 'A-Z' 'a-z') ]; then
+            STATUS=$(cl_status nodestatus $NODE)
+            echo -n "$NODE $STATUS"
+            for LINK in $(cl_status listhblinks $NODE 2>/dev/null); do
+                echo -n " $LINK $(cl_status hblinkstatus $NODE $LINK)"
+            done
+            echo
+        fi
+    done
+fi
+
+# Postfix mailqueue monitoring
+#
+# Only handle mailq when postfix user is present. The mailq command is also
+# available when postfix is not installed. 
But it produces different outputs
+# which are not handled by the check at the moment. So try to filter out the
+# systems not using postfix by searching for the postfix user.
+#
+# Cannot take the whole output. This could produce several MB of agent output
+# on blocking queues.
+# Only handle the last 6 lines (includes the summary line at the bottom and
+# the last message in the queue). The last message is not used at the moment
+# but it could be used to get the timestamp of the last message.
+if type postconf >/dev/null ; then
+    echo '<<<postfix_mailq>>>'
+    postfix_queue_dir=$(postconf -h queue_directory)
+    postfix_count=$(find $postfix_queue_dir/deferred -type f | wc -l)
+    postfix_size=$(du -ks $postfix_queue_dir/deferred | awk '{print $1 }')
+    if [ $postfix_count -gt 0 ]
+    then
+        echo -- $postfix_size Kbytes in $postfix_count Requests.
+    else
+        echo Mail queue is empty
+    fi
+elif [ -x /usr/sbin/ssmtp ] ; then
+    echo '<<<postfix_mailq>>>'
+    mailq 2>&1 | sed 's/^[^:]*: \(.*\)/\1/' | tail -n 6
+fi
+
+# Check status of qmail mailqueue
+if type qmail-qstat >/dev/null
+then
+    echo "<<<qmail_stats>>>"
+    qmail-qstat
+fi
+
+# Check status of OMD sites
+if type omd >/dev/null
+then
+    run_cached -s omd_status 60 "omd status --bare --auto"
+fi
+
+
+# Welcome the ZFS check on Linux
+# We do not endorse running ZFS on Linux if your vendor doesn't support it ;)
+# check zpool status
+if type zpool >/dev/null; then
+    echo "<<<zpool_status>>>"
+    zpool status -x
+fi
+
+
+# Fileinfo-Check: put patterns for files into /etc/check_mk/fileinfo.cfg
+if [ -r "$MK_CONFDIR/fileinfo.cfg" ] ; then
+    echo '<<<fileinfo>>>'
+    date +%s
+    stat -c "%n|%s|%Y" $(cat "$MK_CONFDIR/fileinfo.cfg")
+fi
+
+# Get stats about OMD monitoring cores running on this machine.
+# Since cd is a shell builtin the check does not affect the performance
+# on non-OMD machines. 
+if cd /omd/sites +then + echo '<<>>' + for site in * + do + if [ -S "/omd/sites/$site/tmp/run/live" ] ; then + echo "[$site]" + echo -e "GET status" | waitmax 3 /omd/sites/$site/bin/unixcat /omd/sites/$site/tmp/run/live + fi + done +fi + +# Get statistics about monitored jobs. Below the job directory there +# is a sub directory per user that ran a job. That directory must be +# owned by the user so that a symlink or hardlink attack for reading +# arbitrary files can be avoided. +if pushd $MK_VARDIR/job >/dev/null; then + echo '<<>>' + for username in * + do + if [ -d "$username" ] && cd "$username" ; then + su "$username" -c "head -n -0 -v *" + cd .. + fi + done + popd > /dev/null +fi + +# Gather thermal information provided e.g. by acpi +# At the moment only supporting thermal sensors +if ls /sys/class/thermal/thermal_zone* >/dev/null 2>&1; then + echo '<<>>' + for F in /sys/class/thermal/thermal_zone*; do + echo -n "${F##*/} " + if [ ! -e $F/mode ] ; then echo -n "- " ; fi + cat $F/{mode,type,temp,trip_point_*} | tr \\n " " + echo + done +fi + +# Libelle Business Shadow +if type trd >/dev/null; then + echo "<<>>" + trd -s +fi + +# MK's Remote Plugin Executor +if [ -e "$MK_CONFDIR/mrpe.cfg" ] +then + echo '<<>>' + grep -Ev '^[[:space:]]*($|#)' "$MK_CONFDIR/mrpe.cfg" | \ + while read descr cmdline + do + PLUGIN=${cmdline%% *} + OUTPUT=$(eval "$cmdline") + echo -n "(${PLUGIN##*/}) $descr $? 
$OUTPUT" | tr \\n \\1 + echo + done +fi + + +# Local checks +echo '<<>>' +if cd $LOCALDIR ; then + for skript in $(ls) ; do + if [ -f "$skript" -a -x "$skript" ] ; then + ./$skript + fi + done + # Call some plugins only every X'th minute + for skript in [1-9]*/* ; do + if [ -x "$skript" ] ; then + run_cached local_${skript//\//\\} ${skript%/*} "$skript" + fi + done +fi + +# Plugins +if cd $PLUGINSDIR ; then + for skript in $(ls) ; do + if [ -f "$skript" -a -x "$skript" ] ; then + ./$skript + fi + done + # Call some plugins only every Xth minute + for skript in [1-9]*/* ; do + if [ -x "$skript" ] ; then + run_cached plugins_${skript//\//\\} ${skript%/*} "$skript" + fi + done +fi + +# Agent output snippets created by cronjobs, etc. +if [ -d "$SPOOLDIR" ] +then + pushd "$SPOOLDIR" > /dev/null + now=$(date +%s) + + for file in * + do + # output every file in this directory. If the file is prefixed + # with a number, then that number is the maximum age of the + # file in seconds. If the file is older than that, it is ignored. + maxage="" + part="$file" + + # Each away all digits from the front of the filename and + # collect them in the variable maxage. + while [ "${part/#[0-9]/}" != "$part" ] + do + maxage=$maxage${part:0:1} + part=${part:1} + done + + # If there is at least one digit, than we honor that. + if [ "$maxage" ] ; then + mtime=$(stat -c %Y "$file") + if [ $((now - mtime)) -gt $maxage ] ; then + continue + fi + fi + + # Output the file + cat "$file" + done + popd > /dev/null +fi diff --git a/check_mk_xinetd b/check_mk_xinetd new file mode 100644 index 000000000..9fefd584c --- /dev/null +++ b/check_mk_xinetd @@ -0,0 +1,49 @@ +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . 
\ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +service check_mk +{ + type = UNLISTED + port = 6556 + socket_type = stream + protocol = tcp + wait = no + user = root + server = /usr/bin/check_mk_agent + + # If you use fully redundant monitoring and poll the client + # from more then one monitoring servers in parallel you might + # want to use the agent cache wrapper: + #server = /usr/bin/check_mk_caching_agent + + # configure the IP address(es) of your Nagios server here: + #only_from = 127.0.0.1 10.0.20.1 10.0.20.2 + + # Don't be too verbose. Don't log every check. This might be + # commented out for debugging. If this option is commented out + # the default options will be used for this service. 
+ log_on_success = + + disable = no +} From 4cf699291c92bf54181560212158713eba83bb5a Mon Sep 17 00:00:00 2001 From: Daniel Preussker Date: Tue, 9 Jun 2015 17:34:00 +0000 Subject: [PATCH 002/497] Delete README.md --- README.md | 120 ------------------------------------------------------ 1 file changed, 120 deletions(-) delete mode 100644 README.md diff --git a/README.md b/README.md deleted file mode 100644 index 2340b52e6..000000000 --- a/README.md +++ /dev/null @@ -1,120 +0,0 @@ -Agent setup ------------ - -To gather data from remote systems you can use LibreNMS in combination with check_mk (included in the librenms-agent repository). - -The agent uses TCP-Port 6556, please allow access from the LibreNMS-Host and Poller-Nodes if you're using the Distributed Polling setup. - -On each of the hosts you would like to use the agent on then you need to do the following: - -* Copy the `check_mk_agent` script into `/usr/bin` and make it executable. - -```shell -cp check_mk_agent /usr/bin/check_mk_agent -chmod +x /usr/bin/check_mk_agent -``` - -* Copy the xinetd config file into place. - -```shell -cp check_mk_xinetd /etc/xinetd.d/check_mk -``` - -* Create the relevant directories. - -```shell -mkdir -p /usr/lib/check_mk_agent/plugins /usr/lib/check_mk_agent/local -``` - -* Copy each of the scripts from `agent-local/` into `/usr/lib/check_mk_agent/local` -* And restart xinetd. - -```shell -/etc/init.d/xinetd restart -``` - -* Login to the LibreNMS web interface and edit the device you want to monitor. Under the modules section, ensure that unix-agent is enabled. -* Then under Applications, enable the apps that you plan to monitor. -* Wait, in around 10 minutes you should start seeing data in your graphs under Apps for the device. 
- -## Application Specific Configuration - -### BIND9/named - -Create stats file with appropriate permissions: -```shell -~$ touch /etc/bind/named.stats -~$ chown bind:bind /etc/bind/named.stats -``` -Change `user:group` to the user and group that's running bind/named. - -Bind/named configuration: -```text -options { - ... - statistics-file "/etc/bind/named.stats"; - zone-statistics yes; - ... -}; -``` -Restart your bind9/named after changing the configuration. - -Verify that everything works by executing `rndc stats && cat /etc/bind/named.stats`. -In case you get a `Permission Denied` error, make sure you chown'ed correctly. - -Note: if you change the path you will need to change the path in `agent-local/bind`. - -### TinyDNS/djbdns - -__Installation__: - -1. Get tinystats sources from http://www.morettoni.net/tinystats.en.html -2. Compile like as advised. - _Note_: In case you get `Makefile:9: *** missing separator. Stop.`, compile manually using: - * With IPv6: `gcc -Wall -O2 -fstack-protector -DWITH_IPV6 -o tinystats tinystats.c` - * Without IPv6: `gcc -Wall -O2 -fstack-protector -o tinystats tinystats.c` -3. Install into prefered path, like `/usr/bin/`. - -__Configuration__: - -_Note_: In this part we assume that you use DJB's [Daemontools](http://cr.yp.to/daemontools.html) to start/stop tinydns. -And that your tinydns-instance is located in `/service/dns`, adjust this path if necesary. - -1. Replace your _log_'s `run` file, typically located in `/service/dns/log/run` with: - ``` - #!/bin/sh - - exec setuidgid dnslog tinystats ./main/tinystats/ multilog t n3 s250000 ./main/ - ``` -2. Create tinystats directory and chown: - `mkdir /service/dns/log/main/tinystats && chown dnslog:nofiles /service/dns/log/main/tinystats` -3. Restart TinyDNS and Daemontools: `/etc/init.d/svscan restart` - _Note_: Some say `svc -t /service/dns` is enough, on my install (Gentoo) it doesnt rehook the logging and I'm forced to restart it entirely. 
- -### MySQL - -Unlike most other scripts, the MySQL script requires a configuration file `/usr/lib/check_mk_agent/local/mysql.cnf` with following content: - -```php - Date: Tue, 28 Jul 2015 15:58:29 -0300 Subject: [PATCH 003/497] Added Snmpd.conf example and distro executable --- snmp/distro | 66 +++++++++++++++++++++++++++++++++++++++++ snmp/snmpd.conf.example | 13 ++++++++ 2 files changed, 79 insertions(+) create mode 100755 snmp/distro create mode 100644 snmp/snmpd.conf.example diff --git a/snmp/distro b/snmp/distro new file mode 100755 index 000000000..922960d2b --- /dev/null +++ b/snmp/distro @@ -0,0 +1,66 @@ +#!/bin/sh +# Detects which OS and if it is Linux then it will detect which Linux Distribution. + +OS=`uname -s` +REV=`uname -r` +MACH=`uname -m` + +if [ "${OS}" = "SunOS" ] ; then + OS=Solaris + ARCH=`uname -p` + OSSTR="${OS} ${REV}(${ARCH} `uname -v`)" +elif [ "${OS}" = "AIX" ] ; then + OSSTR="${OS} `oslevel` (`oslevel -r`)" +elif [ "${OS}" = "Linux" ] ; then + KERNEL=`uname -r` + if [ -f /etc/redhat-release ] ; then + DIST=$(cat /etc/redhat-release | awk '{print $1}') + if [ "${DIST}" = "CentOS" ]; then + DIST="CentOS" + elif [ "${DIST}" = "Mandriva" ]; then + DIST="Mandriva" + PSEUDONAME=`cat /etc/mandriva-release | sed s/.*\(// | sed s/\)//` + REV=`cat /etc/mandriva-release | sed s/.*release\ // | sed s/\ .*//` + else + DIST="RedHat" + fi + + PSEUDONAME=`cat /etc/redhat-release | sed s/.*\(// | sed s/\)//` + REV=`cat /etc/redhat-release | sed s/.*release\ // | sed s/\ .*//` + elif [ -f /etc/SuSE-release ] ; then + DIST=`cat /etc/SuSE-release | tr "\n" ' '| sed s/VERSION.*//` + REV=`cat /etc/SuSE-release | tr "\n" ' ' | sed s/.*=\ //` + elif [ -f /etc/mandrake-release ] ; then + DIST='Mandrake' + PSEUDONAME=`cat /etc/mandrake-release | sed s/.*\(// | sed s/\)//` + REV=`cat /etc/mandrake-release | sed s/.*release\ // | sed s/\ .*//` + elif [ -f /etc/debian_version ] ; then + DIST="Debian `cat /etc/debian_version`" + REV="" + elif [ -f 
/etc/gentoo-release ] ; then + DIST="Gentoo" + REV=$(tr -d '[[:alpha:]]' + +#Distro Detection +extend .1.3.6.1.4.1.2021.7890.1 distro /usr/bin/distro + From f1db7742a61316e6cb365d0ca99683f107d16374 Mon Sep 17 00:00:00 2001 From: Mark Schouten Date: Mon, 17 Aug 2015 15:12:12 +0200 Subject: [PATCH 004/497] Add files to create a Debian-package --- Makefile | 13 +++++++++++++ debian/changelog | 5 +++++ debian/compat | 1 + debian/control | 13 +++++++++++++ debian/postinst | 37 +++++++++++++++++++++++++++++++++++++ debian/rules | 7 +++++++ 6 files changed, 76 insertions(+) create mode 100644 Makefile create mode 100644 debian/changelog create mode 100644 debian/compat create mode 100644 debian/control create mode 100644 debian/postinst create mode 100755 debian/rules diff --git a/Makefile b/Makefile new file mode 100644 index 000000000..39ff98b71 --- /dev/null +++ b/Makefile @@ -0,0 +1,13 @@ +PREFIX=${CURDIR}/debian/librenms-agent + +install: + mkdir -p $(PREFIX)/usr/lib/check_mk_agent/plugins + mkdir -p $(PREFIX)/usr/lib/check_mk_agent/local + cp -r agent-local/* $(PREFIX)/usr/lib/check_mk_agent/plugins/ + mkdir -p $(PREFIX)/usr/bin + install -m 0750 check_mk_agent $(PREFIX)/usr/bin/check_mk_agent + mkdir -p $(PREFIX)/etc/xinet.d + install -m 0644 check_mk_xinetd $(PREFIX)/etc/xinet.d/check_mk + +clean: + rm -rf $(CURDIR)/build diff --git a/debian/changelog b/debian/changelog new file mode 100644 index 000000000..edfd4b72d --- /dev/null +++ b/debian/changelog @@ -0,0 +1,5 @@ +librenms-agent (1.0) stable; urgency=low + + - Initial package release + + -- Mark Schouten Mon, 17 Aug 2015 15:00:00 +0200 diff --git a/debian/compat b/debian/compat new file mode 100644 index 000000000..7f8f011eb --- /dev/null +++ b/debian/compat @@ -0,0 +1 @@ +7 diff --git a/debian/control b/debian/control new file mode 100644 index 000000000..69e20f864 --- /dev/null +++ b/debian/control @@ -0,0 +1,13 @@ +Source: librenms-agent +Section: misc +Priority: extra +Maintainer: Mark Schouten 
+Build-Depends: debhelper (>= 5) +Standards-Version: 3.9.1 + +Package: librenms-agent +Architecture: all +Pre-Depends: xinetd +Suggests: php-cli, php-mysql, libwww-perl, curl, dmidecode, netcat, ipmitool, lm-sensors, python-memcache, python-urllib3, rrdtool, libdbd-pg-perl +Description: Install the LibreNMS Unix Agent + Install the LibreNMS Unix Agent. Please note that the suggested packages are required for some of the plugins diff --git a/debian/postinst b/debian/postinst new file mode 100644 index 000000000..9c3782c7d --- /dev/null +++ b/debian/postinst @@ -0,0 +1,37 @@ +#!/bin/sh +set -e + +# summary of how this script can be called: +# * `configure' +# * `abort-upgrade' +# * `abort-remove' `in-favour' +# +# * `abort-deconfigure' `in-favour' +# `removing' +# +# for details, see http://www.debian.org/doc/debian-policy/ or +# the debian-policy package +# + +case "$1" in + configure) + [ -x /usr/sbin/service ] && service xinetd restart + ;; + + abort-upgrade|abort-remove|abort-deconfigure) + ;; + + *) + echo "postinst called with unknown argument \`$1'" >&2 + exit 1 + ;; +esac + +# dh_installdeb will replace this with shell code automatically +# generated by other debhelper scripts. 
+ +#DEBHELPER# + +exit 0 + + diff --git a/debian/rules b/debian/rules new file mode 100755 index 000000000..edfc6500e --- /dev/null +++ b/debian/rules @@ -0,0 +1,7 @@ +#!/usr/bin/make -f + +#export DH_VERBOSE=1 + +%: + dh $@ + From 3febf20c20e03abb23e6514589055bbbe938eb1d Mon Sep 17 00:00:00 2001 From: Mark Schouten Date: Mon, 17 Aug 2015 16:19:04 +0200 Subject: [PATCH 005/497] Add Conflicts/Provides and fix location for xinetd.d --- Makefile | 4 ++-- debian/control | 2 ++ 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 39ff98b71..00e019e1c 100644 --- a/Makefile +++ b/Makefile @@ -6,8 +6,8 @@ install: cp -r agent-local/* $(PREFIX)/usr/lib/check_mk_agent/plugins/ mkdir -p $(PREFIX)/usr/bin install -m 0750 check_mk_agent $(PREFIX)/usr/bin/check_mk_agent - mkdir -p $(PREFIX)/etc/xinet.d - install -m 0644 check_mk_xinetd $(PREFIX)/etc/xinet.d/check_mk + mkdir -p $(PREFIX)/etc/xinetd.d + install -m 0644 check_mk_xinetd $(PREFIX)/etc/xinetd.d/check_mk clean: rm -rf $(CURDIR)/build diff --git a/debian/control b/debian/control index 69e20f864..730529c28 100644 --- a/debian/control +++ b/debian/control @@ -8,6 +8,8 @@ Standards-Version: 3.9.1 Package: librenms-agent Architecture: all Pre-Depends: xinetd +Conflicts: check-mk-agent +Provides: check-mk-agent Suggests: php-cli, php-mysql, libwww-perl, curl, dmidecode, netcat, ipmitool, lm-sensors, python-memcache, python-urllib3, rrdtool, libdbd-pg-perl Description: Install the LibreNMS Unix Agent Install the LibreNMS Unix Agent. 
Please note that the suggested packages are required for some of the plugins From 4858338668fa4d0130234055255fe8b64d4ac53f Mon Sep 17 00:00:00 2001 From: Mark Schouten Date: Mon, 17 Aug 2015 16:48:22 +0200 Subject: [PATCH 006/497] Place all plugins in a repo-dir and add mk_enplug to enable plugins --- Makefile | 4 +++- mk_enplug | 48 ++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 51 insertions(+), 1 deletion(-) create mode 100755 mk_enplug diff --git a/Makefile b/Makefile index 00e019e1c..f8c18c66f 100644 --- a/Makefile +++ b/Makefile @@ -2,10 +2,12 @@ PREFIX=${CURDIR}/debian/librenms-agent install: mkdir -p $(PREFIX)/usr/lib/check_mk_agent/plugins + mkdir -p $(PREFIX)/usr/lib/check_mk_agent/repo mkdir -p $(PREFIX)/usr/lib/check_mk_agent/local - cp -r agent-local/* $(PREFIX)/usr/lib/check_mk_agent/plugins/ + cp -r agent-local/* $(PREFIX)/usr/lib/check_mk_agent/repo/ mkdir -p $(PREFIX)/usr/bin install -m 0750 check_mk_agent $(PREFIX)/usr/bin/check_mk_agent + install -m 0750 mk_enplug $(PREFIX)/usr/bin/mk_enplug mkdir -p $(PREFIX)/etc/xinetd.d install -m 0644 check_mk_xinetd $(PREFIX)/etc/xinetd.d/check_mk diff --git a/mk_enplug b/mk_enplug new file mode 100755 index 000000000..06c575204 --- /dev/null +++ b/mk_enplug @@ -0,0 +1,48 @@ +#!/bin/bash + +plugdir=/usr/lib/check_mk_agent/plugins +repodir=/usr/lib/check_mk_agent/repo + +findscripts() { + find ${repodir} -type f | sed -e "s#$repodir/##g" +} + +script_enabled() { + s=$1 + + if [ -L ${plugdir}/${s} ]; then + echo "yes" + else + echo "no" + fi +} + +enable_script() { + s=$1 + + ln -s ${repodir}/${s} ${plugdir}/${s} + echo "Enabled $s" +} + +scripts=$(findscripts) + +if [ ! -z "$1" ]; then + s=$1 +else + echo "Which plugin do you want to enable?" + echo ${scripts} + read s +fi + +echo "Enabling $s" + +if [ ! -z "$s" ]; then + if [ ! -r ${repodir}/${s} ]; then + echo "Plugin $s does not exist!" 
+ exit 1 + fi + + if [ `script_enabled $s` != "yes" ]; then + enable_script $s + fi +fi From b77ef66ce9610829d28c3a0309f357c71c3684d1 Mon Sep 17 00:00:00 2001 From: Mark Schouten Date: Mon, 17 Aug 2015 16:50:26 +0200 Subject: [PATCH 007/497] Enable dpkg and dmi by default --- debian/postinst | 3 +++ 1 file changed, 3 insertions(+) diff --git a/debian/postinst b/debian/postinst index 9c3782c7d..cd01d1133 100644 --- a/debian/postinst +++ b/debian/postinst @@ -16,6 +16,9 @@ set -e case "$1" in configure) [ -x /usr/sbin/service ] && service xinetd restart + echo "Enable some default services" + /usr/bin/mk_enplug dpkg + /usr/bin/mk_enplug dmi ;; abort-upgrade|abort-remove|abort-deconfigure) From 4d574dc619ad4ceb4ebdbb3571382bfb7234bfef Mon Sep 17 00:00:00 2001 From: Mark Schouten Date: Mon, 17 Aug 2015 16:57:48 +0200 Subject: [PATCH 008/497] Strip comments (on Qemu boxes, this pollutes a lot --- agent-local/dmi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/agent-local/dmi b/agent-local/dmi index f040c89dd..58ec19725 100755 --- a/agent-local/dmi +++ b/agent-local/dmi @@ -5,5 +5,5 @@ echo '<<>>' # requires dmidecode for FIELD in bios-vendor bios-version bios-release-date system-manufacturer system-product-name system-version system-serial-number system-uuid baseboard-manufacturer baseboard-product-name baseboard-version baseboard-serial-number baseboard-asset-tag chassis-manufacturer chassis-type chassis-version chassis-serial-number chassis-asset-tag processor-family processor-manufacturer processor-version processor-frequency do - echo $FIELD=$(dmidecode -s $FIELD) + echo $FIELD=$(dmidecode -s $FIELD | grep -v '^#') done From 9a4dfd1f2be6ce5da43305e47c1ad195f983a4c1 Mon Sep 17 00:00:00 2001 From: Mark Schouten Date: Mon, 17 Aug 2015 16:58:33 +0200 Subject: [PATCH 009/497] Also include distro in this package --- Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/Makefile b/Makefile index f8c18c66f..203025400 100644 --- a/Makefile +++ 
b/Makefile @@ -8,6 +8,7 @@ install: mkdir -p $(PREFIX)/usr/bin install -m 0750 check_mk_agent $(PREFIX)/usr/bin/check_mk_agent install -m 0750 mk_enplug $(PREFIX)/usr/bin/mk_enplug + install -m 0750 snmp/distro $(PREFIX)/usr/bin/distro mkdir -p $(PREFIX)/etc/xinetd.d install -m 0644 check_mk_xinetd $(PREFIX)/etc/xinetd.d/check_mk From b2e65a2e10feb5b72a2adf53252079a585b07d54 Mon Sep 17 00:00:00 2001 From: Daniel Preussker Date: Tue, 18 Aug 2015 15:08:50 +0000 Subject: [PATCH 010/497] Fix MySQL Host Logic --- agent-local/mysql | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/agent-local/mysql b/agent-local/mysql index c5077c859..82967c520 100755 --- a/agent-local/mysql +++ b/agent-local/mysql @@ -268,8 +268,7 @@ function ss_get_mysql_stats( $options ) { $heartbeat = isset($options['heartbeat']) ? $options['heartbeat'] : $heartbeat; # If there is a port, or if it's a non-standard port, we add ":$port" to the # hostname. - $host_str = $host - . $port != 3306 ? ":$port" : ''; + $host_str = $host.($port != 3306 ? ":$port" : ''); debug(array('connecting to', $host_str, $user, $pass)); if (!extension_loaded('mysql') ) { debug("The MySQL extension is not loaded"); From af2925982293aac3658cad0b2d6945a77cb93486 Mon Sep 17 00:00:00 2001 From: Mark Schouten Date: Thu, 20 Aug 2015 10:34:50 +0200 Subject: [PATCH 011/497] Add license --- mk_enplug | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/mk_enplug b/mk_enplug index 06c575204..db524ec2f 100755 --- a/mk_enplug +++ b/mk_enplug @@ -1,5 +1,19 @@ #!/bin/bash +# Copyright (C) 2015 Mark Schouten +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; version 2 dated June, +# 1991. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the +# GNU General Public License for more details. +# +# See http://www.gnu.org/licenses/gpl.txt for the full license + plugdir=/usr/lib/check_mk_agent/plugins repodir=/usr/lib/check_mk_agent/repo From 614d751043805551adaaa68567918f6eee3d745d Mon Sep 17 00:00:00 2001 From: Mark Schouten Date: Thu, 20 Aug 2015 14:18:10 +0200 Subject: [PATCH 012/497] Do not include the README in the repodir --- Makefile | 1 + debian/changelog | 6 ++++++ 2 files changed, 7 insertions(+) diff --git a/Makefile b/Makefile index 203025400..31d16d2df 100644 --- a/Makefile +++ b/Makefile @@ -5,6 +5,7 @@ install: mkdir -p $(PREFIX)/usr/lib/check_mk_agent/repo mkdir -p $(PREFIX)/usr/lib/check_mk_agent/local cp -r agent-local/* $(PREFIX)/usr/lib/check_mk_agent/repo/ + rm $(PREFIX)/usr/lib/check_mk_agent/repo/README mkdir -p $(PREFIX)/usr/bin install -m 0750 check_mk_agent $(PREFIX)/usr/bin/check_mk_agent install -m 0750 mk_enplug $(PREFIX)/usr/bin/mk_enplug diff --git a/debian/changelog b/debian/changelog index edfd4b72d..7765710e6 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +librenms-agent (1.0.1) stable; urgency=low + + - Do not include the README in the repodir + + -- Mark Schouten Thu, 20 Aug 2015 14:15:00 +0200 + librenms-agent (1.0) stable; urgency=low - Initial package release From 8406f9b04dd6ba7c41c7fd77a17125d58a8571a2 Mon Sep 17 00:00:00 2001 From: Mark Schouten Date: Fri, 28 Aug 2015 10:49:19 +0200 Subject: [PATCH 013/497] Add proxmox-agent --- agent-local/proxmox | 37 +++++++++++++++++++++++++++++++++++++ 1 file changed, 37 insertions(+) create mode 100755 agent-local/proxmox diff --git a/agent-local/proxmox b/agent-local/proxmox new file mode 100755 index 000000000..5c7b40bec --- /dev/null +++ b/agent-local/proxmox @@ -0,0 +1,37 @@ +#!/usr/bin/perl -w + +use constant { + TIMEZONE => 'Europe/Amsterdam' +}; + +use strict; +use PVE::API2Client; +use PVE::AccessControl; +use PVE::INotify; +use Data::Dumper; + +my $hostname = 
PVE::INotify::read_file("hostname"); + +my $ticket = PVE::AccessControl::assemble_ticket('root@pam'); +my $csrftoken = PVE::AccessControl::assemble_csrf_prevention_token('root@pam'); + +my $conn = PVE::API2Client->new( + ticket => $ticket, + csrftoken => $csrftoken, +); + +my $clustername = $conn->get("/api2/json/cluster/ha/config")->{'data'}->{'children'}[0]{'name'}; +if (!defined($clustername)) { + $clustername = $hostname; +} + + +print "<<>>\n"; + +print "$clustername\n"; + +foreach my $vm (@{$conn->get("/api2/json/nodes/$hostname/netstat")->{'data'}}) { + my $vmid = $vm->{'vmid'}; + my $vmname = $conn->get("/api2/json/nodes/$hostname/qemu/$vmid/config")->{'data'}->{'name'}; + print "$vmid/$vm->{'dev'}/$vm->{'in'}/$vm->{'out'}/$vmname\n"; +}; From 170d8775a25fedaee413c11e864a2755cc5b9978 Mon Sep 17 00:00:00 2001 From: Mark Schouten Date: Fri, 28 Aug 2015 10:49:24 +0200 Subject: [PATCH 014/497] Add proxmox-agent --- debian/changelog | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/debian/changelog b/debian/changelog index 7765710e6..a82b5bc7c 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +librenms-agent (1.0.2) stable; urgency=low + + - Include a Proxmox-agent + + -- Mark Schouten Fri, 28 Aug 2015 10:33:00 +0200 + librenms-agent (1.0.1) stable; urgency=low - Do not include the README in the repodir From a2cb1bd1cbbf72b08b407a3d201f37142f6cdaea Mon Sep 17 00:00:00 2001 From: Mark Schouten Date: Fri, 28 Aug 2015 10:52:04 +0200 Subject: [PATCH 015/497] Add license --- agent-local/proxmox | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/agent-local/proxmox b/agent-local/proxmox index 5c7b40bec..fd1f73a58 100755 --- a/agent-local/proxmox +++ b/agent-local/proxmox @@ -1,5 +1,19 @@ #!/usr/bin/perl -w +# Copyright (C) 2015 Mark Schouten +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; version 2 
dated June, +# 1991. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# See http://www.gnu.org/licenses/gpl.txt for the full license + use constant { TIMEZONE => 'Europe/Amsterdam' }; From 1c58fad39dc73d0c1a028059836ab6b79cd6e42c Mon Sep 17 00:00:00 2001 From: Neil Lathwood Date: Sun, 25 Oct 2015 21:51:43 +0000 Subject: [PATCH 016/497] Update distro to use env --- snmp/distro | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/distro b/snmp/distro index 922960d2b..8d080c9d4 100755 --- a/snmp/distro +++ b/snmp/distro @@ -1,4 +1,4 @@ -#!/bin/sh +#!/usr/bin/env bash # Detects which OS and if it is Linux then it will detect which Linux Distribution. OS=`uname -s` From 14f50729d238c2b197a700474d90477de1b9b2f6 Mon Sep 17 00:00:00 2001 From: Mark Schouten Date: Wed, 4 Nov 2015 14:30:21 +0100 Subject: [PATCH 017/497] Fix the proxmox-agent for Proxmox VE 4.0 --- agent-local/proxmox | 10 +++++++--- debian/changelog | 6 ++++++ 2 files changed, 13 insertions(+), 3 deletions(-) diff --git a/agent-local/proxmox b/agent-local/proxmox index fd1f73a58..a35ade497 100755 --- a/agent-local/proxmox +++ b/agent-local/proxmox @@ -34,11 +34,15 @@ my $conn = PVE::API2Client->new( csrftoken => $csrftoken, ); -my $clustername = $conn->get("/api2/json/cluster/ha/config")->{'data'}->{'children'}[0]{'name'}; -if (!defined($clustername)) { - $clustername = $hostname; +foreach my $child (@{$conn->get("/api2/json/cluster/status")->{'data'}}) { + if ($child->{'type'} eq "cluster") { + $clustername = $child->{'name'}; + } } +if (!defined($clustername)) { + $clustername = $hostname; +} print "<<>>\n"; diff --git a/debian/changelog b/debian/changelog index a82b5bc7c..91f87774b 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +librenms-agent (1.0.3) stable; 
urgency=low + + - Fix the proxmox-agent for Proxmox VE 4.0 + + -- Mark Schouten Wed, 04 Nov 2015 14:33:00 +0200 + librenms-agent (1.0.2) stable; urgency=low - Include a Proxmox-agent From 0c6b4ce81de7c1b30b5fd1d17d9bd220cb3ee495 Mon Sep 17 00:00:00 2001 From: Mark Schouten Date: Wed, 4 Nov 2015 14:40:19 +0100 Subject: [PATCH 018/497] Crap, forgot this line... --- agent-local/proxmox | 2 ++ 1 file changed, 2 insertions(+) diff --git a/agent-local/proxmox b/agent-local/proxmox index a35ade497..afa83beeb 100755 --- a/agent-local/proxmox +++ b/agent-local/proxmox @@ -34,6 +34,8 @@ my $conn = PVE::API2Client->new( csrftoken => $csrftoken, ); +my $clustername; + foreach my $child (@{$conn->get("/api2/json/cluster/status")->{'data'}}) { if ($child->{'type'} eq "cluster") { $clustername = $child->{'name'}; From 14a6d2690cc421395bc9dd7bbaea011c287e346c Mon Sep 17 00:00:00 2001 From: Mark Schouten Date: Tue, 10 Nov 2015 10:58:24 +0100 Subject: [PATCH 019/497] Add support for Ceph --- agent-local/ceph | 65 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 65 insertions(+) create mode 100755 agent-local/ceph diff --git a/agent-local/ceph b/agent-local/ceph new file mode 100755 index 000000000..60966aa27 --- /dev/null +++ b/agent-local/ceph @@ -0,0 +1,65 @@ +#!/usr/bin/env python + +# Copyright (C) 2015 Mark Schouten +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; version 2 dated June, +# 1991. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# See http://www.gnu.org/licenses/gpl.txt for the full license + +from subprocess import check_output +import json + +def cephdf(): + cephdf = check_output(["/usr/bin/ceph", "-f", "json", "df"]) + + s = json.loads(cephdf) + print("c:%i:%i:%i" % (s['stats']['total_bytes'], s['stats']['total_used_bytes'], s['stats']['total_avail_bytes'])) + + for p in s['pools']: + b = p['stats']['bytes_used'] + a = p['stats']['max_avail'] + o = p['stats']['objects'] + print("%s:%i:%i:%i" % (p['name'], a, b, o)) + + +def osdperf(): + osdperf = check_output(["/usr/bin/ceph", "-f", "json", "osd", "perf"]) + + for o in json.loads(osdperf)['osd_perf_infos']: + print("osd.%s:%i:%i" % (o['id'], o['perf_stats']['apply_latency_ms'], o['perf_stats']['commit_latency_ms'])) + +def poolstats(): + poolstats = check_output(["/usr/bin/ceph", "-f", "json", "osd", "pool", "stats"]) + + for p in json.loads(poolstats): + try: + r = p['client_io_rate']['read_bytes_sec'] + except: + r = 0 + try: + w = p['client_io_rate']['write_bytes_sec'] + except: + w = 0 + try: + o = p['client_io_rate']['op_per_sec'] + except: + o = 0 + + print("%s:%i:%i:%i" % (p['pool_name'], o, w, r)) + +print "<<>>" +print "" +poolstats() +print "" +osdperf() +print "" +cephdf() + From 27f0a025cabab3ceea2003e31ccbaed4d53ae8ad Mon Sep 17 00:00:00 2001 From: Mark Schouten Date: Tue, 10 Nov 2015 11:00:58 +0100 Subject: [PATCH 020/497] Add support for Ceph --- debian/changelog | 6 ++++++ debian/control | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/debian/changelog b/debian/changelog index 91f87774b..6e5ab31f6 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +librenms-agent (1.0.4) stable; urgency=low + + - Include a Ceph agent + + -- Mark Schouten Wed, 10 Nov 2015 10:58:00 +0200 + librenms-agent (1.0.3) stable; urgency=low - Fix the proxmox-agent for Proxmox VE 4.0 diff --git a/debian/control b/debian/control index 730529c28..536b52b44 100644 --- a/debian/control +++ b/debian/control @@ 
-10,6 +10,6 @@ Architecture: all Pre-Depends: xinetd Conflicts: check-mk-agent Provides: check-mk-agent -Suggests: php-cli, php-mysql, libwww-perl, curl, dmidecode, netcat, ipmitool, lm-sensors, python-memcache, python-urllib3, rrdtool, libdbd-pg-perl +Suggests: php-cli, php-mysql, libwww-perl, curl, dmidecode, netcat, ipmitool, lm-sensors, python-memcache, python-urllib3, rrdtool, libdbd-pg-perl, python-simplejson Description: Install the LibreNMS Unix Agent Install the LibreNMS Unix Agent. Please note that the suggested packages are required for some of the plugins From 864fe7c174e26bb8102dcd4a8499743398cbb6b0 Mon Sep 17 00:00:00 2001 From: Mike Rostermund Date: Wed, 11 Nov 2015 14:21:49 +0100 Subject: [PATCH 021/497] Moved mysql tag a bit up and added a newline to error msg. --- agent-local/mysql | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/agent-local/mysql b/agent-local/mysql index 82967c520..255952c95 100755 --- a/agent-local/mysql +++ b/agent-local/mysql @@ -74,10 +74,12 @@ $version = "1.1.7"; # ============================================================================ # Include settings from an external config file (issue 39). # ============================================================================ +echo("<<>>\n"); + if (file_exists(__FILE__ . '.cnf' ) ) { require(__FILE__ . '.cnf'); } else { - echo("No ".__FILE__ . '.cnf found!'); + echo("No ".__FILE__ . ".cnf found!\n"); exit(); } @@ -132,8 +134,6 @@ if (!isset($called_by_script_server)) { ob_end_flush(); # In debugging mode, print out the errors. } - echo("<<>>\n"); - # Split the result up and extract only the desired parts of it. 
$options['items'] = ""; $wanted = explode(',', $options['items']); From c0e297090fd7ef567a2a5b23ebef7a7c9f099b8f Mon Sep 17 00:00:00 2001 From: Mark Schouten Date: Mon, 23 Nov 2015 14:10:15 +0100 Subject: [PATCH 022/497] Add PowerDNS Authoritative Agent --- agent-local/powerdns | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) create mode 100755 agent-local/powerdns diff --git a/agent-local/powerdns b/agent-local/powerdns new file mode 100755 index 000000000..eaee4cd5e --- /dev/null +++ b/agent-local/powerdns @@ -0,0 +1,22 @@ +#!/usr/bin/env python + +from subprocess import check_output + +vars = [ 'corrupt-packets', 'deferred-cache-inserts', 'deferred-cache-lookup', +'latency', 'packetcache-hit', 'packetcache-miss', 'packetcache-size', +'qsize-q', 'query-cache-hit', 'query-cache-miss', 'recursing-answers', +'recursing-questions', 'servfail-packets', 'tcp-answers', 'tcp-queries', +'timedout-packets', 'udp-answers', 'udp-queries', 'udp4-answers', +'udp4-queries', 'udp6-answers', 'udp6-queries' ] + +rvars = {} + +for l in check_output(['/usr/bin/pdns_control', 'show', '*']).rstrip().split(','): + v = l.split('=') + if len(v) > 1: + rvars[v[0]] = v[1] + +print "<<>>" + +for k in vars: + print rvars[k] From b7ea6aa4f239a4bc8d5a32f1f0a8fb0ee08ba362 Mon Sep 17 00:00:00 2001 From: Mark Schouten Date: Mon, 23 Nov 2015 14:10:17 +0100 Subject: [PATCH 023/497] Add PowerDNS Authoritative Agent --- debian/changelog | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/debian/changelog b/debian/changelog index 6e5ab31f6..a0676a941 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,9 @@ +librenms-agent (1.0.5) stable; urgency=low + + - Include a PowerDNS Authoritative agent + + -- Mark Schouten Mon, 23 Nov 2015 14:10:00 +0200 + librenms-agent (1.0.4) stable; urgency=low - Include a Ceph agent From 95923c184374d82d1330c8f6ce5fab49e715356f Mon Sep 17 00:00:00 2001 From: f0o Date: Wed, 25 Nov 2015 13:26:26 +0000 Subject: [PATCH 024/497] Snapshot upstream 
changes --- snmp/distro | 43 ++++++++++++++++++++++++++++++++++--------- 1 file changed, 34 insertions(+), 9 deletions(-) diff --git a/snmp/distro b/snmp/distro index 8d080c9d4..96cbbe75f 100755 --- a/snmp/distro +++ b/snmp/distro @@ -9,10 +9,13 @@ if [ "${OS}" = "SunOS" ] ; then OS=Solaris ARCH=`uname -p` OSSTR="${OS} ${REV}(${ARCH} `uname -v`)" + elif [ "${OS}" = "AIX" ] ; then OSSTR="${OS} `oslevel` (`oslevel -r`)" + elif [ "${OS}" = "Linux" ] ; then KERNEL=`uname -r` + if [ -f /etc/redhat-release ] ; then DIST=$(cat /etc/redhat-release | awk '{print $1}') if [ "${DIST}" = "CentOS" ]; then @@ -27,26 +30,43 @@ elif [ "${OS}" = "Linux" ] ; then PSEUDONAME=`cat /etc/redhat-release | sed s/.*\(// | sed s/\)//` REV=`cat /etc/redhat-release | sed s/.*release\ // | sed s/\ .*//` - elif [ -f /etc/SuSE-release ] ; then - DIST=`cat /etc/SuSE-release | tr "\n" ' '| sed s/VERSION.*//` - REV=`cat /etc/SuSE-release | tr "\n" ' ' | sed s/.*=\ //` + elif [ -f /etc/mandrake-release ] ; then DIST='Mandrake' PSEUDONAME=`cat /etc/mandrake-release | sed s/.*\(// | sed s/\)//` REV=`cat /etc/mandrake-release | sed s/.*release\ // | sed s/\ .*//` + elif [ -f /etc/debian_version ] ; then DIST="Debian `cat /etc/debian_version`" REV="" + elif [ -f /etc/gentoo-release ] ; then DIST="Gentoo" REV=$(tr -d '[[:alpha:]]' Date: Mon, 28 Mar 2016 12:56:26 -0500 Subject: [PATCH 025/497] Add systemd unit files --- check_mk.socket | 9 +++++++++ check_mk@.service | 7 +++++++ 2 files changed, 16 insertions(+) create mode 100644 check_mk.socket create mode 100644 check_mk@.service diff --git a/check_mk.socket b/check_mk.socket new file mode 100644 index 000000000..02760ff0e --- /dev/null +++ b/check_mk.socket @@ -0,0 +1,9 @@ +[Unit] +Description=Check_MK LibreNMS Agent Socket + +[Socket] +ListenStream=6556 +Accept=yes + +[Install] +WantedBy=sockets.target diff --git a/check_mk@.service b/check_mk@.service new file mode 100644 index 000000000..02fce603f --- /dev/null +++ b/check_mk@.service @@ -0,0 +1,7 
@@ +[Unit] +Description=Check_MK LibreNMS Agent Script +After=network.target + +[Service] +ExecStart=/usr/bin/check_mk_agent +StandardOutput=socket From 6b7111ffd315617241a3931b753a593f84d779fd Mon Sep 17 00:00:00 2001 From: Tony Murray Date: Tue, 29 Mar 2016 08:29:02 -0500 Subject: [PATCH 026/497] Fix wording for systemd unit --- check_mk@.service | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/check_mk@.service b/check_mk@.service index 02fce603f..bea7144d8 100644 --- a/check_mk@.service +++ b/check_mk@.service @@ -1,5 +1,5 @@ [Unit] -Description=Check_MK LibreNMS Agent Script +Description=Check_MK LibreNMS Agent Service After=network.target [Service] From 3e7d59b3b1e25149dc5d16f08f4abf6af186c778 Mon Sep 17 00:00:00 2001 From: Neil Lathwood Date: Thu, 21 Apr 2016 15:39:59 +0100 Subject: [PATCH 027/497] Create check_mk_agent_freebsd Added freebsd agent --- check_mk_agent_freebsd | 426 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 426 insertions(+) create mode 100644 check_mk_agent_freebsd diff --git a/check_mk_agent_freebsd b/check_mk_agent_freebsd new file mode 100644 index 000000000..f5bb36aa7 --- /dev/null +++ b/check_mk_agent_freebsd @@ -0,0 +1,426 @@ +#!/usr/local/bin/bash +# +------------------------------------------------------------------+ +# | ____ _ _ __ __ _ __ | +# | / ___| |__ ___ ___| | __ | \/ | |/ / | +# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / | +# | | |___| | | | __/ (__| < | | | | . \ | +# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ | +# | | +# | Copyright Mathias Kettner 2014 mk@mathias-kettner.de | +# +------------------------------------------------------------------+ +# +# This file is part of Check_MK. +# The official homepage is at http://mathias-kettner.de/check_mk. +# +# check_mk is free software; you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by +# the Free Software Foundation in version 2. 
check_mk is distributed +# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with- +# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. See the GNU General Public License for more de- +# ails. You should have received a copy of the GNU General Public +# License along with GNU Make; see the file COPYING. If not, write +# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, +# Boston, MA 02110-1301 USA. + +# Author: Lars Michelsen +# Florian Heigl +# (Added sections: df mount mem netctr ipmitool) + +# NOTE: This agent has beed adapted from the Check_MK linux agent. +# The most sections are commented out at the moment because +# they have not been ported yet. We will try to adapt most +# sections to print out the same output as the linux agent so +# that the current checks can be used. + +# This might be a good source as description of sysctl output: +# http://people.freebsd.org/~hmp/utilities/satbl/_sysctl.html + +# Remove locale settings to eliminate localized outputs where possible +export LC_ALL=C +unset LANG + +export MK_LIBDIR="/usr/local/lib/check_mk_agent" +export MK_CONFDIR="/etc/check_mk" +export MK_TMPDIR="/var/run/check_mk" + + +# Make sure, locally installed binaries are found +PATH=$PATH:/usr/local/bin:/usr/local/sbin + +# All executables in PLUGINSDIR will simply be executed and their +# ouput appended to the output of the agent. Plugins define their own +# sections and must output headers with '<<<' and '>>>' +PLUGINSDIR=$MK_LIBDIR/plugins + +# All executables in LOCALDIR will by executabled and their +# output inserted into the section <<>>. Please refer +# to online documentation for details. 
+LOCALDIR=$MK_LIBDIR/local + + +# close standard input (for security reasons) and stderr +if [ "$1" = -d ] +then + set -xv +else + exec /dev/null +fi + +# Runs a command asynchronous by use of a cache file +function run_cached() { + if [ "$1" = -s ] ; then local section="echo '<<<$2>>>' ; " ; shift ; fi + local NAME=$1 + local MAXAGE=$2 + shift 2 + local CMDLINE="$section$@" + + if [ ! -d $MK_TMPDIR/cache ]; then mkdir -p $MK_TMPDIR/cache ; fi + CACHEFILE="$MK_TMPDIR/cache/$NAME.cache" + + # Check if the creation of the cache takes suspiciously long and return + # nothing if the age (access time) of $CACHEFILE.new is twice the MAXAGE + local NOW=$(date +%s) + if [ -e "$CACHEFILE.new" ] ; then + local CF_ATIME=$(stat -f "%a" "$CACHEFILE.new") + if [ $((NOW - CF_ATIME)) -ge $((MAXAGE * 2)) ] ; then + return + fi + fi + + # Check if cache file exists and is recent enough + if [ -s "$CACHEFILE" ] ; then + local MTIME=$(stat -f "%m" "$CACHEFILE") + if [ $((NOW - MTIME)) -le $MAXAGE ] ; then local USE_CACHEFILE=1 ; fi + # Output the file in any case, even if it is + # outdated. The new file will not yet be available + cat "$CACHEFILE" + fi + + # Cache file outdated and new job not yet running? Start it + if [ -z "$USE_CACHEFILE" -a ! -e "$CACHEFILE.new" ] ; then + echo "$CMDLINE" | daemon bash -o noclobber > $CACHEFILE.new && mv $CACHEFILE.new $CACHEFILE || rm -f $CACHEFILE $CACHEFILE.new & + fi +} + +echo '<<>>' +echo Version: 1.2.6p16 +echo AgentOS: freebsd + + + +osver="$(uname -r)" +is_jailed="$(sysctl -n security.jail.jailed)" + + +# Partitionen (-P verhindert Zeilenumbruch bei langen Mountpunkten) +# Achtung: NFS-Mounts werden grundsaetzlich ausgeblendet, um +# Haenger zu vermeiden. Diese sollten ohnehin besser auf dem +# Server, als auf dem Client ueberwacht werden. 
+ +echo '<<>>' +# no special zfs handling so far, the ZFS.pools plugin has been tested to +# work on FreeBSD +if df -T > /dev/null ; then + df -kTP -t ufs | egrep -v '(Filesystem|devfs|procfs|fdescfs|basejail)' +else + df -kP -t ufs | egrep -v '(Filesystem|devfs|procfs|fdescfs|basejail)' | awk '{ print $1,"ufs",$2,$3,$4,$5,$6 }' +fi + +# Filesystem usage for ZFS +if type zfs > /dev/null 2>&1 ; then + echo '<<>>' + zfs get -Hp name,quota,used,avail,mountpoint,type -t filesystem,volume || \ + zfs get -Hp name,quota,used,avail,mountpoint,type + echo '[df]' + df -kP -t zfs | sed 1d +fi + +# Check NFS mounts by accessing them with stat -f (System +# call statfs()). If this lasts more then 2 seconds we +# consider it as hanging. We need waitmax. +#if type waitmax >/dev/null +#then +# STAT_VERSION=$(stat --version | head -1 | cut -d" " -f4) +# STAT_BROKE="5.3.0" +# +# echo '<<>>' +# sed -n '/ nfs /s/[^ ]* \([^ ]*\) .*/\1/p' < /proc/mounts | +# while read MP +# do +# if [ $STAT_VERSION != $STAT_BROKE ]; then +# waitmax -s 9 2 stat -f -c "$MP ok %b %f %a %s" "$MP" || \ +# echo "$MP hanging 0 0 0 0" +# else +# waitmax -s 9 2 stat -f -c "$MP ok %b %f %a %s" "$MP" && \ +# printf '\n'|| echo "$MP hanging 0 0 0 0" +# fi +# done +#fi + +# Check mount options. +# FreeBSD doesn't do remount-ro on errors, but the users might consider +# security related mount options more important. +echo '<<>>' +mount -p -t ufs + +# processes including username, without kernel processes +echo '<<>>' +COLUMNS=10000 +if [ "$is_jailed" = "0" ]; then + ps ax -o state,user,vsz,rss,pcpu,command | sed -e 1d -e '/\([^ ]*J\) */d' -e 's/ *\([^ ]*\) *\([^ ]*\) *\([^ ]*\) *\([^ ]*\) *\([^ ]*\) */(\2,\3,\4,\5) /' +else + ps ax -o user,vsz,rss,pcpu,command | sed -e 1d -e 's/ *\([^ ]*\) *\([^ ]*\) *\([^ ]*\) *\([^ ]*\) */(\1,\2,\3,\4) /' +fi + + +# Produce compatible load/cpu output to linux agent. Not so easy here. 
+echo '<<>>' +echo `sysctl -n vm.loadavg | tr -d '{}'` `top -b -n 1 | grep -E '^[0-9]+ processes' | awk '{print $3"/"$1}'` `sysctl -n kern.lastpid` `sysctl -n hw.ncpu` + +# Calculate the uptime in seconds since epoch compatible to /proc/uptime in linux +echo '<<>>' +up_seconds=$(( `date +%s` - `sysctl -n kern.boottime | cut -f1 -d\, | awk '{print $4}'`)) +idle_seconds=$(ps axw | grep idle | grep -v grep | awk '{print $4}' | cut -f1 -d\: ) +echo "$up_seconds $idle_seconds" + +# Platten- und RAID-Status von LSI-Controlleren, falls vorhanden +#if which cfggen > /dev/null ; then +# echo '<<>>' +# cfggen 0 DISPLAY | egrep '(Target ID|State|Volume ID|Status of volume)[[:space:]]*:' | sed -e 's/ *//g' -e 's/:/ /' +#fi + + +# Multipathing is supported in FreeBSD by now +# http://www.mywushublog.com/2010/06/freebsd-and-multipath/ +if kldstat -v | grep g_multipath > /dev/null ; then + echo '<<>>' + gmultipath status | grep -v ^Name +fi + + +# Soft-RAID +echo '<<>>' +gmirror status | grep -v ^Name + +# Performancecounter Kernel +echo "<<>>" +date +%s +forks=`sysctl -n vm.stats.vm.v_forks` +vforks=`sysctl -n vm.stats.vm.v_vforks` +rforks=`sysctl -n vm.stats.vm.v_rforks` +kthreads=`sysctl -n vm.stats.vm.v_kthreads` +echo "cpu" `sysctl -n kern.cp_time | awk ' { print $1" "$2" "$3" "$5" "$4 } '` +echo "ctxt" `sysctl -n vm.stats.sys.v_swtch` +echo "processes" `expr $forks + $vforks + $rforks + $kthreads` + +# Network device statistics (Packets, Collisions, etc) +# only the "Link/Num" interface has all counters. +echo '<<>>' +date +%s +if [ "$(echo $osver | cut -f1 -d\. 
)" -gt "8" ]; then + netstat -inb | egrep -v '(^Name|lo|plip)' | grep Link | awk '{print $1" "$8" "$5" "$6" "$7" 0 0 0 0 "$11" "$9" "$10" 0 0 0 0 0"}' +else + # pad output for freebsd 7 and before + netstat -inb | egrep -v '(^Name|lo|plip)' | grep Link | awk '{print $1" "$7" "$5" "$6" 0 0 0 0 0 "$10" "$8" "$9" 0 0 "$11" 0 0"}' +fi + + +# IPMI-Data (Fans, CPU, temperature, etc) +# needs the sysutils/ipmitool and kldload ipmi.ko +if which ipmitool >/dev/null ; then + echo '<<>>' + ipmitool sensor list \ + | grep -v 'command failed' \ + | sed -e 's/ *| */|/g' -e "s/ /_/g" -e 's/_*$//' -e 's/|/ /g' \ + | egrep -v '^[^ ]+ na ' \ + | grep -v ' discrete ' +fi + + +# State of LSI MegaRAID controller via MegaCli. +# To install: pkg install megacli +if which MegaCli >/dev/null ; then + echo '<<>>' + MegaCli -PDList -aALL -NoLog < /dev/null | egrep 'Enclosure|Raw Size|Slot Number|Device Id|Firmware state|Inquiry' + echo '<<>>' + MegaCli -LDInfo -Lall -aALL -NoLog < /dev/null | egrep 'Size|State|Number|Adapter|Virtual' + echo '<<>>' + MegaCli -AdpBbuCmd -GetBbuStatus -aALL -NoLog < /dev/null | grep -v Exit +fi + + +# OpenVPN Clients. 
+# Correct log location unknown, sed call might also be broken +if [ -e /var/log/openvpn/openvpn-status.log ] ; then + echo '<<>>' + sed -n -e '/CLIENT LIST/,/ROUTING TABLE/p' < /var/log/openvpn/openvpn-status.log | sed -e 1,3d -e '$d' +fi + + +if which ntpq > /dev/null 2>&1 ; then + echo '<<>>' + # remote heading, make first column space separated + ntpq -np | sed -e 1,2d -e 's/^\(.\)/\1 /' -e 's/^ /%/' +fi + + +# Checks for cups monitoring +#if which lpstat > /dev/null 2>&1; then +# echo '<<>>' +# lpstat -p +# echo '---' +# for i in $(lpstat -p | grep -E "^(printer|Drucker)" | awk '{print $2}' | grep -v "@"); do +# lpstat -o "$i" +# done +#fi + +# Heartbeat monitoring +#if which cl_status > /dev/null 2>&1; then +# # Different handling for heartbeat clusters with and without CRM +# # for the resource state +# if [ -S /var/run/heartbeat/crm/cib_ro ]; then +# echo '<<>>' +# crm_mon -1 -r | grep -v ^$ | sed 's/^\s/_/g' +# else +# echo '<<>>' +# cl_status rscstatus +# fi +# +# echo '<<>>' +# for NODE in $(cl_status listnodes); do +# if [ $NODE != $HOSTNAME ]; then +# STATUS=$(cl_status nodestatus $NODE) +# echo -n "$NODE $STATUS" +# for LINK in $(cl_status listhblinks $NODE 2>/dev/null); do +# echo -n " $LINK $(cl_status hblinkstatus $NODE $LINK)" +# done +# echo +# fi +# done +#fi + +# Number of TCP connections in the various states +echo '<<>>' +netstat -na | awk ' /^tcp/ { c[$6]++; } END { for (x in c) { print x, c[x]; } }' + + +# Postfix mailqueue monitoring +# +# Only handle mailq when postfix user is present. The mailq command is also +# available when postfix is not installed. But it produces different outputs +# which are not handled by the check at the moment. So try to filter out the +# systems not using postfix by searching for the postfix user. +# +# Cannot take the whole outout. This could produce several MB of agent output +# on blocking queues. +# Only handle the last 6 lines (includes the summary line at the bottom and +# the last message in the queue. 
The last message is not used at the moment +# but it could be used to get the timestamp of the last message. +if which mailq >/dev/null 2>&1 && getent passwd postfix >/dev/null 2>&1; then + echo '<<>>' + mailq | tail -n 6 +fi + +#Check status of qmail mailqueue +if type qmail-qstat >/dev/null +then + echo "<<>>" + qmail-qstat +fi + +# check zpool status +if [ -x /sbin/zpool ]; then + echo "<<>>" + /sbin/zpool status -x | grep -v "errors: No known data errors" +fi + + +# Statgrab +# To install: pkg install libstatgrab +if type statgrab 2>&1 >/dev/null ; then + + statgrab_vars="const. disk. general. page. proc. user." + statgrab_vars_mem="mem. swap." + statgrab_sections="proc disk page" + + statgrab $statgrab_vars 1> /tmp/statgrab.$$ + statgrab $statgrab_vars_mem 1>>/tmp/statgrab.$$ + + + for s in $statgrab_sections + do + echo "<<>>" + grep "^${s}\." /tmp/statgrab.$$ | cut -d. -f2-99 | sed 's/ *= */ /' + done + + echo '<<>>' + statgrab net. 2>&1 | cut -d. -f2-99 | sed 's/ *= */ /' + + echo '<<>>' + egrep "^(swap|mem)\." /tmp/statgrab.$$ | sed 's/ *= */ /' + + [ -f /tmp/statgrab.$$ ] && rm -f /tmp/statgrab.$$ +fi + + +# Fileinfo-Check: put patterns for files into /etc/check_mk/fileinfo.cfg +if [ -r "$MK_CONFDIR/fileinfo.cfg" ] ; then + echo '<<>>' + date +%s + for line in $(cat "$MK_CONFDIR/fileinfo.cfg") + do + stat -f "%N|%z|%m" $line 2>/dev/null + + if [ $? 
-ne 0 ]; then + echo "$line|missing|$(date +%s)" + fi + done +fi + + +# Local checks +echo '<<>>' +if cd $LOCALDIR ; then + for skript in $(ls) ; do + if [ -f "$skript" -a -x "$skript" ] ; then + ./$skript + fi + done + # Call some plugins only every X'th minute + for skript in [1-9]*/* ; do + if [ -x "$skript" ] ; then + run_cached local_${skript//\//\\} ${skript%/*} "$skript" + fi + done +fi + +# Plugins +if cd $PLUGINSDIR; then + for skript in $(ls) ; do + if [ -f "$skript" -a -x "$skript" ] ; then + ./$skript + fi + done + # Call some plugins only every X'th minute + for skript in [1-9]*/* ; do + if [ -x "$skript" ] ; then + run_cached plugins_${skript//\//\\} ${skript%/*} "$skript" + fi + done +fi + + +# MK's Remote Plugin Executor +if [ -e "$MK_CONFDIR/mrpe.cfg" ] +then + echo '<<>>' + grep -Ev '^[[:space:]]*($|#)' "$MK_CONFDIR/mrpe.cfg" | \ + while read descr cmdline + do + PLUGIN=${cmdline%% *} + OUTPUT=$(eval "$cmdline") + echo -n "(${PLUGIN##*/}) $descr $? $OUTPUT" | tr \\n \\1 + echo + done +fi From 636faae45117bc4e86954b721d2551d64d75e46f Mon Sep 17 00:00:00 2001 From: Neil Lathwood Date: Thu, 21 Apr 2016 15:41:06 +0100 Subject: [PATCH 028/497] Update check_mk_agent_freebsd --- check_mk_agent_freebsd | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/check_mk_agent_freebsd b/check_mk_agent_freebsd index f5bb36aa7..37debc4ef 100644 --- a/check_mk_agent_freebsd +++ b/check_mk_agent_freebsd @@ -168,7 +168,7 @@ mount -p -t ufs echo '<<>>' COLUMNS=10000 if [ "$is_jailed" = "0" ]; then - ps ax -o state,user,vsz,rss,pcpu,command | sed -e 1d -e '/\([^ ]*J\) */d' -e 's/ *\([^ ]*\) *\([^ ]*\) *\([^ ]*\) *\([^ ]*\) *\([^ ]*\) */(\2,\3,\4,\5) /' + ps ax -o user,vsz,rss,cputime,pid,command | sed -e 1d -e 's/ *\([^ ]*\) *\([^ ]*\) *\([^ ]*\) *\([^ ]*\) *\([^ ]*\) */(\1,\2,\3,\4,\5) /' else ps ax -o user,vsz,rss,pcpu,command | sed -e 1d -e 's/ *\([^ ]*\) *\([^ ]*\) *\([^ ]*\) *\([^ ]*\) */(\1,\2,\3,\4) /' fi From 7e6a71024058cad450429f0c382fe0eab36f17d4 
Mon Sep 17 00:00:00 2001 From: Robert Verspuy Date: Thu, 5 May 2016 10:27:30 +0200 Subject: [PATCH 029/497] Improved hddtemp agent module I had some issues with the netcat / daemon implementation of the module. netcat was stallingor sometimes netcat did not return the full output of hddtemp. Running hddtemp directly without running it as a daemon is much more stable for me. This new version also does not give any stdout output when hddtemp is not installed or when no disks can be found. Running the script manually on a server does give stderr output for easy debugging. --- agent-local/hddtemp | 41 +++++++++++++++++++++++++++++++++-------- 1 file changed, 33 insertions(+), 8 deletions(-) diff --git a/agent-local/hddtemp b/agent-local/hddtemp index b6780396d..28d3724ba 100755 --- a/agent-local/hddtemp +++ b/agent-local/hddtemp @@ -1,10 +1,35 @@ #!/bin/bash -# hddtemp sensor readings -# needs hddtemp daemon listening on (at least) localhost -# requires netcat to be installed and in the path -# (c) 2012, Tom Laermans for Observium - -echo '<<>>' -nc localhost 7634 -echo +# LibreNMS agent to read HDD/SDD temperature using hddtemp +# +# Copyright (c) 2016 Exa-Omicron +# +# This program is free software: you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by the +# Free Software Foundation, either version 3 of the License, or (at your +# option) any later version. Please see LICENSE.txt at the top level of +# the source code distribution for details. +# +# requires which, awk and sed + +# If disks are missing, they can be added here: +disks="/dev/hd? /dev/sd?" 
+ +hddtemp=`which hddtemp 2>/dev/null` + +if [ "${hddtemp}" != "" ]; then + if [ -x "${hddtemp}" ]; then + content=`${hddtemp} -q ${disks} 2>/dev/null | awk -F": " 'BEGIN{ ORS="" }{ print "|"$1"|"$2"|"$3"|";} ' | sed 's/°/\|/g';` + if [ "${content}" != "" ]; then + echo '<<>>' + echo ${content} + echo + else + echo "no hddtemp compatible disks found" >&2 + fi + else + echo "hddtemp not executable" >&2 + fi +else + echo "hddtemp not installed" >&2 +fi From d514e95c9652a0d7adb6b539110984241f6a4197 Mon Sep 17 00:00:00 2001 From: Tony Murray Date: Wed, 8 Jun 2016 20:35:19 -0500 Subject: [PATCH 030/497] Local script to collect stats from rrdcached Being able to connect to local unix sockets is the primary advantage of this. --- agent-local/rrdcached | 45 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) create mode 100755 agent-local/rrdcached diff --git a/agent-local/rrdcached b/agent-local/rrdcached new file mode 100755 index 000000000..0fca240fa --- /dev/null +++ b/agent-local/rrdcached @@ -0,0 +1,45 @@ +#!/usr/bin/env python +import socket +import sys +import os + +# Unix socket +server_address = '/var/run/rrdcached.sock' + +# TCP socket +#server_address = 'localhost:42217' + +sock = None +try: + if os.path.exists(server_address): + sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) + else: + sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + if ':' in server_address: + split = server_address.rsplit(':', 1) + server_address = (split[0],int(split[1])) + else: + server_address = (server_address, 42217) + + sock.connect(server_address) +except socket.error as e: + sys.stderr.write(str(e) + ': ' + str(server_address) + '\n') + sys.exit(1) + +buffer = '' +max = -1 +try: + sock.settimeout(5) + sock.sendall('STATS\n'.encode()) + while max == -1 or len(buffer.split('\n')) < max: + buffer += sock.recv(1024).decode() + if max == -1: + # the first line contains the number of following lines + max = int(buffer.split(' ')[0]) + 1 +except 
socket.error as e: + sys.stderr.write(str(e) + '\n') + sys.exit(1) + +sock.close() +print('<<>>') +print(buffer.rstrip('\n')) From 8bbf3362f5e93d2d169bee6adfeca111138addc0 Mon Sep 17 00:00:00 2001 From: Tony Murray Date: Wed, 29 Jun 2016 19:52:10 -0500 Subject: [PATCH 031/497] Python3 fixes for powerdns agent. Compatible with python2. --- agent-local/powerdns | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/agent-local/powerdns b/agent-local/powerdns index eaee4cd5e..acd251d78 100755 --- a/agent-local/powerdns +++ b/agent-local/powerdns @@ -11,12 +11,12 @@ vars = [ 'corrupt-packets', 'deferred-cache-inserts', 'deferred-cache-lookup', rvars = {} -for l in check_output(['/usr/bin/pdns_control', 'show', '*']).rstrip().split(','): +for l in check_output(['/usr/bin/pdns_control', 'show', '*']).decode().rstrip().split(','): v = l.split('=') if len(v) > 1: rvars[v[0]] = v[1] -print "<<>>" +print("<<>>") for k in vars: - print rvars[k] + print(rvars[k]) From a367018a08e5dbaeeee367e3e14e9344b147d0a7 Mon Sep 17 00:00:00 2001 From: Neil Lathwood Date: Sat, 9 Jul 2016 19:12:13 +0100 Subject: [PATCH 032/497] Added nfsstats.sh file --- agent-local/nfsstats.sh | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) create mode 100644 agent-local/nfsstats.sh diff --git a/agent-local/nfsstats.sh b/agent-local/nfsstats.sh new file mode 100644 index 000000000..dc8cf0234 --- /dev/null +++ b/agent-local/nfsstats.sh @@ -0,0 +1,28 @@ +#!/bin/bash +############################################################# +# - put this file on the nfs server somewhere like /opt/ # +# - edit your snmpd.conf and add line # +# extend nfsstat /opt/nfsstats.sh # +# - restart snmpd # +# - make sure that you have all the binaries required below # +############################################################# + +CFG_NFSVER='3' +BIN_NFSSTAT='/usr/sbin/nfsstat' +BIN_TR='/usr/bin/tr' +BIN_CUT='/usr/bin/cut' +BIN_GREP='/usr/bin/grep' +BIN_PASTE='/usr/bin/paste' 
+BIN_RM='/usr/bin/rm' +BIN_MV='/usr/bin/mv' +LOG_OLD='/tmp/nfsstats_old' +LOG_NEW='/tmp/nfsstats_new' + +$BIN_NFSSTAT -$CFG_NFSVER -n -l | $BIN_TR -s " " | $BIN_CUT -d ' ' -f 5 | $BIN_GREP -v '^$' > $LOG_NEW 2>&1 + +$BIN_PASTE $LOG_NEW $LOG_OLD | while read a b ; do + echo $(($a - $b)) +done + +$BIN_RM $LOG_OLD 2>&1 +$BIN_MV $LOG_NEW $LOG_OLD 2>&1 From 79a3a7ef6a88eed6a22ad3d5e1a99f82d13138c4 Mon Sep 17 00:00:00 2001 From: Mark Schouten Date: Mon, 11 Jul 2016 17:06:14 +0200 Subject: [PATCH 033/497] Wrap these calls in an eval to prevent it from dying if its a container instead of a qemu vm. Fixes #28 --- agent-local/proxmox | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/agent-local/proxmox b/agent-local/proxmox index afa83beeb..e2663f915 100755 --- a/agent-local/proxmox +++ b/agent-local/proxmox @@ -52,6 +52,12 @@ print "$clustername\n"; foreach my $vm (@{$conn->get("/api2/json/nodes/$hostname/netstat")->{'data'}}) { my $vmid = $vm->{'vmid'}; - my $vmname = $conn->get("/api2/json/nodes/$hostname/qemu/$vmid/config")->{'data'}->{'name'}; - print "$vmid/$vm->{'dev'}/$vm->{'in'}/$vm->{'out'}/$vmname\n"; + eval { + my $vmname = $conn->get("/api2/json/nodes/$hostname/qemu/$vmid/config")->{'data'}->{'name'}; + print "$vmid/$vm->{'dev'}/$vm->{'in'}/$vm->{'out'}/$vmname\n"; + }; + eval { + my $vmname = $conn->get("/api2/json/nodes/$hostname/lxc/$vmid/config")->{'data'}->{'name'}; + print "$vmid/$vm->{'dev'}/$vm->{'in'}/$vm->{'out'}/$vmname\n"; + }; }; From 5d7e1f95a321495c882424aca134b1687b0bf3a0 Mon Sep 17 00:00:00 2001 From: Mark Schouten Date: Wed, 13 Jul 2016 15:06:57 +0200 Subject: [PATCH 034/497] Something like this @einarjh ? 
--- agent-local/proxmox | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/agent-local/proxmox b/agent-local/proxmox index e2663f915..09f0fb3bd 100755 --- a/agent-local/proxmox +++ b/agent-local/proxmox @@ -54,10 +54,18 @@ foreach my $vm (@{$conn->get("/api2/json/nodes/$hostname/netstat")->{'data'}}) { my $vmid = $vm->{'vmid'}; eval { my $vmname = $conn->get("/api2/json/nodes/$hostname/qemu/$vmid/config")->{'data'}->{'name'}; + my $tmpl = $conn->get("/api2/json/nodes/$hostname/qemu/$vmid/config")->{'data'}->{'template'}; + if (defined($tmpl) && $tmpl == 1) { + die; + } print "$vmid/$vm->{'dev'}/$vm->{'in'}/$vm->{'out'}/$vmname\n"; }; eval { - my $vmname = $conn->get("/api2/json/nodes/$hostname/lxc/$vmid/config")->{'data'}->{'name'}; + my $vmname = $conn->get("/api2/json/nodes/$hostname/lxc/$vmid/config")->{'data'}->{'hostname'}; + my $tmpl = $conn->get("/api2/json/nodes/$hostname/lxc/$vmid/config")->{'data'}->{'template'}; + if (defined($tmpl) && $tmpl == 1) { + die; + } print "$vmid/$vm->{'dev'}/$vm->{'in'}/$vm->{'out'}/$vmname\n"; }; }; From 2a3005aecfcbe6fe3ea9c3919c94fddf22790c5c Mon Sep 17 00:00:00 2001 From: Tony Murray Date: Thu, 21 Jul 2016 21:31:25 -0500 Subject: [PATCH 035/497] Use mysqli instead of mysql --- agent-local/mysql | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) mode change 100755 => 100644 agent-local/mysql diff --git a/agent-local/mysql b/agent-local/mysql old mode 100755 new mode 100644 index 255952c95..4db91f689 --- a/agent-local/mysql +++ b/agent-local/mysql @@ -270,18 +270,20 @@ function ss_get_mysql_stats( $options ) { # hostname. $host_str = $host.($port != 3306 ? 
":$port" : ''); debug(array('connecting to', $host_str, $user, $pass)); - if (!extension_loaded('mysql') ) { + if (!extension_loaded('mysqli') ) { debug("The MySQL extension is not loaded"); die("The MySQL extension is not loaded"); } if ($mysql_ssl || (isset($options['mysql_ssl']) && $options['mysql_ssl']) ) { - $conn = mysql_connect($host_str, $user, $pass, true, MYSQL_CLIENT_SSL); + $conn = ((($GLOBALS["___mysqli_ston"] = mysqli_init()) && (mysqli_real_connect($GLOBALS["___mysqli_ston"], $host_str, + $user, $pass, NULL, 3306, NULL, MYSQLI_CLIENT_SSL))) ? $GLOBALS["___mysqli_ston"] : FALSE); } else { - $conn = mysql_connect($host_str, $user, $pass); + $conn = ($GLOBALS["___mysqli_ston"] = mysqli_connect($host_str, $user, $pass)); } if (!$conn ) { - die("MySQL: " . mysql_error()); + die("MySQL: " . ((is_object($GLOBALS["___mysqli_ston"])) ? mysqli_error($GLOBALS["___mysqli_ston"]) : + (($___mysqli_res = mysqli_connect_error()) ? $___mysqli_res : false))); } $sanitized_host = str_replace(array(":", "/"), array("", "_"), $host); @@ -1123,16 +1125,16 @@ function to_int ( $str ) { function run_query($sql, $conn) { global $debug; debug($sql); - $result = @mysql_query($sql, $conn); + $result = @mysqli_query( $conn, $sql); if ($debug ) { - $error = @mysql_error($conn); + $error = @((is_object($conn)) ? mysqli_error($conn) : (($___mysqli_res = mysqli_connect_error()) ? 
$___mysqli_res : false)); if ($error ) { debug(array($sql, $error)); die("SQLERR $error in $sql"); } } $array = array(); - while ( $row = @mysql_fetch_array($result) ) { + while ( $row = @mysqli_fetch_array($result) ) { $array[] = $row; } debug(array($sql, $array)); @@ -1250,5 +1252,3 @@ function debug($val) { $debug_log = FALSE; } } - -?> From 2cfe5a1c29cdd1e39fecdb7d23d4bed6da28a68c Mon Sep 17 00:00:00 2001 From: Tony Murray Date: Thu, 21 Jul 2016 22:26:02 -0500 Subject: [PATCH 036/497] Fix permissions --- agent-local/mysql | 0 1 file changed, 0 insertions(+), 0 deletions(-) mode change 100644 => 100755 agent-local/mysql diff --git a/agent-local/mysql b/agent-local/mysql old mode 100644 new mode 100755 From 3121a03e071b5dbb5bb8d01b3738edcc32a473cd Mon Sep 17 00:00:00 2001 From: Tony Murray Date: Thu, 21 Jul 2016 22:28:41 -0500 Subject: [PATCH 037/497] Copy nfsstats script from main repo. Send PR to remove scripts from the main repo. --- agent-local/nfsstats | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) create mode 100755 agent-local/nfsstats diff --git a/agent-local/nfsstats b/agent-local/nfsstats new file mode 100755 index 000000000..dc8cf0234 --- /dev/null +++ b/agent-local/nfsstats @@ -0,0 +1,28 @@ +#!/bin/bash +############################################################# +# - put this file on the nfs server somewhere like /opt/ # +# - edit your snmpd.conf and add line # +# extend nfsstat /opt/nfsstats.sh # +# - restart snmpd # +# - make sure that you have all the binaries required below # +############################################################# + +CFG_NFSVER='3' +BIN_NFSSTAT='/usr/sbin/nfsstat' +BIN_TR='/usr/bin/tr' +BIN_CUT='/usr/bin/cut' +BIN_GREP='/usr/bin/grep' +BIN_PASTE='/usr/bin/paste' +BIN_RM='/usr/bin/rm' +BIN_MV='/usr/bin/mv' +LOG_OLD='/tmp/nfsstats_old' +LOG_NEW='/tmp/nfsstats_new' + +$BIN_NFSSTAT -$CFG_NFSVER -n -l | $BIN_TR -s " " | $BIN_CUT -d ' ' -f 5 | $BIN_GREP -v '^$' > $LOG_NEW 2>&1 + +$BIN_PASTE $LOG_NEW 
$LOG_OLD | while read a b ; do + echo $(($a - $b)) +done + +$BIN_RM $LOG_OLD 2>&1 +$BIN_MV $LOG_NEW $LOG_OLD 2>&1 From d8c46617d302c62557863c45b39ed7c314608dae Mon Sep 17 00:00:00 2001 From: Tony Murray Date: Fri, 22 Jul 2016 15:22:48 -0500 Subject: [PATCH 038/497] Remove duplicate nfsstats file --- agent-local/nfsstats.sh | 28 ---------------------------- 1 file changed, 28 deletions(-) delete mode 100644 agent-local/nfsstats.sh diff --git a/agent-local/nfsstats.sh b/agent-local/nfsstats.sh deleted file mode 100644 index dc8cf0234..000000000 --- a/agent-local/nfsstats.sh +++ /dev/null @@ -1,28 +0,0 @@ -#!/bin/bash -############################################################# -# - put this file on the nfs server somewhere like /opt/ # -# - edit your snmpd.conf and add line # -# extend nfsstat /opt/nfsstats.sh # -# - restart snmpd # -# - make sure that you have all the binaries required below # -############################################################# - -CFG_NFSVER='3' -BIN_NFSSTAT='/usr/sbin/nfsstat' -BIN_TR='/usr/bin/tr' -BIN_CUT='/usr/bin/cut' -BIN_GREP='/usr/bin/grep' -BIN_PASTE='/usr/bin/paste' -BIN_RM='/usr/bin/rm' -BIN_MV='/usr/bin/mv' -LOG_OLD='/tmp/nfsstats_old' -LOG_NEW='/tmp/nfsstats_new' - -$BIN_NFSSTAT -$CFG_NFSVER -n -l | $BIN_TR -s " " | $BIN_CUT -d ' ' -f 5 | $BIN_GREP -v '^$' > $LOG_NEW 2>&1 - -$BIN_PASTE $LOG_NEW $LOG_OLD | while read a b ; do - echo $(($a - $b)) -done - -$BIN_RM $LOG_OLD 2>&1 -$BIN_MV $LOG_NEW $LOG_OLD 2>&1 From 952fd664bb0bce73646db5787e196d273924d1d1 Mon Sep 17 00:00:00 2001 From: Tony Murray Date: Fri, 22 Jul 2016 15:33:16 -0500 Subject: [PATCH 039/497] PowerDNS Recursor agent --- agent-local/powerdns-recursor | 12 ++++++++++++ 1 file changed, 12 insertions(+) create mode 100755 agent-local/powerdns-recursor diff --git a/agent-local/powerdns-recursor b/agent-local/powerdns-recursor new file mode 100755 index 000000000..7c5fbf2ad --- /dev/null +++ b/agent-local/powerdns-recursor @@ -0,0 +1,12 @@ +#!/usr/bin/python +import 
json, subprocess + +input = subprocess.check_output(['rec_control', 'get-all']) +data = [] + +for line in input.splitlines(): + item = line.split() + data.append({'name': item[0].decode(), 'value': int(item[1].decode())}) + +print('<<>>') +print(json.dumps(data)) From 13eb0eaab5d00e7d62003cad2ab6f24453696241 Mon Sep 17 00:00:00 2001 From: "xavier.beaudouin" Date: Fri, 29 Jul 2016 13:23:20 +0200 Subject: [PATCH 040/497] /bin/bash => /usr/bin/env bash to allow freebsd agent work without patching each files --- agent-local/bind | 2 +- agent-local/dmi | 2 +- agent-local/hddtemp | 2 +- agent-local/nfsstats | 2 +- agent-local/temperature | 2 +- mk_enplug | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/agent-local/bind b/agent-local/bind index 75110d982..ed294e3e2 100755 --- a/agent-local/bind +++ b/agent-local/bind @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # (c) 2015, f0o@devilcode.org # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by diff --git a/agent-local/dmi b/agent-local/dmi index 58ec19725..d14012ffb 100755 --- a/agent-local/dmi +++ b/agent-local/dmi @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash echo '<<>>' diff --git a/agent-local/hddtemp b/agent-local/hddtemp index 28d3724ba..eec057955 100755 --- a/agent-local/hddtemp +++ b/agent-local/hddtemp @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # LibreNMS agent to read HDD/SDD temperature using hddtemp # diff --git a/agent-local/nfsstats b/agent-local/nfsstats index dc8cf0234..02a834cdb 100755 --- a/agent-local/nfsstats +++ b/agent-local/nfsstats @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash ############################################################# # - put this file on the nfs server somewhere like /opt/ # # - edit your snmpd.conf and add line # diff --git a/agent-local/temperature b/agent-local/temperature index d00589751..8b1752fdf 100755 --- a/agent-local/temperature +++ 
b/agent-local/temperature @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # example to output some temperatures #echo "<<>>" #echo "/dev/sda:" `hddtemp /dev/hda -n` diff --git a/mk_enplug b/mk_enplug index db524ec2f..5abecb72f 100755 --- a/mk_enplug +++ b/mk_enplug @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash # Copyright (C) 2015 Mark Schouten # From f4b6c47da0e9947d3ebd41e7a322a97934b37775 Mon Sep 17 00:00:00 2001 From: crcro Date: Fri, 29 Jul 2016 20:19:41 +0300 Subject: [PATCH 041/497] added snmp extend script for nfs-v3-stats application --- snmp/nfs-stats.sh | 47 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 47 insertions(+) create mode 100644 snmp/nfs-stats.sh diff --git a/snmp/nfs-stats.sh b/snmp/nfs-stats.sh new file mode 100644 index 000000000..25bbb6b1e --- /dev/null +++ b/snmp/nfs-stats.sh @@ -0,0 +1,47 @@ +#!/bin/bash +############################################################ +# copy this file somewhere like /opt and chmod +x it # +# edit your snmpd.conf and add the below line and restart: # +# extend nfs-stats /opt/nfs-stats.sh # +############################################################ +CFG_NFSFILE='/proc/net/rpc/nfsd' +BIN_CAT='/usr/bin/cat' +BIN_SED='/usr/bin/sed' +BIN_AWK='/usr/bin/awk' +BIN_TR='/usr/bin/tr' +BIN_PASTE='/usr/bin/paste' +BIN_RM='/usr/bin/rm' +BIN_MV='/usr/bin/mv' +LOG_OLD='/tmp/nfsio_old' +LOG_NEW='/tmp/nfsio_new' +LOG_FIX='/tmp/nfsio_fix' + +#get reply cache (rc - values: hits, misses, nocache) +$BIN_CAT $CFG_NFSFILE | $BIN_SED -n 1p | $BIN_AWK '{print $2,$3,$4}' | $BIN_TR " " "\n" > $LOG_NEW + +#get server file handle (fh - values: lookup, anon, ncachedir, ncachenondir, stale) +$BIN_CAT $CFG_NFSFILE | $BIN_SED -n 2p | $BIN_AWK '{print $2,$3,$4,$5,$6}' | $BIN_TR " " "\n" >> $LOG_NEW + +#get io bytes (io - values: read, write) +$BIN_CAT $CFG_NFSFILE | $BIN_SED -n 3p | $BIN_AWK '{print $2,$3}' | $BIN_TR " " "\n" >> $LOG_NEW + +#get read ahead cache (ra - values: cache_size, 0-10%, 10-20%, 20-30%, 
30-40%, 40-50%, 50-60%, 60-70%, 70-80%, 80-90%, 90-100%, not-found) +$BIN_CAT $CFG_NFSFILE | $BIN_SED -n 5p | $BIN_AWK '{print $3,$4,$5,$6,$7,$8,$9,$10,$11,$12,$13}' | $BIN_TR " " "\n" >> $LOG_NEW +$BIN_CAT $CFG_NFSFILE | $BIN_SED -n 5p | $BIN_AWK '{print $2}' > $LOG_FIX + +#get server packet stats (net - values: all reads, udp packets, tcp packets, tcp conn) +$BIN_CAT $CFG_NFSFILE | $BIN_SED -n 6p | $BIN_AWK '{print $2,$3,$4,$5}' | $BIN_TR " " "\n" >> $LOG_NEW + +#get server rpc operations (rpc - values: calls, badcalls, badfmt, badauth, badclnt) +$BIN_CAT $CFG_NFSFILE | $BIN_SED -n 7p | $BIN_AWK '{print $2,$3,$4,$5,$6}' | $BIN_TR " " "\n" >> $LOG_NEW + +#get nfs v3 stats (proc3 - values: null, getattr, setattr, lookup, access, readlink, read, write, create, mkdir, symlink, mknod, remove, rmdir, rename, link, readdir, readdirplus, fsstat, fsinfo, pathconf, commit) +$BIN_CAT $CFG_NFSFILE | $BIN_SED -n 8p | $BIN_AWK '{print $3,$4,$5,$6,$7,$8,$9,$10,$11,$12,$13,$14,$15,$16,$17,$18,$19,$20,$21,$22,$23,$24}' | $BIN_TR " " "\n" >> $LOG_NEW + +$BIN_PASTE $LOG_FIX +$BIN_PASTE $LOG_NEW $LOG_OLD | while read a b ; do + echo $(($a-$b)) +done + +$BIN_RM $LOG_OLD 2>&1 +$BIN_MV $LOG_NEW $LOG_OLD 2>&1 From 814bfb7d35dc1b2a8516827576891931573d53f1 Mon Sep 17 00:00:00 2001 From: crcro Date: Fri, 29 Jul 2016 20:22:35 +0300 Subject: [PATCH 042/497] added snmp extend script for os-updates application --- snmp/os-updates.sh | 45 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) create mode 100755 snmp/os-updates.sh diff --git a/snmp/os-updates.sh b/snmp/os-updates.sh new file mode 100755 index 000000000..092ccf75a --- /dev/null +++ b/snmp/os-updates.sh @@ -0,0 +1,45 @@ +#!/bin/bash +################################################################ +# copy this script to somewhere like /opt and make chmod +x it # +# edit your snmpd.conf and include # +# extend osupdate /opt/os-updates.sh # +# restart snmpd and activate the app for desired host # 
+################################################################ +BIN_AWK='/usr/bin/awk' +BIN_WC='/usr/bin/wc' +CMD_WC='-l' +BIN_ZYPPER='/usr/bin/zypper' +CMD_ZYPPER='lu' +BIN_YUM='/usr/bin/yum' +CMD_YUM='check-update' +BIN_APT='/usr/bin/apt' +CMD_APT='list --upgradable' + +#general check for os based on /etc/os-release +if [ -f /etc/os-release ]; then + OS=`$BIN_AWK -F= '/^ID=/{print $2}' /etc/os-release` + if [ $OS == "opensuse" ]; then + UPDATES=`$BIN_ZYPPER $CMD_ZYPPER | $BIN_WC $CMD_WC` + if [ $UPDATES -gt 3 ]; then + echo $(($UPDATES-3)); + else + echo "0"; + fi + elif [ $OS == "\"centos\"" ]; then + UPDATES=`$BIN_YUM $CMD_YUM | $BIN_WC $CMD_WC` + if [ $UPDATES -gt 6 ]; then + echo $(($UPDATES-6)); + else + echo "0"; + fi + elif [ $OS == "ubuntu" ]; then + UPDATES=`$BIN_APT $CMD_APT | $BIN_WC $CMD_WC` + if [ $UPDATES -gt 1 ]; then + echo $(($UPDATES-1)); + else + echo "0"; + fi + fi +else + echo "0"; +fi From 7e1f0b8b8fdb3ac6025ff481fd87f5cdff98605d Mon Sep 17 00:00:00 2001 From: "xavier.beaudouin" Date: Mon, 1 Aug 2016 11:15:52 +0200 Subject: [PATCH 043/497] Add FreeBSD detection to distro script --- snmp/distro | 3 +++ 1 file changed, 3 insertions(+) diff --git a/snmp/distro b/snmp/distro index 96cbbe75f..8a2ebd02e 100755 --- a/snmp/distro +++ b/snmp/distro @@ -86,6 +86,9 @@ elif [ "${OS}" = "Darwin" ] ; then if [ -f /usr/bin/sw_vers ] ; then OSSTR=`/usr/bin/sw_vers|grep -v Build|sed 's/^.*:.//'| tr "\n" ' '` fi + +elif [ "${OS}" = "FreeBSD" ] ; then + OSSTR=`/usr/bin/uname -mior` fi echo ${OSSTR} From a23d70b31d40de707a8ec0430ae762d71cc0e9c0 Mon Sep 17 00:00:00 2001 From: Florian Beer Date: Wed, 3 Aug 2016 04:51:35 +0200 Subject: [PATCH 044/497] Add Debian and make update call more robust - Debian based systems need to update the index before being able to report upgradable packages. 
- Debian old-stable doesn't have `apt` yet and Ubuntu 14.04 emits the following warning when using `apt` in a script: `WARNING: /usr/bin/apt does not have a stable CLI interface yet. Use with caution in scripts.` By using `apt-get`, issuing a `update` call first and then counting the result of `grep 'Inst'`, this script now works on Debian 7, Debian 8, Ubuntu 14.04 and Ubuntu 16.04. --- snmp/os-updates.sh | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/snmp/os-updates.sh b/snmp/os-updates.sh index 092ccf75a..cd8201c35 100755 --- a/snmp/os-updates.sh +++ b/snmp/os-updates.sh @@ -12,8 +12,9 @@ BIN_ZYPPER='/usr/bin/zypper' CMD_ZYPPER='lu' BIN_YUM='/usr/bin/yum' CMD_YUM='check-update' -BIN_APT='/usr/bin/apt' -CMD_APT='list --upgradable' +BIN_APT='/usr/bin/apt-get' +CMD_APT='-s upgrade' +CMD_UPDATE='-qq update' #general check for os based on /etc/os-release if [ -f /etc/os-release ]; then @@ -32,10 +33,11 @@ if [ -f /etc/os-release ]; then else echo "0"; fi - elif [ $OS == "ubuntu" ]; then - UPDATES=`$BIN_APT $CMD_APT | $BIN_WC $CMD_WC` + elif [ $OS == "ubuntu" ] || [ $OS == "debian" ]; then + `$BIN_APT $CMD_UPDATE` + UPDATES=`$BIN_APT $CMD_APT | grep 'Inst' | $BIN_WC $CMD_WC` if [ $UPDATES -gt 1 ]; then - echo $(($UPDATES-1)); + echo $UPDATES; else echo "0"; fi From f64178bcecbe4948369ad1de26068c21dfee6ef7 Mon Sep 17 00:00:00 2001 From: Florian Beer Date: Wed, 3 Aug 2016 12:16:22 +0200 Subject: [PATCH 045/497] Remove update call as this requires root See discussion here https://github.com/librenms/librenms-agent/pull/40#issuecomment-237198796 --- snmp/os-updates.sh | 2 -- 1 file changed, 2 deletions(-) diff --git a/snmp/os-updates.sh b/snmp/os-updates.sh index cd8201c35..63b827a8f 100755 --- a/snmp/os-updates.sh +++ b/snmp/os-updates.sh @@ -14,7 +14,6 @@ BIN_YUM='/usr/bin/yum' CMD_YUM='check-update' BIN_APT='/usr/bin/apt-get' CMD_APT='-s upgrade' -CMD_UPDATE='-qq update' #general check for os based on /etc/os-release if [ -f 
/etc/os-release ]; then @@ -34,7 +33,6 @@ if [ -f /etc/os-release ]; then echo "0"; fi elif [ $OS == "ubuntu" ] || [ $OS == "debian" ]; then - `$BIN_APT $CMD_UPDATE` UPDATES=`$BIN_APT $CMD_APT | grep 'Inst' | $BIN_WC $CMD_WC` if [ $UPDATES -gt 1 ]; then echo $UPDATES; From bee67b57fe4dc29c0ad25d0bed8a55d5545c94ed Mon Sep 17 00:00:00 2001 From: crcro Date: Wed, 3 Aug 2016 22:09:55 +0300 Subject: [PATCH 046/497] app-dhcp-stats snmp extend --- snmp/dhcp-status.sh | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) create mode 100755 snmp/dhcp-status.sh diff --git a/snmp/dhcp-status.sh b/snmp/dhcp-status.sh new file mode 100755 index 000000000..9169cd74e --- /dev/null +++ b/snmp/dhcp-status.sh @@ -0,0 +1,33 @@ +#!/bin/bash +################################################################ +# copy this script to somewhere like /opt and make chmod +x it # +# edit your snmpd.conf add the below line and restart snmpd # +# extend dhcpstats /opt/dhcp-status.sh # +################################################################ +FILE_DHCP='/var/lib/dhcp/db/dhcpd.leases' +BIN_CAT='/usr/bin/cat' +BIN_GREP='/usr/bin/grep' +BIN_TR='/usr/bin/tr' +BIN_SED='/usr/bin/sed' +BIN_SORT='/usr/bin/sort' +BIN_WC='/usr/bin/wc' +DHCP_LEASES='^lease' +DHCP_ACTIVE='^lease|binding state active' +DHCP_EXPIRED='^lease|binding state expired' +DHCP_RELEASED='^lease|binding state released' +DHCP_ABANDONED='^lease|binding state abandoned' +DHCP_RESET='^lease|binding state reset' +DHCP_BOOTP='^lease|binding state bootp' +DHCP_BACKUP='^lease|binding state backup' +DHCP_FREE='^lease|binding state free' +NO_ERROR='[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3} binding' + +$BIN_CAT $FILE_DHCP | $BIN_GREP $DHCP_LEASES | $BIN_SORT -u | $BIN_WC -l +$BIN_GREP -E "$DHCP_ACTIVE" $FILE_DHCP | $BIN_TR '\n' '|' | $BIN_SED 's/ {| //g' | $BIN_TR '|' '\n' | $BIN_GREP -E "$NO_ERROR" | $BIN_SORT -u | $BIN_WC -l +$BIN_GREP -E "$DHCP_EXPIRED" $FILE_DHCP | $BIN_TR '\n' '|' | $BIN_SED 's/ {| //g' | 
$BIN_TR '|' '\n' | $BIN_GREP -E "$NO_ERROR" | $BIN_SORT -u | $BIN_WC -l +$BIN_GREP -E "$DHCP_RELEASED" $FILE_DHCP | $BIN_TR '\n' '|' | $BIN_SED 's/ {| //g' | $BIN_TR '|' '\n' | $BIN_GREP -E "$NO_ERROR" | $BIN_SORT -u | $BIN_WC -l +$BIN_GREP -E "$DHCP_ABANDONED" $FILE_DHCP | $BIN_TR '\n' '|' | $BIN_SED 's/ {| //g' | $BIN_TR '|' '\n' | $BIN_GREP -E "$NO_ERROR" | $BIN_SORT -u | $BIN_WC -l +$BIN_GREP -E "$DHCP_RESET" $FILE_DHCP | $BIN_TR '\n' '|' | $BIN_SED 's/ {| //g' | $BIN_TR '|' '\n' | $BIN_GREP -E "$NO_ERROR" | $BIN_SORT -u | $BIN_WC -l +$BIN_GREP -E "$DHCP_BOOTP" $FILE_DHCP | $BIN_TR '\n' '|' | $BIN_SED 's/ {| //g' | $BIN_TR '|' '\n' | $BIN_GREP -E "$NO_ERROR" | $BIN_SORT -u | $BIN_WC -l +$BIN_GREP -E "$DHCP_BACKUP" $FILE_DHCP | $BIN_TR '\n' '|' | $BIN_SED 's/ {| //g' | $BIN_TR '|' '\n' | $BIN_GREP -E "$NO_ERROR" | $BIN_SORT -u | $BIN_WC -l +$BIN_GREP -E "$DHCP_FREE" $FILE_DHCP | $BIN_TR '\n' '|' | $BIN_SED 's/ {| //g' | $BIN_TR '|' '\n' | $BIN_GREP -E "$NO_ERROR" | $BIN_SORT -u | $BIN_WC -l From 40138219696f8500ec78b0a07029c1024c0ebc8a Mon Sep 17 00:00:00 2001 From: Tony Murray Date: Tue, 9 Aug 2016 11:40:01 -0500 Subject: [PATCH 047/497] Update distro to match the main repo file --- snmp/distro | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/snmp/distro b/snmp/distro index 8a2ebd02e..11fc5d507 100755 --- a/snmp/distro +++ b/snmp/distro @@ -67,10 +67,12 @@ elif [ "${OS}" = "Linux" ] ; then fi if [ -f /etc/lsb-release -a "${IGNORE_LSB}" != 1 ] ; then - LSB_DIST="`cat /etc/lsb-release | grep DISTRIB_ID | cut -d "=" -f2`" - LSB_REV="`cat /etc/lsb-release | grep DISTRIB_RELEASE | cut -d "=" -f2`" + LSB_DIST=$(lsb_release -si) + LSB_REV=$(lsb_release -sr) if [ "$LSB_DIST" != "" ] ; then DIST=$LSB_DIST + fi + if [ "$LSB_REV" != "" ] ; then REV=$LSB_REV fi fi From 741980ba191a51080a88a7e014955a6342ea27c9 Mon Sep 17 00:00:00 2001 From: Tony Murray Date: Tue, 9 Aug 2016 12:50:23 -0500 Subject: [PATCH 048/497] Do not detect os, detect package 
managers. Add pacman support. --- snmp/os-updates.sh | 60 ++++++++++++++++++++++++---------------------- 1 file changed, 32 insertions(+), 28 deletions(-) diff --git a/snmp/os-updates.sh b/snmp/os-updates.sh index 63b827a8f..09d5bbb96 100755 --- a/snmp/os-updates.sh +++ b/snmp/os-updates.sh @@ -5,41 +5,45 @@ # extend osupdate /opt/os-updates.sh # # restart snmpd and activate the app for desired host # ################################################################ -BIN_AWK='/usr/bin/awk' BIN_WC='/usr/bin/wc' CMD_WC='-l' BIN_ZYPPER='/usr/bin/zypper' CMD_ZYPPER='lu' BIN_YUM='/usr/bin/yum' -CMD_YUM='check-update' +CMD_YUM='-q check-update' BIN_APT='/usr/bin/apt-get' CMD_APT='-s upgrade' +BIN_PACMAN='/usr/bin/pacman' +CMD_PACMAN='-Sup' -#general check for os based on /etc/os-release -if [ -f /etc/os-release ]; then - OS=`$BIN_AWK -F= '/^ID=/{print $2}' /etc/os-release` - if [ $OS == "opensuse" ]; then - UPDATES=`$BIN_ZYPPER $CMD_ZYPPER | $BIN_WC $CMD_WC` - if [ $UPDATES -gt 3 ]; then - echo $(($UPDATES-3)); - else - echo "0"; - fi - elif [ $OS == "\"centos\"" ]; then - UPDATES=`$BIN_YUM $CMD_YUM | $BIN_WC $CMD_WC` - if [ $UPDATES -gt 6 ]; then - echo $(($UPDATES-6)); - else - echo "0"; - fi - elif [ $OS == "ubuntu" ] || [ $OS == "debian" ]; then - UPDATES=`$BIN_APT $CMD_APT | grep 'Inst' | $BIN_WC $CMD_WC` - if [ $UPDATES -gt 1 ]; then - echo $UPDATES; - else - echo "0"; - fi - fi +if [ -f $BIN_APT ]; then + # Debian / Ubuntu + UPDATES=`$BIN_APT $CMD_APT | grep 'Inst' | $BIN_WC $CMD_WC` + echo $UPDATES; +elif [ -f $BIN_YUM ]; then + # CentOS / Redhat + UPDATES=`$BIN_YUM $CMD_YUM | $BIN_WC $CMD_WC` + if [ $UPDATES -gt 1 ]; then + echo $(($UPDATES-1)); + else + echo "0"; + fi +elif [ -f $BIN_ZYPPER ]; then + # OpenSUSE + UPDATES=`$BIN_ZYPPER $CMD_ZYPPER | $BIN_WC $CMD_WC` + if [ $UPDATES -gt 3 ]; then + echo $(($UPDATES-3)); + else + echo "0"; + fi +elif [ -f $BIN_PACMAN ]; then + # Arch + UPDATES=`$BIN_PACMAN $CMD_PACMAN | $BIN_WC $CMD_WC` + if [ $UPDATES -gt 1 ]; then 
+ echo $(($UPDATES-1)); + else + echo "0"; + fi else - echo "0"; + echo "0"; fi From 1cca01f990103de18f5560f383cd91d320200c72 Mon Sep 17 00:00:00 2001 From: crcro Date: Sun, 14 Aug 2016 17:43:27 +0300 Subject: [PATCH 049/497] added snmp extend to get raspberry sensors --- snmp/raspberry.sh | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) create mode 100644 snmp/raspberry.sh diff --git a/snmp/raspberry.sh b/snmp/raspberry.sh new file mode 100644 index 000000000..39840949f --- /dev/null +++ b/snmp/raspberry.sh @@ -0,0 +1,34 @@ +#!/bin/bash +####################################### +# please read DOCS to succesfully get # +# raspberry sensors into your host # +####################################### +picmd='/usr/bin/vcgencmd' +pised='/bin/sed' +getTemp='measure_temp' +getVoltsCore='measure_volts core' +getVoltsRamC='measure_volts sdram_c' +getVoltsRamI='measure_volts sdram_i' +getVoltsRamP='measure_volts sdram_p' +getFreqArm='measure_clock arm' +getFreqCore='measure_clock core' +getStatusH264='codec_enabled H264' +getStatusMPG2='codec_enabled MPG2' +getStatusWVC1='codec_enabled WVC1' +getStatusMPG4='codec_enabled MPG4' +getStatusMJPG='codec_enabled MJPG' +getStatusWMV9='codec_enabled WMV9' + +sudo $picmd $getTemp | $pised 's|[^0-9.]||g' +sudo $picmd $getVoltsCore | $pised 's|[^0-9.]||g' +sudo $picmd $getVoltsRamC | $pised 's|[^0-9.]||g' +sudo $picmd $getVoltsRamI | $pised 's|[^0-9.]||g' +sudo $picmd $getVoltsRamP | $pised 's|[^0-9.]||g' +sudo $picmd $getFreqArm | $pised 's/frequency(45)=//g' +sudo $picmd $getFreqCore | $pised 's/frequency(1)=//g' +sudo $picmd $getStatusH264 | $pised 's/H264=//g' +sudo $picmd $getStatusMPG2 | $pised 's/MPG2=//g' +sudo $picmd $getStatusWVC1 | $pised 's/WVC1=//g' +sudo $picmd $getStatusMPG4 | $pised 's/MPG4=//g' +sudo $picmd $getStatusMJPG | $pised 's/MJPG=//g' +sudo $picmd $getStatusWMV9 | $pised 's/WMV9=//g' From ff1450afba6f609add84bf50c2d0ab9f40cebfa1 Mon Sep 17 00:00:00 2001 From: Tony Murray Date: Mon, 15 Aug 
2016 22:56:31 -0500 Subject: [PATCH 050/497] Copy ntp scripts from the main repo. --- snmp/ntp-client.php | 79 +++++++++++++++++++++++++++++++++++++++++ snmp/ntpd-server.php | 83 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 162 insertions(+) create mode 100755 snmp/ntp-client.php create mode 100755 snmp/ntpd-server.php diff --git a/snmp/ntp-client.php b/snmp/ntp-client.php new file mode 100755 index 000000000..9332791af --- /dev/null +++ b/snmp/ntp-client.php @@ -0,0 +1,79 @@ +#!/usr/bin/env php + +// + +// START SETTINGS /// +$ntpq = '/usr/sbin/ntpq'; +// Change this to true if you have clk_jitter, sys_jitter in the ntpq -c rv output +$newstats_style = false; +// END SETTINGS /// + +// DO NOT EDIT UNDER THIS LINE + +$cmd = shell_exec($ntpq." -c rv | grep '^offset'"); +if ($newstats_style) { + $cmd2 = shell_exec($ntpq." -c rv | grep '^clk_wander'"); +} +else { + $cmd2 = shell_exec($ntpq." -c rv | grep '^stability'"); +} + +$vars = array(); +$vars2 = array(); +$vars = explode(',', $cmd); +$vars2 = explode(',', $cmd2); + + +function doSNMPv2($vars, $vars2, $newstats_style) +{ + $ntp = array(); + foreach ($vars as $item => $value) { + if (!empty($value)) { + $temp = explode('=', $value); + if (isset($temp[1])) { + $ntp[trim($temp[0])] = trim($temp[1]); + } + } + } + + foreach ($vars2 as $item => $value) { + if (!empty($value)) { + $temp = explode('=', $value); + if (isset($temp[1])) { + $ntp[trim($temp[0])] = trim($temp[1]); + } + } + } + + $var = array(); + $var['offset'] = (isset($ntp['offset']) ? $ntp['offset'] : 'U'); + $var['frequency'] = (isset($ntp['frequency']) ? $ntp['frequency'] : 'U'); + if ($newstats_style) { + $var['jitter'] = (isset($ntp['clk_jitter']) ? $ntp['clk_jitter'] : 'U'); + $var['noise'] = (isset($ntp['sys_jitter']) ? $ntp['sys_jitter'] : 'U'); + $var['stability'] = (isset($ntp['clk_wander']) ? $ntp['clk_wander'] : 'U'); + } + else { + $var['jitter'] = (isset($ntp['jitter']) ? 
$ntp['jitter'] : 'U'); + $var['noise'] = (isset($ntp['noise']) ? $ntp['noise'] : 'U'); + $var['stability'] = (isset($ntp['stability']) ? $ntp['stability'] : 'U'); + } + + foreach ($var as $item => $count) { + echo $count."\n"; + } + +}//end doSNMPv2() + + +doSNMPv2($vars, $vars2, $newstats_style); diff --git a/snmp/ntpd-server.php b/snmp/ntpd-server.php new file mode 100755 index 000000000..aba7b1ffe --- /dev/null +++ b/snmp/ntpd-server.php @@ -0,0 +1,83 @@ +#!/usr/bin/env php + + +// START SETTINGS /// +$ntpq = '/usr/sbin/ntpq'; +$ntpdc = '/usr/sbin/ntpdc'; +// Change this to true if you have clk_jitter, sys_jitter in the ntpq -c rv output +$newstats_style = false; +// END SETTINGS /// + +// DO NOT EDIT UNDER THIS LINE + +$cmd = shell_exec($ntpq.' -c rv'); +$cmd2 = shell_exec($ntpdc.' -c iostats'); +$vars = array(); +$vars2 = array(); +$vars = explode(',', $cmd); +$vars2 = str_replace(' ', '', $cmd2); +$vars2 = explode("\n", $vars2); + + +function doSNMPv2($vars, $vars2, $newstats_style) +{ + $ntpd = array(); + foreach ($vars as $item => $value) { + if (!empty($value)) { + $temp = explode('=', $value); + if (isset($temp[1])) { + $ntpd[trim($temp[0])] = trim($temp[1]); + } + } + } + + foreach ($vars2 as $item => $value) { + if (!empty($value)) { + $temp = explode(':', $value); + if (isset($temp[1])) { + $ntpd[trim($temp[0])] = trim($temp[1]); + } + } + } + + $var = array(); + $var['stratum'] = (isset($ntpd['stratum']) ? $ntpd['stratum'] : 'U'); + $var['offset'] = (isset($ntpd['offset']) ? $ntpd['offset'] : 'U'); + $var['frequency'] = (isset($ntpd['frequency']) ? $ntpd['frequency'] : 'U'); + if ($newstats_style) { + $var['jitter'] = (isset($ntpd['clk_jitter']) ? $ntpd['clk_jitter'] : 'U'); + $var['noise'] = (isset($ntpd['sys_jitter']) ? $ntpd['sys_jitter'] : 'U'); + $var['stability'] = (isset($ntpd['clk_wander']) ? $ntpd['clk_wander'] : 'U'); + } + else { + $var['jitter'] = (isset($ntpd['jitter']) ? $ntpd['jitter'] : 'U'); + $var['noise'] = (isset($ntpd['noise']) ? 
$ntpd['noise'] : 'U'); + $var['stability'] = (isset($ntpd['stability']) ? $ntpd['stability'] : 'U'); + } + + $var['uptime'] = (isset($ntpd['timesincereset']) ? $ntpd['timesincereset'] : 'U'); + $var['buffer_recv'] = (isset($ntpd['receivebuffers']) ? $ntpd['receivebuffers'] : 'U'); + $var['buffer_free'] = (isset($ntpd['freereceivebuffers']) ? $ntpd['freereceivebuffers'] : 'U'); + $var['buffer_used'] = (isset($ntpd['usedreceivebuffers']) ? $ntpd['usedreceivebuffers'] : 'U'); + $var['packets_drop'] = (isset($ntpd['droppedpackets']) ? $ntpd['droppedpackets'] : 'U'); + $var['packets_ignore'] = (isset($ntpd['ignoredpackets']) ? $ntpd['ignoredpackets'] : 'U'); + $var['packets_recv'] = (isset($ntpd['receivedpackets']) ? $ntpd['receivedpackets'] : 'U'); + $var['packets_sent'] = (isset($ntpd['packetssent']) ? $ntpd['packetssent'] : 'U'); + foreach ($var as $item => $count) { + echo $count."\n"; + } + +}//end doSNMPv2() + + +doSNMPv2($vars, $vars2, $newstats_style); From 84f66d3f071908dbf8d7d9fb83bf508e12031418 Mon Sep 17 00:00:00 2001 From: Alan Gregory Date: Wed, 17 Aug 2016 09:36:39 -0300 Subject: [PATCH 051/497] Added unbound stats script --- agent-local/unbound.sh | 8 ++++++++ 1 file changed, 8 insertions(+) create mode 100755 agent-local/unbound.sh diff --git a/agent-local/unbound.sh b/agent-local/unbound.sh new file mode 100755 index 000000000..d9b378892 --- /dev/null +++ b/agent-local/unbound.sh @@ -0,0 +1,8 @@ +#!/bin/bash +unboundctl=`which unbound-control` +if [ "$?" 
!= "0" ]; then +#Unbound control executable doesn't exist +exit +fi +echo '<<>>' +$unboundctl stats From 91b3f2b704a6b86a075e8adb75fa791f3cc31dc4 Mon Sep 17 00:00:00 2001 From: crcro Date: Mon, 22 Aug 2016 01:45:23 +0300 Subject: [PATCH 052/497] @paulgear recomandation --- snmp/dhcp-status.sh | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/snmp/dhcp-status.sh b/snmp/dhcp-status.sh index 9169cd74e..20bf2b66b 100755 --- a/snmp/dhcp-status.sh +++ b/snmp/dhcp-status.sh @@ -23,11 +23,8 @@ DHCP_FREE='^lease|binding state free' NO_ERROR='[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3} binding' $BIN_CAT $FILE_DHCP | $BIN_GREP $DHCP_LEASES | $BIN_SORT -u | $BIN_WC -l -$BIN_GREP -E "$DHCP_ACTIVE" $FILE_DHCP | $BIN_TR '\n' '|' | $BIN_SED 's/ {| //g' | $BIN_TR '|' '\n' | $BIN_GREP -E "$NO_ERROR" | $BIN_SORT -u | $BIN_WC -l -$BIN_GREP -E "$DHCP_EXPIRED" $FILE_DHCP | $BIN_TR '\n' '|' | $BIN_SED 's/ {| //g' | $BIN_TR '|' '\n' | $BIN_GREP -E "$NO_ERROR" | $BIN_SORT -u | $BIN_WC -l -$BIN_GREP -E "$DHCP_RELEASED" $FILE_DHCP | $BIN_TR '\n' '|' | $BIN_SED 's/ {| //g' | $BIN_TR '|' '\n' | $BIN_GREP -E "$NO_ERROR" | $BIN_SORT -u | $BIN_WC -l -$BIN_GREP -E "$DHCP_ABANDONED" $FILE_DHCP | $BIN_TR '\n' '|' | $BIN_SED 's/ {| //g' | $BIN_TR '|' '\n' | $BIN_GREP -E "$NO_ERROR" | $BIN_SORT -u | $BIN_WC -l -$BIN_GREP -E "$DHCP_RESET" $FILE_DHCP | $BIN_TR '\n' '|' | $BIN_SED 's/ {| //g' | $BIN_TR '|' '\n' | $BIN_GREP -E "$NO_ERROR" | $BIN_SORT -u | $BIN_WC -l -$BIN_GREP -E "$DHCP_BOOTP" $FILE_DHCP | $BIN_TR '\n' '|' | $BIN_SED 's/ {| //g' | $BIN_TR '|' '\n' | $BIN_GREP -E "$NO_ERROR" | $BIN_SORT -u | $BIN_WC -l -$BIN_GREP -E "$DHCP_BACKUP" $FILE_DHCP | $BIN_TR '\n' '|' | $BIN_SED 's/ {| //g' | $BIN_TR '|' '\n' | $BIN_GREP -E "$NO_ERROR" | $BIN_SORT -u | $BIN_WC -l -$BIN_GREP -E "$DHCP_FREE" $FILE_DHCP | $BIN_TR '\n' '|' | $BIN_SED 's/ {| //g' | $BIN_TR '|' '\n' | $BIN_GREP -E "$NO_ERROR" | $BIN_SORT -u | $BIN_WC -l + +for state in "$DHCP_ACTIVE" "$DHCP_EXPIRED" 
"$DHCP_RELEASED" "$DHCP_ABANDONED" "$DHCP_RESET" "$DHCP_BOOTP" "$DHCP_BACKUP" "$DHCP_FREE" +do + $BIN_GREP -E "$state" $FILE_DHCP | $BIN_TR '\n' '|' | $BIN_SED 's/ {| //g' | $BIN_TR '|' '\n' | $BIN_GREP -E "$NO_ERROR" | $BIN_SORT -u | $BIN_WC -l +done From f74de6d7e2cac44db9a4c83f3d576ba5397d0dfd Mon Sep 17 00:00:00 2001 From: Tony Murray Date: Mon, 22 Aug 2016 21:35:13 -0500 Subject: [PATCH 053/497] SNMP extend scripts from the main repo --- snmp/apache-stats | 89 +++ snmp/apache-stats.py | 87 +++ snmp/mailscanner.php | 76 +++ snmp/mysql-stats | 131 ++++ snmp/mysql_stats.php | 1247 +++++++++++++++++++++++++++++++++++ snmp/nfs-stats.sh | 0 snmp/nginx-stats | 37 ++ snmp/postfix-queues | 13 + snmp/powerdns.php | 76 +++ snmp/raspberry.sh | 0 snmp/shoutcast.default.conf | 3 + snmp/shoutcast.php | 122 ++++ 12 files changed, 1881 insertions(+) create mode 100755 snmp/apache-stats create mode 100755 snmp/apache-stats.py create mode 100755 snmp/mailscanner.php create mode 100755 snmp/mysql-stats create mode 100755 snmp/mysql_stats.php mode change 100644 => 100755 snmp/nfs-stats.sh create mode 100755 snmp/nginx-stats create mode 100755 snmp/postfix-queues create mode 100755 snmp/powerdns.php mode change 100644 => 100755 snmp/raspberry.sh create mode 100644 snmp/shoutcast.default.conf create mode 100755 snmp/shoutcast.php diff --git a/snmp/apache-stats b/snmp/apache-stats new file mode 100755 index 000000000..863514aea --- /dev/null +++ b/snmp/apache-stats @@ -0,0 +1,89 @@ +#!/usr/bin/env perl +# Original python script Copyright (C) 2009 Glen Pitt-Pladdy +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either version 2 +# of the License, or (at your option) any later version. 
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. + +use LWP::Simple; + +$CACHETIME = 30; +$CACHEFILE = '/tmp/snmp-cache-apache'; + +# check for cache file newer CACHETIME seconds ago +if ( -f $CACHEFILE && time - (stat( $CACHEFILE ))[9] < $CACHETIME) { + # use cached data +#print "Using cached data from file $CACHEFILE.\n"; + open (INFILE, "<$CACHEFILE" ) + or die "File open failure: $CACHEFILE\n"; + @data = ; + close INFILE; +} else { + # grab the status URL (fresh data) + @data = split /(\n)/, LWP::Simple::get( 'http://localhost/server-status?auto' ) + or die "Data fetch failure.\n"; + + # write file + $tmpfile = "$CACHEFILE.TMP.$PID"; + open (OUTFILE, ">$tmpfile") + or die "File open failure: $tmpfile\n"; + print OUTFILE @data; + close OUTFILE; + rename ( $tmpfile, $CACHEFILE ); +} + +# dice up the data +@scoreboardkey = ( '_', 'S', 'R', 'W', 'K', 'D', 'C', 'L', 'G', 'I', '.' 
); +%params = {}; +foreach $line (@data) { + chomp $line; + @fields = split( /: /, $line); + if ($fields[0] eq 'Scoreboard') { + # count up the scoreboard into states + %states = {}; + foreach $state (@scoreboardkey) { + $states{$state} = 0; + } + foreach $state ( split(//, $fields[1]) ) { + $states{$state}++; + } + } elsif ($fields[0] eq 'Total kBytes') { + # turn into base (byte) value + $params{$fields[0]} = int($fields[1])*1024; + } else { + # just store everything else + $params{$fields[0]} = $fields[1]; + } +} + +# output the data in order (this is because some platforms don't have them all) +@dataorder = ( + 'Total Accesses', + 'Total kBytes', + 'CPULoad', + 'Uptime', + 'ReqPerSec', + 'BytesPerSec', + 'BytesPerReq', + 'BusyServers', + 'IdleServers' +); +foreach $param (@dataorder) { + if (exists $params{$param}) { + print $params{$param}."\n"; + } else { + # not all Apache's have all stats + print "U\n"; + } +} + +# print the scoreboard +foreach $state (@scoreboardkey) { + print $states{$state}."\n"; +} diff --git a/snmp/apache-stats.py b/snmp/apache-stats.py new file mode 100755 index 000000000..891bd196e --- /dev/null +++ b/snmp/apache-stats.py @@ -0,0 +1,87 @@ +#!/usr/bin/python +# Copyright (C) 2009 Glen Pitt-Pladdy +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either version 2 +# of the License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+# +# +# + +CACHETIME = 30 +CACHEFILE = '/tmp/apache-snmp' + +# check for cache file newer CACHETIME seconds ago +import os +import time +if os.path.isfile ( CACHEFILE ) \ + and ( time.time() - os.stat ( CACHEFILE )[8] ) < CACHETIME: + # use cached data + f = open ( CACHEFILE, 'r' ) + data = f.read() + f.close() +else: + # grab the status URL (fresh data) + # need debian package python-urlgrabber + from urlgrabber import urlread + data = urlread ( 'http://localhost/server-status?auto', + user_agent = 'SNMP Apache Stats' ) + # write file + f = open ( CACHEFILE+'.TMP.'+`os.getpid()`, 'w' ) + f.write ( data ) + f.close() + os.rename ( CACHEFILE+'.TMP.'+`os.getpid()`, CACHEFILE ) + + +# dice up the data +scoreboardkey = [ '_', 'S', 'R', 'W', 'K', 'D', 'C', 'L', 'G', 'I', '.' ] +params = {} +for line in data.splitlines(): + fields = line.split( ': ' ) + if fields[0] == 'Scoreboard': + # count up the scoreboard into states + states = {} + for state in scoreboardkey: + states[state] = 0 + for state in fields[1]: + states[state] += 1 + elif fields[0] == 'Total kBytes': + # turn into base (byte) value + params[fields[0]] = int(fields[1])*1024 + else: + # just store everything else + params[fields[0]] = fields[1] + +# output the data in order (this is because some platforms don't have them all) +dataorder = [ + 'Total Accesses', + 'Total kBytes', + 'CPULoad', + 'Uptime', + 'ReqPerSec', + 'BytesPerSec', + 'BytesPerReq', + 'BusyWorkers', + 'IdleWorkers' +] +for param in dataorder: + try: + print params[param] +# print param + except: # not all Apache's have all stats + print 'U' + +# print the scoreboard +for state in scoreboardkey: + print states[state] +# print state diff --git a/snmp/mailscanner.php b/snmp/mailscanner.php new file mode 100755 index 000000000..b80fdb9ff --- /dev/null +++ b/snmp/mailscanner.php @@ -0,0 +1,76 @@ +#!/usr/bin/env php + + /// + /////////////////////////////////////////////////////////////////////////////////////// + + + // START SETTINGS /// + + 
$mailstats = "/opt/librenms/scripts/watchmaillog/watchmaillog_counters"; + + // END SETTINGS /// + + + /// + // DO NOT EDIT BENETH THIS LINE + /// + /////////////////////////////////////////////////////////////////////////////////////// + + function doSNMPv2($vars) { + $stats = array(); + if (file_exists($vars)) { + $data = file($vars); + foreach ($data as $item=>$value) { + if (!empty($value)) { + $temp = explode(':', trim($value)); + if (isset($temp[1])) { + $stats[$temp[0]] = $temp[1]; + } + } + } + } + $var = array(); + $var['mess_recv'] = (isset($stats['mess_recv']) ? $stats['mess_recv'] : "U"); + $var['mess_rejected'] = (isset($stats['mess_rejected']) ? $stats['mess_rejected'] : "U"); + $var['mess_relay'] = (isset($stats['mess_relay']) ? $stats['mess_relay'] : "U"); + $var['mess_sent'] = (isset($stats['mess_sent']) ? $stats['mess_sent'] : "U"); + $var['mess_waiting'] = (isset($stats['mess_waiting']) ? $stats['mess_waiting'] : "U"); + $var['spam'] = (isset($stats['spam']) ? $stats['spam'] : "U"); + $var['virus'] = (isset($stats['virus']) ? 
$stats['virus'] : "U"); + foreach ($var as $item=>$count) { + echo $count."\n"; + } + } + + function clearStats($mailstats) { + if (file_exists($mailstats)) { + $fp = fopen($mailstats, 'w'); + fwrite($fp, "mess_recv:0\n"); + fwrite($fp, "mess_rejected:0\n"); + fwrite($fp, "mess_relay:0\n"); + fwrite($fp, "mess_sent:0\n"); + fwrite($fp, "mess_waiting:0\n"); + fwrite($fp, "spam:0\n"); + fwrite($fp, "virus:0\n"); + fclose($fp); + } + } + + doSNMPv2($mailstats); + //clearStats($mailstats); + +?> diff --git a/snmp/mysql-stats b/snmp/mysql-stats new file mode 100755 index 000000000..d191bdbbc --- /dev/null +++ b/snmp/mysql-stats @@ -0,0 +1,131 @@ +#!/usr/bin/env python2 +import warnings +import re +warnings.filterwarnings(action="ignore", message='the sets module is deprecated') +import sets +import MySQLdb +import base64 +conn = MySQLdb.connect(host='', + user='', + passwd='', + db='') + +cursor = conn.cursor () + + +cursor.execute ("SHOW GLOBAL STATUS") +rows = cursor.fetchall() + +datavariables = { + 'Command Counters': ['Com_delete','Com_insert','Com_insert_select','Com_load','Com_replace','Com_replace_select', 'Com_select', 'Com_update', 'Com_update_multi'], + 'Connections': ['max_connections', 'Max_used_connections', 'Aborted_clients', 'Aborted_connects','Threads_connected','Connections'], + 'Files and Tables': ['table_open_cache','Open_files','Open_tables','Opened_tables'], + 'InnoDB Buffer Pool': ['ib_bpool_size','ib_bpool_dbpages', 'ib_bpool_free','ib_bpool_modpages'], + 'InnoDB Buffer Pool Activity': ['ib_bpool_read','ib_bpool_created', 'ib_bpool_written'], + 'InnoDB Insert Buffer': ['ib_ibuf_inserts','ib_ibuf_merged_rec', 'ib_ibuf_merges'], + 'InnoDB IO': ['ib_io_read','ib_io_write','ib_io_log', 'ib_io_fsync'], + 'InnoDB IO Pending': ['ib_iop_log','ib_iop_sync', 'ib_iop_flush_log', 'ib_iop_flush_bpool', 'ib_iop_ibuf_aio','ib_iop_aioread','ib_iop_aiowrite'], + 'InnoDB Log': ['innodb_log_buffer_size','ib_log_flush','ib_log_written'], + 'InnoDB Row Operations': 
['Innodb_rows_deleted','Innodb_rows_inserted','Innodb_rows_read','Innodb_rows_updated'], + 'InnoDB Semaphores': ['ib_spin_rounds','ib_spin_waits','ib_os_waits'], + 'InnoDB Transactions': ['ib_tnx'], + 'MyISAM Indexes': ['Key_read_requests','Key_reads','Key_write_requests','Key_writes'], + 'Network Traffic': ['Bytes_received','Bytes_sent'], + 'Query Cache': ['Qcache_queries_in_cache','Qcache_hits','Qcache_inserts','Qcache_not_cached','Qcache_lowmem_prunes'], + 'Query Cache Memory': ['query_cache_size','Qcache_free_memory'], + 'Select Types': ['Select_full_join','Select_full_range_join','Select_range','Select_range_check','Select_scan'], + 'Slow Queries': ['Slow_queries'], + 'Sorts': ['Sort_rows','Sort_range','Sort_merge_passes','Sort_scan'], + 'Table Locks': ['Table_locks_immediate','Table_locks_waited'], + 'Temporary Objects': ['Created_tmp_disk_tables','Created_tmp_tables','Created_tmp_files'] + } + +data = {} +for row in rows: + data[row[0]] = row[1] + +cursor = "" +cursor = conn.cursor () +cursor.execute ("SHOW VARIABLES") +rows = cursor.fetchall() + +for row in rows: + data[row[0]] = row[1] + +cursor = "" +cursor = conn.cursor() +cursor.execute("SHOW ENGINE INNODB STATUS") +rows = cursor.fetchall() + +for row in rows: + for line in row[2].split("\n"): + ib_bpool_size = re.match(r"Buffer\spool\ssize\s+(\d+)", line) + ib_bpool_free = re.match(r"Free\sbuffers\s+(\d+)", line) + ib_bpool_dbpages = re.match(r"Database\spages\s+(\d+)", line) + ib_bpool_modpages = re.match(r"Modified\sdb\spages\s+(\d+)", line) + ib_b_reg = re.match(r"Pages\sread\s(\d+),\screated\s(\d+),\swritten (\d+)", line) + ib_insert_buffer = re.match(r"(\d+)\sinserts,\s(\d+)\smerged\srecs,\s(\d+)", line) + ib_io = re.match(r"(\d+)\sOS\sfile\sreads,\s(\d+)\sOS\sfile\swrites,\s(\d+)\sOS\sfsyncs", line) + ib_io_log = re.match(r"(\d+)\slog\si\/o's\sdone.*", line) + ib_io_p1 = re.match(r"Pending\snormal\saio\sreads:\s(\d+),\saio\swrites:\s(\d+),", line) + ib_io_p2 = 
re.match(r"\s?ibuf\saio\sreads:\s(\d+),\slog\si\/o's:\s(\d+),\ssync\si\/o's:\s(\d+)", line) + ib_io_p3 = re.match(r"\s?Pending\sflushes\s\(fsync\)\slog:\s(\d+);\sbuffer\spool:\s(\d+)\s?", line) + ib_log_p1 = re.match(r"\s?Log\ssequence\snumber\s([[a-fA-F\d]+)(?: (\d+))?", line) + ib_log_p2 = re.match(r"\s?Log\sflushed\sup\sto\s+([[a-fA-F\d]+)(?: (\d+))?", line) + ib_semaphore = re.match(r"\s?Mutex\sspin\swaits\s(\d+),\srounds\s(\d+),\sOS waits\s(\d+)", line) + ib_tnx = re.match(r"\s?Trx\sid\scounter\s([[a-fA-F\d]+)(?: (\d+))?", line) + + if ib_bpool_size: + data['ib_bpool_size'] = ib_bpool_size.group(1) + elif ib_bpool_free: + data['ib_bpool_free'] = ib_bpool_free.group(1) + elif ib_bpool_dbpages: + data['ib_bpool_dbpages'] = ib_bpool_dbpages.group(1) + elif ib_bpool_modpages: + data['ib_bpool_modpages'] = ib_bpool_modpages.group(1) + elif ib_insert_buffer: + data['ib_ibuf_inserts'] = ib_insert_buffer.group(1) + data['ib_ibuf_merged_rec'] = ib_insert_buffer.group(2) + data['ib_ibuf_merges'] = ib_insert_buffer.group(3) + elif ib_io: + data['ib_io_read'] = ib_io.group(1) + data['ib_io_write'] = ib_io.group(2) + data['ib_io_fsync'] = ib_io.group(3) + elif ib_io_log: + data['ib_io_log'] = ib_io_log.group(1) + elif ib_io_p1: + data['ib_iop_aioread'] = ib_io_p1.group(1) + data['ib_iop_aiowrite'] = ib_io_p1.group(2) + elif ib_io_p2: + data['ib_iop_ibuf_aio'] = ib_io_p2.group(1) + data['ib_iop_log'] = ib_io_p2.group(2) + data['ib_iop_sync'] = ib_io_p2.group(3) + elif ib_io_p3: + data['ib_iop_flush_log'] = ib_io_p3.group(1) + data['ib_iop_flush_bpool'] = ib_io_p3.group(2) + elif ib_log_p1: + data['ib_log_written'] = ib_log_p1.group(1) + if ib_log_p1.group(2): + data['ib_log_written'] = int(data['ib_log_written']) + int(ib_log_p1.group(2)) + elif ib_log_p2: + data['ib_log_flush'] = ib_log_p2.group(1) + if ib_log_p2.group(2): + data['ib_log_flush'] = int(data['ib_log_flush']) + int(ib_log_p2.group(2)) + elif ib_semaphore: + data['ib_spin_waits'] = ib_semaphore.group(1) + 
data['ib_spin_rounds'] = ib_semaphore.group(2) + data['ib_os_waits'] = ib_semaphore.group(3) + elif ib_tnx: + data['ib_tnx'] = ib_tnx.group(1) + if ib_tnx.group(2): + data['ib_tnx'] = int(data['ib_tnx']) + int(ib_tnx.group(2)) + elif ib_b_reg: + data['ib_bpool_read'] = ib_b_reg.group(1) + data['ib_bpool_created'] = ib_b_reg.group(2) + data['ib_bpool_written'] = ib_b_reg.group(3) + + +for category in datavariables: + for variable in datavariables[category]: + if variable in data: + print data[variable] diff --git a/snmp/mysql_stats.php b/snmp/mysql_stats.php new file mode 100755 index 000000000..89522a61e --- /dev/null +++ b/snmp/mysql_stats.php @@ -0,0 +1,1247 @@ + true, # Do you want to check InnoDB statistics? + 'master' => true, # Do you want to check binary logging? + 'slave' => true, # Do you want to check slave status? + 'procs' => true, # Do you want to check SHOW PROCESSLIST? +); + +$use_ss = FALSE; # Whether to use the script server or not +$debug = FALSE; # Define whether you want debugging behavior. +$debug_log = FALSE; # If $debug_log is a filename, it'll be used. + +# ============================================================================ +# You should not need to change anything below this line. +# ============================================================================ +$version = "1.1.7"; + +# ============================================================================ +# Include settings from an external config file (issue 39). +# ============================================================================ +if (file_exists(__FILE__ . '.cnf' ) ) { + require(__FILE__ . '.cnf'); +} + +# Make this a happy little script even when there are errors. +$no_http_headers = true; +ini_set('implicit_flush', false); # No output, ever. 
+if ($debug ) { + ini_set('display_errors', true); + ini_set('display_startup_errors', true); + ini_set('error_reporting', 2147483647); +} +else { + ini_set('error_reporting', E_ERROR); +} +ob_start(); # Catch all output such as notices of undefined array indexes. +function error_handler($errno, $errstr, $errfile, $errline) { + print("$errstr at $errfile line $errline\n"); + debug("$errstr at $errfile line $errline"); +} +# ============================================================================ +# Set up the stuff we need to be called by the script server. +# ============================================================================ +if ($use_ss ) { + if (file_exists( dirname(__FILE__) . "/../include/global.php") ) { + # See issue 5 for the reasoning behind this. + debug("including " . dirname(__FILE__) . "/../include/global.php"); + include_once(dirname(__FILE__) . "/../include/global.php"); + } + elseif (file_exists( dirname(__FILE__) . "/../include/config.php" ) ) { + # Some Cacti installations don't have global.php. + debug("including " . dirname(__FILE__) . "/../include/config.php"); + include_once(dirname(__FILE__) . "/../include/config.php"); + } +} + +# ============================================================================ +# Make sure we can also be called as a script. +# ============================================================================ +if (!isset($called_by_script_server)) { + debug($_SERVER["argv"]); + array_shift($_SERVER["argv"]); # Strip off this script's filename + $options = parse_cmdline($_SERVER["argv"]); + validate_options($options); + $result = ss_get_mysql_stats($options); + + debug($result); + if (!$debug ) { + # Throw away the buffer, which ought to contain only errors. + ob_end_clean(); + } + else { + ob_end_flush(); # In debugging mode, print out the errors. + } + + # Split the result up and extract only the desired parts of it. 
+ $options['items'] = ""; + $wanted = explode(',', $options['items']); + $output = array(); + foreach ( explode(' ', $result) as $item ) { + if (in_array(substr($item, 0, 2), $wanted) ) { + $output[] = $item; + } + list($short, $val) = explode(":", $item); + echo(strtolower($short).":".strtolower($val)."\n"); + } + debug(array("Final result", $output)); + print(implode(' ', $output)); +} + +# ============================================================================ +# End "if file was not included" section. +# ============================================================================ +} + +# ============================================================================ +# Work around the lack of array_change_key_case in older PHP. +# ============================================================================ +if (!function_exists('array_change_key_case') ) { + function array_change_key_case($arr) { + $res = array(); + foreach ( $arr as $key => $val ) { + $res[strtolower($key)] = $val; + } + return $res; + } +} + +# ============================================================================ +# Validate that the command-line options are here and correct +# ============================================================================ +function validate_options($options) { + debug($options); + $opts = array('items', 'user', 'pass', 'heartbeat', 'nocache', 'port'); + # Required command-line options + foreach ( array() as $option ) { + if (!isset($options[$option]) || !$options[$option] ) { + usage("Required option --$option is missing"); + } + } + foreach ( $options as $key => $val ) { + if (!in_array($key, $opts) ) { + usage("Unknown option --$key"); + } + } +} + +# ============================================================================ +# Print out a brief usage summary +# ============================================================================ +function usage($message) { + global $mysql_host, $mysql_user, $mysql_pass, $mysql_port, $heartbeat; + + $usage 
= << --items [OPTION] + + --host Hostname to connect to; use host:port syntax to specify a port + Use :/path/to/socket if you want to connect via a UNIX socket + --items Comma-separated list of the items whose data you want + --user MySQL username; defaults to $mysql_user if not given + --pass MySQL password; defaults to $mysql_pass if not given + --heartbeat MySQL heartbeat table; defaults to '$heartbeat' (see mk-heartbeat) + --nocache Do not cache results in a file + --port MySQL port; defaults to $mysql_port if not given + --mysql_ssl Add the MYSQL_CLIENT_SSL flag to mysql_connect() call + +EOF; + die($usage); +} + +# ============================================================================ +# Parse command-line arguments, in the format --arg value --arg value, and +# return them as an array ( arg => value ) +# ============================================================================ +function parse_cmdline( $args ) { + $result = array(); + $cur_arg = ''; + foreach ($args as $val) { + if (strpos($val, '--') === 0 ) { + if (strpos($val, '--no') === 0 ) { + # It's an option without an argument, but it's a --nosomething so + # it's OK. + $result[substr($val, 2)] = 1; + $cur_arg = ''; + } + elseif ($cur_arg ) { # Maybe the last --arg was an option with no arg + if ($cur_arg == '--user' || $cur_arg == '--pass' || $cur_arg == '--port' ) { + # Special case because Cacti will pass these without an arg + $cur_arg = ''; + } + else { + die("No arg: $cur_arg\n"); + } + } + else { + $cur_arg = $val; + } + } + else { + $result[substr($cur_arg, 2)] = $val; + $cur_arg = ''; + } + } + if ($cur_arg && ($cur_arg != '--user' && $cur_arg != '--pass' && $cur_arg != '--port') ) { + die("No arg: $cur_arg\n"); + } + debug($result); + return $result; +} + +# ============================================================================ +# This is the main function. Some parameters are filled in from defaults at the +# top of this file. 
+# ============================================================================ +function ss_get_mysql_stats( $options ) { + # Process connection options and connect to MySQL. + global $debug, $mysql_user, $mysql_pass, $heartbeat, $cache_dir, $poll_time, + $chk_options, $mysql_host, $mysql_port, $mysql_ssl; + + # Connect to MySQL. + $user = isset($options['user']) ? $options['user'] : $mysql_user; + $pass = isset($options['pass']) ? $options['pass'] : $mysql_pass; + $port = isset($options['port']) ? $options['port'] : $mysql_port; + $host = isset($options['host']) ? $options['host'] : $mysql_host; + + $heartbeat = isset($options['heartbeat']) ? $options['heartbeat'] : $heartbeat; + # If there is a port, or if it's a non-standard port, we add ":$port" to the + # hostname. + $host_str = $host + . $port != 3306 ? ":$port" : ''; + debug(array('connecting to', $host_str, $user, $pass)); + if (!extension_loaded('mysql') ) { + debug("The MySQL extension is not loaded"); + die("The MySQL extension is not loaded"); + } + if ($mysql_ssl || (isset($options['mysql_ssl']) && $options['mysql_ssl']) ) { + $conn = mysql_connect($host_str, $user, $pass, true, MYSQL_CLIENT_SSL); + } + else { + $conn = mysql_connect($host_str, $user, $pass); + } + if (!$conn ) { + die("MySQL: " . mysql_error()); + } + + $sanitized_host = str_replace(array(":", "/"), array("", "_"), $host); + $cache_file = "$cache_dir/$sanitized_host-mysql_cacti_stats.txt" + . $port != 3306 ? ":$port" : ''; + debug("Cache file is $cache_file"); + + # First, check the cache. + $fp = null; + if (!isset($options['nocache']) ) { + if ($fp = fopen($cache_file, 'a+') ) { + $locked = flock($fp, 1); # LOCK_SH + if ($locked ) { + if (filesize($cache_file) > 0 + && filectime($cache_file) + ($poll_time/2) > time() + && ($arr = file($cache_file)) + ) {# The cache file is good to use. 
+ debug("Using the cache file"); + fclose($fp); + return $arr[0]; + } + else { + debug("The cache file seems too small or stale"); + # Escalate the lock to exclusive, so we can write to it. + if (flock($fp, 2) ) { # LOCK_EX + # We might have blocked while waiting for that LOCK_EX, and + # another process ran and updated it. Let's see if we can just + # return the data now: + if (filesize($cache_file) > 0 + && filectime($cache_file) + ($poll_time/2) > time() + && ($arr = file($cache_file)) + ) {# The cache file is good to use. + debug("Using the cache file"); + fclose($fp); + return $arr[0]; + } + ftruncate($fp, 0); # Now it's ready for writing later. + } + } + } + else { + debug("Couldn't lock the cache file, ignoring it."); + $fp = null; + } + } + } + else { + $fp = null; + debug("Couldn't open the cache file"); + } + + # Set up variables. + $status = array( # Holds the result of SHOW STATUS, SHOW INNODB STATUS, etc + # Define some indexes so they don't cause errors with += operations. 
+ 'relay_log_space' => null, + 'binary_log_space' => null, + 'current_transactions' => null, + 'locked_transactions' => null, + 'active_transactions' => null, + 'innodb_locked_tables' => null, + 'innodb_tables_in_use' => null, + 'innodb_lock_structs' => null, + 'innodb_lock_wait_secs' => null, + 'innodb_sem_waits' => null, + 'innodb_sem_wait_time_ms'=> null, + # Values for the 'state' column from SHOW PROCESSLIST (converted to + # lowercase, with spaces replaced by underscores) + 'State_closing_tables' => null, + 'State_copying_to_tmp_table' => null, + 'State_end' => null, + 'State_freeing_items' => null, + 'State_init' => null, + 'State_locked' => null, + 'State_login' => null, + 'State_preparing' => null, + 'State_reading_from_net' => null, + 'State_sending_data' => null, + 'State_sorting_result' => null, + 'State_statistics' => null, + 'State_updating' => null, + 'State_writing_to_net' => null, + 'State_none' => null, + 'State_other' => null, # Everything not listed above + ); + + # Get SHOW STATUS and convert the name-value array into a simple + # associative array. + $result = run_query("SHOW /*!50002 GLOBAL */ STATUS", $conn); + foreach ( $result as $row ) { + $status[$row[0]] = $row[1]; + } + + # Get SHOW VARIABLES and do the same thing, adding it to the $status array. + $result = run_query("SHOW VARIABLES", $conn); + foreach ( $result as $row ) { + $status[$row[0]] = $row[1]; + } + + # Get SHOW SLAVE STATUS, and add it to the $status array. + if ($chk_options['slave'] ) { + $result = run_query("SHOW SLAVE STATUS", $conn); + $slave_status_rows_gotten = 0; + foreach ( $result as $row ) { + $slave_status_rows_gotten++; + # Must lowercase keys because different MySQL versions have different + # lettercase. + $row = array_change_key_case($row, CASE_LOWER); + $status['relay_log_space'] = $row['relay_log_space']; + $status['slave_lag'] = $row['seconds_behind_master']; + + # Check replication heartbeat, if present. 
+ if ($heartbeat ) { + $result2 = run_query( + "SELECT GREATEST(0, UNIX_TIMESTAMP() - UNIX_TIMESTAMP(ts) - 1)" + . " AS delay FROM $heartbeat WHERE id = 1", $conn); + $slave_delay_rows_gotten = 0; + foreach ( $result2 as $row2 ) { + $slave_delay_rows_gotten++; + if ($row2 && is_array($row2) + && array_key_exists('delay', $row2) ) + { + $status['slave_lag'] = $row2['delay']; + } + else { + debug("Couldn't get slave lag from $heartbeat"); + } + } + if ($slave_delay_rows_gotten == 0 ) { + debug("Got nothing from heartbeat query"); + } + } + + # Scale slave_running and slave_stopped relative to the slave lag. + $status['slave_running'] = ($row['slave_sql_running'] == 'Yes') + ? $status['slave_lag'] : 0; + $status['slave_stopped'] = ($row['slave_sql_running'] == 'Yes') + ? 0 : $status['slave_lag']; + } + if ($slave_status_rows_gotten == 0 ) { + debug("Got nothing from SHOW SLAVE STATUS"); + } + } + + # Get SHOW MASTER STATUS, and add it to the $status array. + if ($chk_options['master'] + && array_key_exists('log_bin', $status) + && $status['log_bin'] == 'ON' + ) { # See issue #8 + $binlogs = array(0); + $result = run_query("SHOW MASTER LOGS", $conn); + foreach ( $result as $row ) { + $row = array_change_key_case($row, CASE_LOWER); + # Older versions of MySQL may not have the File_size column in the + # results of the command. Zero-size files indicate the user is + # deleting binlogs manually from disk (bad user! bad!). + if (array_key_exists('file_size', $row) && $row['file_size'] > 0 ) { + $binlogs[] = $row['file_size']; + } + } + if (count($binlogs)) { + $status['binary_log_space'] = to_int(array_sum($binlogs)); + } + } + + # Get SHOW PROCESSLIST and aggregate it by state, then add it to the array + # too. 
+ if ($chk_options['procs'] ) { + $result = run_query('SHOW PROCESSLIST', $conn); + foreach ( $result as $row ) { + $state = $row['State']; + if (is_null($state) ) { + $state = 'NULL'; + } + if ($state == '' ) { + $state = 'none'; + } + $state = str_replace(' ', '_', strtolower($state)); + if (array_key_exists("State_$state", $status) ) { + increment($status, "State_$state", 1); + } + else { + increment($status, "State_other", 1); + } + } + } + + # Get SHOW INNODB STATUS and extract the desired metrics from it, then add + # those to the array too. + if ($chk_options['innodb'] + && array_key_exists('have_innodb', $status) + && $status['have_innodb'] == 'YES' + ) { + $result = run_query("SHOW /*!50000 ENGINE*/ INNODB STATUS", $conn); + $istatus_text = $result[0]['Status']; + $istatus_vals = get_innodb_array($istatus_text); + + # Override values from InnoDB parsing with values from SHOW STATUS, + # because InnoDB status might not have everything and the SHOW STATUS is + # to be preferred where possible. + $overrides = array( + 'Innodb_buffer_pool_pages_data' => 'database_pages', + 'Innodb_buffer_pool_pages_dirty' => 'modified_pages', + 'Innodb_buffer_pool_pages_free' => 'free_pages', + 'Innodb_buffer_pool_pages_total' => 'pool_size', + 'Innodb_data_fsyncs' => 'file_fsyncs', + 'Innodb_data_pending_reads' => 'pending_normal_aio_reads', + 'Innodb_data_pending_writes' => 'pending_normal_aio_writes', + 'Innodb_os_log_pending_fsyncs' => 'pending_log_flushes', + 'Innodb_pages_created' => 'pages_created', + 'Innodb_pages_read' => 'pages_read', + 'Innodb_pages_written' => 'pages_written', + 'Innodb_rows_deleted' => 'rows_deleted', + 'Innodb_rows_inserted' => 'rows_inserted', + 'Innodb_rows_read' => 'rows_read', + 'Innodb_rows_updated' => 'rows_updated', + ); + + # If the SHOW STATUS value exists, override... 
+ foreach ( $overrides as $key => $val ) { + if (array_key_exists($key, $status) ) { + debug("Override $key"); + $istatus_vals[$val] = $status[$key]; + } + } + + # Now copy the values into $status. + foreach ( $istatus_vals as $key => $val ) { + $status[$key] = $istatus_vals[$key]; + } + } + + # Make table_open_cache backwards-compatible (issue 63). + if (array_key_exists('table_open_cache', $status) ) { + $status['table_cache'] = $status['table_open_cache']; + } + + # Compute how much of the key buffer is used and unflushed (issue 127). + $status['Key_buf_bytes_used'] + = big_sub($status['key_buffer_size'], + big_multiply($status['Key_blocks_unused'], + $status['key_cache_block_size'])); + $status['Key_buf_bytes_unflushed'] + = big_multiply($status['Key_blocks_not_flushed'], + $status['key_cache_block_size']); + + if (array_key_exists('unflushed_log', $status) + && $status['unflushed_log'] + ) { + # TODO: I'm not sure what the deal is here; need to debug this. But the + # unflushed log bytes spikes a lot sometimes and it's impossible for it to + # be more than the log buffer. + debug("Unflushed log: $status[unflushed_log]"); + $status['unflushed_log'] + = max($status['unflushed_log'], $status['innodb_log_buffer_size']); + } + + # Define the variables to output. I use shortened variable names so maybe + # it'll all fit in 1024 bytes for Cactid and Spine's benefit. This list must + # come right after the word MAGIC_VARS_DEFINITIONS. The Perl script parses + # it and uses it as a Perl variable. 
+ $keys = array( + 'Key_read_requests' => 'a0', + 'Key_reads' => 'a1', + 'Key_write_requests' => 'a2', + 'Key_writes' => 'a3', + 'history_list' => 'a4', + 'innodb_transactions' => 'a5', + 'read_views' => 'a6', + 'current_transactions' => 'a7', + 'locked_transactions' => 'a8', + 'active_transactions' => 'a9', + 'pool_size' => 'aa', + 'free_pages' => 'ab', + 'database_pages' => 'ac', + 'modified_pages' => 'ad', + 'pages_read' => 'ae', + 'pages_created' => 'af', + 'pages_written' => 'ag', + 'file_fsyncs' => 'ah', + 'file_reads' => 'ai', + 'file_writes' => 'aj', + 'log_writes' => 'ak', + 'pending_aio_log_ios' => 'al', + 'pending_aio_sync_ios' => 'am', + 'pending_buf_pool_flushes' => 'an', + 'pending_chkp_writes' => 'ao', + 'pending_ibuf_aio_reads' => 'ap', + 'pending_log_flushes' => 'aq', + 'pending_log_writes' => 'ar', + 'pending_normal_aio_reads' => 'as', + 'pending_normal_aio_writes' => 'at', + 'ibuf_inserts' => 'au', + 'ibuf_merged' => 'av', + 'ibuf_merges' => 'aw', + 'spin_waits' => 'ax', + 'spin_rounds' => 'ay', + 'os_waits' => 'az', + 'rows_inserted' => 'b0', + 'rows_updated' => 'b1', + 'rows_deleted' => 'b2', + 'rows_read' => 'b3', + 'Table_locks_waited' => 'b4', + 'Table_locks_immediate' => 'b5', + 'Slow_queries' => 'b6', + 'Open_files' => 'b7', + 'Open_tables' => 'b8', + 'Opened_tables' => 'b9', + 'innodb_open_files' => 'ba', + 'open_files_limit' => 'bb', + 'table_cache' => 'bc', + 'Aborted_clients' => 'bd', + 'Aborted_connects' => 'be', + 'Max_used_connections' => 'bf', + 'Slow_launch_threads' => 'bg', + 'Threads_cached' => 'bh', + 'Threads_connected' => 'bi', + 'Threads_created' => 'bj', + 'Threads_running' => 'bk', + 'max_connections' => 'bl', + 'thread_cache_size' => 'bm', + 'Connections' => 'bn', + 'slave_running' => 'bo', + 'slave_stopped' => 'bp', + 'Slave_retried_transactions' => 'bq', + 'slave_lag' => 'br', + 'Slave_open_temp_tables' => 'bs', + 'Qcache_free_blocks' => 'bt', + 'Qcache_free_memory' => 'bu', + 'Qcache_hits' => 'bv', + 'Qcache_inserts' 
=> 'bw', + 'Qcache_lowmem_prunes' => 'bx', + 'Qcache_not_cached' => 'by', + 'Qcache_queries_in_cache' => 'bz', + 'Qcache_total_blocks' => 'c0', + 'query_cache_size' => 'c1', + 'Questions' => 'c2', + 'Com_update' => 'c3', + 'Com_insert' => 'c4', + 'Com_select' => 'c5', + 'Com_delete' => 'c6', + 'Com_replace' => 'c7', + 'Com_load' => 'c8', + 'Com_update_multi' => 'c9', + 'Com_insert_select' => 'ca', + 'Com_delete_multi' => 'cb', + 'Com_replace_select' => 'cc', + 'Select_full_join' => 'cd', + 'Select_full_range_join' => 'ce', + 'Select_range' => 'cf', + 'Select_range_check' => 'cg', + 'Select_scan' => 'ch', + 'Sort_merge_passes' => 'ci', + 'Sort_range' => 'cj', + 'Sort_rows' => 'ck', + 'Sort_scan' => 'cl', + 'Created_tmp_tables' => 'cm', + 'Created_tmp_disk_tables' => 'cn', + 'Created_tmp_files' => 'co', + 'Bytes_sent' => 'cp', + 'Bytes_received' => 'cq', + 'innodb_log_buffer_size' => 'cr', + 'unflushed_log' => 'cs', + 'log_bytes_flushed' => 'ct', + 'log_bytes_written' => 'cu', + 'relay_log_space' => 'cv', + 'binlog_cache_size' => 'cw', + 'Binlog_cache_disk_use' => 'cx', + 'Binlog_cache_use' => 'cy', + 'binary_log_space' => 'cz', + 'innodb_locked_tables' => 'd0', + 'innodb_lock_structs' => 'd1', + 'State_closing_tables' => 'd2', + 'State_copying_to_tmp_table' => 'd3', + 'State_end' => 'd4', + 'State_freeing_items' => 'd5', + 'State_init' => 'd6', + 'State_locked' => 'd7', + 'State_login' => 'd8', + 'State_preparing' => 'd9', + 'State_reading_from_net' => 'da', + 'State_sending_data' => 'db', + 'State_sorting_result' => 'dc', + 'State_statistics' => 'dd', + 'State_updating' => 'de', + 'State_writing_to_net' => 'df', + 'State_none' => 'dg', + 'State_other' => 'dh', + 'Handler_commit' => 'di', + 'Handler_delete' => 'dj', + 'Handler_discover' => 'dk', + 'Handler_prepare' => 'dl', + 'Handler_read_first' => 'dm', + 'Handler_read_key' => 'dn', + 'Handler_read_next' => 'do', + 'Handler_read_prev' => 'dp', + 'Handler_read_rnd' => 'dq', + 'Handler_read_rnd_next' => 'dr', + 
'Handler_rollback' => 'ds', + 'Handler_savepoint' => 'dt', + 'Handler_savepoint_rollback' => 'du', + 'Handler_update' => 'dv', + 'Handler_write' => 'dw', + # Some InnoDB stats added later... + 'innodb_tables_in_use' => 'dx', + 'innodb_lock_wait_secs' => 'dy', + 'hash_index_cells_total' => 'dz', + 'hash_index_cells_used' => 'e0', + 'total_mem_alloc' => 'e1', + 'additional_pool_alloc' => 'e2', + 'uncheckpointed_bytes' => 'e3', + 'ibuf_used_cells' => 'e4', + 'ibuf_free_cells' => 'e5', + 'ibuf_cell_count' => 'e6', + 'adaptive_hash_memory' => 'e7', + 'page_hash_memory' => 'e8', + 'dictionary_cache_memory' => 'e9', + 'file_system_memory' => 'ea', + 'lock_system_memory' => 'eb', + 'recovery_system_memory' => 'ec', + 'thread_hash_memory' => 'ed', + 'innodb_sem_waits' => 'ee', + 'innodb_sem_wait_time_ms' => 'ef', + 'Key_buf_bytes_unflushed' => 'eg', + 'Key_buf_bytes_used' => 'eh', + 'key_buffer_size' => 'ei', + 'Innodb_row_lock_time' => 'ej', + 'Innodb_row_lock_waits' => 'ek', + ); + + # Return the output. + $output = array(); + foreach ($keys as $key => $short ) { + # If the value isn't defined, return -1 which is lower than (most graphs') + # minimum value of 0, so it'll be regarded as a missing value. + $val = isset($status[$key]) ? $status[$key] : -1; + $output[] = "$short:$val"; + } + $result = implode(' ', $output); + if ($fp ) { + if (fwrite($fp, $result) === FALSE ) { + die("Can't write '$cache_file'"); + } + fclose($fp); + } + + return $result; + +} + +# ============================================================================ +# Given INNODB STATUS text, returns a key-value array of the parsed text. Each +# line shows a sample of the input for both standard InnoDB as you would find in +# MySQL 5.0, and XtraDB or enhanced InnoDB from Percona if applicable. Note +# that extra leading spaces are ignored due to trim(). 
+# ============================================================================ +function get_innodb_array($text) { + $results = array( + 'spin_waits' => array(), + 'spin_rounds' => array(), + 'os_waits' => array(), + 'pending_normal_aio_reads' => null, + 'pending_normal_aio_writes' => null, + 'pending_ibuf_aio_reads' => null, + 'pending_aio_log_ios' => null, + 'pending_aio_sync_ios' => null, + 'pending_log_flushes' => null, + 'pending_buf_pool_flushes' => null, + 'file_reads' => null, + 'file_writes' => null, + 'file_fsyncs' => null, + 'ibuf_inserts' => null, + 'ibuf_merged' => null, + 'ibuf_merges' => null, + 'log_bytes_written' => null, + 'unflushed_log' => null, + 'log_bytes_flushed' => null, + 'pending_log_writes' => null, + 'pending_chkp_writes' => null, + 'log_writes' => null, + 'pool_size' => null, + 'free_pages' => null, + 'database_pages' => null, + 'modified_pages' => null, + 'pages_read' => null, + 'pages_created' => null, + 'pages_written' => null, + 'queries_inside' => null, + 'queries_queued' => null, + 'read_views' => null, + 'rows_inserted' => null, + 'rows_updated' => null, + 'rows_deleted' => null, + 'rows_read' => null, + 'innodb_transactions' => null, + 'unpurged_txns' => null, + 'history_list' => null, + 'current_transactions' => null, + 'hash_index_cells_total' => null, + 'hash_index_cells_used' => null, + 'total_mem_alloc' => null, + 'additional_pool_alloc' => null, + 'last_checkpoint' => null, + 'uncheckpointed_bytes' => null, + 'ibuf_used_cells' => null, + 'ibuf_free_cells' => null, + 'ibuf_cell_count' => null, + 'adaptive_hash_memory' => null, + 'page_hash_memory' => null, + 'dictionary_cache_memory' => null, + 'file_system_memory' => null, + 'lock_system_memory' => null, + 'recovery_system_memory' => null, + 'thread_hash_memory' => null, + 'innodb_sem_waits' => null, + 'innodb_sem_wait_time_ms' => null, + ); + $txn_seen = FALSE; + foreach ( explode("\n", $text) as $line ) { + $line = trim($line); + $row = preg_split('/ +/', $line); + + # 
SEMAPHORES + if (strpos($line, 'Mutex spin waits') === 0 ) { + # Mutex spin waits 79626940, rounds 157459864, OS waits 698719 + # Mutex spin waits 0, rounds 247280272495, OS waits 316513438 + $results['spin_waits'][] = to_int($row[3]); + $results['spin_rounds'][] = to_int($row[5]); + $results['os_waits'][] = to_int($row[8]); + } + elseif (strpos($line, 'RW-shared spins') === 0 ) { + # RW-shared spins 3859028, OS waits 2100750; RW-excl spins 4641946, OS waits 1530310 + $results['spin_waits'][] = to_int($row[2]); + $results['spin_waits'][] = to_int($row[8]); + $results['os_waits'][] = to_int($row[5]); + $results['os_waits'][] = to_int($row[11]); + } + elseif (strpos($line, 'seconds the semaphore:') > 0) { + # --Thread 907205 has waited at handler/ha_innodb.cc line 7156 for 1.00 seconds the semaphore: + increment($results, 'innodb_sem_waits', 1); + increment($results, + 'innodb_sem_wait_time_ms', to_int($row[9]) * 1000); + } + + # TRANSACTIONS + elseif (strpos($line, 'Trx id counter') === 0 ) { + # The beginning of the TRANSACTIONS section: start counting + # transactions + # Trx id counter 0 1170664159 + # Trx id counter 861B144C + $results['innodb_transactions'] = make_bigint($row[3], $row[4]); + $txn_seen = TRUE; + } + elseif (strpos($line, 'Purge done for trx') === 0 ) { + # Purge done for trx's n:o < 0 1170663853 undo n:o < 0 0 + # Purge done for trx's n:o < 861B135D undo n:o < 0 + $purged_to = make_bigint($row[6], $row[7] == 'undo' ? 
null : $row[7]); + $results['unpurged_txns'] + = big_sub($results['innodb_transactions'], $purged_to); + } + elseif (strpos($line, 'History list length') === 0 ) { + # History list length 132 + $results['history_list'] = to_int($row[3]); + } + elseif ($txn_seen && strpos($line, '---TRANSACTION') === 0 ) { + # ---TRANSACTION 0, not started, process no 13510, OS thread id 1170446656 + increment($results, 'current_transactions', 1); + if (strpos($line, 'ACTIVE') > 0 ) { + increment($results, 'active_transactions', 1); + } + } + elseif ($txn_seen && strpos($line, '------- TRX HAS BEEN') === 0 ) { + # ------- TRX HAS BEEN WAITING 32 SEC FOR THIS LOCK TO BE GRANTED: + increment($results, 'innodb_lock_wait_secs', to_int($row[5])); + } + elseif (strpos($line, 'read views open inside InnoDB') > 0 ) { + # 1 read views open inside InnoDB + $results['read_views'] = to_int($row[0]); + } + elseif (strpos($line, 'mysql tables in use') === 0 ) { + # mysql tables in use 2, locked 2 + increment($results, 'innodb_tables_in_use', to_int($row[4])); + increment($results, 'innodb_locked_tables', to_int($row[6])); + } + elseif ($txn_seen && strpos($line, 'lock struct(s)') > 0 ) { + # 23 lock struct(s), heap size 3024, undo log entries 27 + # LOCK WAIT 12 lock struct(s), heap size 3024, undo log entries 5 + # LOCK WAIT 2 lock struct(s), heap size 368 + if (strpos($line, 'LOCK WAIT') === 0 ) { + increment($results, 'innodb_lock_structs', to_int($row[2])); + increment($results, 'locked_transactions', 1); + } + else { + increment($results, 'innodb_lock_structs', to_int($row[0])); + } + } + + # FILE I/O + elseif (strpos($line, ' OS file reads, ') > 0 ) { + # 8782182 OS file reads, 15635445 OS file writes, 947800 OS fsyncs + $results['file_reads'] = to_int($row[0]); + $results['file_writes'] = to_int($row[4]); + $results['file_fsyncs'] = to_int($row[8]); + } + elseif (strpos($line, 'Pending normal aio reads:') === 0 ) { + # Pending normal aio reads: 0, aio writes: 0, + 
$results['pending_normal_aio_reads'] = to_int($row[4]); + $results['pending_normal_aio_writes'] = to_int($row[7]); + } + elseif (strpos($line, 'ibuf aio reads') === 0 ) { + # ibuf aio reads: 0, log i/o's: 0, sync i/o's: 0 + $results['pending_ibuf_aio_reads'] = to_int($row[3]); + $results['pending_aio_log_ios'] = to_int($row[6]); + $results['pending_aio_sync_ios'] = to_int($row[9]); + } + elseif (strpos($line, 'Pending flushes (fsync)') === 0 ) { + # Pending flushes (fsync) log: 0; buffer pool: 0 + $results['pending_log_flushes'] = to_int($row[4]); + $results['pending_buf_pool_flushes'] = to_int($row[7]); + } + + # INSERT BUFFER AND ADAPTIVE HASH INDEX + elseif (strpos($line, 'Ibuf for space 0: size ') === 0 ) { + # Older InnoDB code seemed to be ready for an ibuf per tablespace. It + # had two lines in the output. Newer has just one line, see below. + # Ibuf for space 0: size 1, free list len 887, seg size 889, is not empty + # Ibuf for space 0: size 1, free list len 887, seg size 889, + $results['ibuf_used_cells'] = to_int($row[5]); + $results['ibuf_free_cells'] = to_int($row[9]); + $results['ibuf_cell_count'] = to_int($row[12]); + } + elseif (strpos($line, 'Ibuf: size ') === 0 ) { + # Ibuf: size 1, free list len 4634, seg size 4636, + $results['ibuf_used_cells'] = to_int($row[2]); + $results['ibuf_free_cells'] = to_int($row[6]); + $results['ibuf_cell_count'] = to_int($row[9]); + } + elseif (strpos($line, ' merged recs, ') > 0 ) { + # 19817685 inserts, 19817684 merged recs, 3552620 merges + $results['ibuf_inserts'] = to_int($row[0]); + $results['ibuf_merged'] = to_int($row[2]); + $results['ibuf_merges'] = to_int($row[5]); + } + elseif (strpos($line, 'Hash table size ') === 0 ) { + # In some versions of InnoDB, the used cells is omitted. + # Hash table size 4425293, used cells 4229064, .... 
+ # Hash table size 57374437, node heap has 72964 buffer(s) <-- no used cells + $results['hash_index_cells_total'] = to_int($row[3]); + $results['hash_index_cells_used'] + = strpos($line, 'used cells') > 0 ? to_int($row[6]) : '0'; + } + + # LOG + elseif (strpos($line, " log i/o's done, ") > 0 ) { + # 3430041 log i/o's done, 17.44 log i/o's/second + # 520835887 log i/o's done, 17.28 log i/o's/second, 518724686 syncs, 2980893 checkpoints + # TODO: graph syncs and checkpoints + $results['log_writes'] = to_int($row[0]); + } + elseif (strpos($line, " pending log writes, ") > 0 ) { + # 0 pending log writes, 0 pending chkp writes + $results['pending_log_writes'] = to_int($row[0]); + $results['pending_chkp_writes'] = to_int($row[4]); + } + elseif (strpos($line, "Log sequence number") === 0 ) { + # This number is NOT printed in hex in InnoDB plugin. + # Log sequence number 13093949495856 //plugin + # Log sequence number 125 3934414864 //normal + $results['log_bytes_written'] + = isset($row[4]) + ? make_bigint($row[3], $row[4]) + : to_int($row[3]); + } + elseif (strpos($line, "Log flushed up to") === 0 ) { + # This number is NOT printed in hex in InnoDB plugin. + # Log flushed up to 13093948219327 + # Log flushed up to 125 3934414864 + $results['log_bytes_flushed'] + = isset($row[5]) + ? make_bigint($row[4], $row[5]) + : to_int($row[4]); + } + elseif (strpos($line, "Last checkpoint at") === 0 ) { + # Last checkpoint at 125 3934293461 + $results['last_checkpoint'] + = isset($row[4]) + ? 
make_bigint($row[3], $row[4]) + : to_int($row[3]); + } + + # BUFFER POOL AND MEMORY + elseif (strpos($line, "Total memory allocated") === 0 ) { + # Total memory allocated 29642194944; in additional pool allocated 0 + $results['total_mem_alloc'] = to_int($row[3]); + $results['additional_pool_alloc'] = to_int($row[8]); + } + elseif (strpos($line, 'Adaptive hash index ') === 0 ) { + # Adaptive hash index 1538240664 (186998824 + 1351241840) + $results['adaptive_hash_memory'] = to_int($row[3]); + } + elseif (strpos($line, 'Page hash ') === 0 ) { + # Page hash 11688584 + $results['page_hash_memory'] = to_int($row[2]); + } + elseif (strpos($line, 'Dictionary cache ') === 0 ) { + # Dictionary cache 145525560 (140250984 + 5274576) + $results['dictionary_cache_memory'] = to_int($row[2]); + } + elseif (strpos($line, 'File system ') === 0 ) { + # File system 313848 (82672 + 231176) + $results['file_system_memory'] = to_int($row[2]); + } + elseif (strpos($line, 'Lock system ') === 0 ) { + # Lock system 29232616 (29219368 + 13248) + $results['lock_system_memory'] = to_int($row[2]); + } + elseif (strpos($line, 'Recovery system ') === 0 ) { + # Recovery system 0 (0 + 0) + $results['recovery_system_memory'] = to_int($row[2]); + } + elseif (strpos($line, 'Threads ') === 0 ) { + # Threads 409336 (406936 + 2400) + $results['thread_hash_memory'] = to_int($row[1]); + } + elseif (strpos($line, 'innodb_io_pattern ') === 0 ) { + # innodb_io_pattern 0 (0 + 0) + $results['innodb_io_pattern_memory'] = to_int($row[1]); + } + elseif (strpos($line, "Buffer pool size ") === 0 ) { + # The " " after size is necessary to avoid matching the wrong line: + # Buffer pool size 1769471 + # Buffer pool size, bytes 28991012864 + $results['pool_size'] = to_int($row[3]); + } + elseif (strpos($line, "Free buffers") === 0 ) { + # Free buffers 0 + $results['free_pages'] = to_int($row[2]); + } + elseif (strpos($line, "Database pages") === 0 ) { + # Database pages 1696503 + $results['database_pages'] = 
to_int($row[2]); + } + elseif (strpos($line, "Modified db pages") === 0 ) { + # Modified db pages 160602 + $results['modified_pages'] = to_int($row[3]); + } + elseif (strpos($line, "Pages read ahead") === 0 ) { + # Must do this BEFORE the next test, otherwise it'll get fooled by this + # line from the new plugin (see samples/innodb-015.txt): + # Pages read ahead 0.00/s, evicted without access 0.06/s + # TODO: No-op for now, see issue 134. + } + elseif (strpos($line, "Pages read") === 0 ) { + # Pages read 15240822, created 1770238, written 21705836 + $results['pages_read'] = to_int($row[2]); + $results['pages_created'] = to_int($row[4]); + $results['pages_written'] = to_int($row[6]); + } + + # ROW OPERATIONS + elseif (strpos($line, 'Number of rows inserted') === 0 ) { + # Number of rows inserted 50678311, updated 66425915, deleted 20605903, read 454561562 + $results['rows_inserted'] = to_int($row[4]); + $results['rows_updated'] = to_int($row[6]); + $results['rows_deleted'] = to_int($row[8]); + $results['rows_read'] = to_int($row[10]); + } + elseif (strpos($line, " queries inside InnoDB, ") > 0 ) { + # 0 queries inside InnoDB, 0 queries in queue + $results['queries_inside'] = to_int($row[0]); + $results['queries_queued'] = to_int($row[4]); + } + } + + foreach ( array('spin_waits', 'spin_rounds', 'os_waits') as $key ) { + $results[$key] = to_int(array_sum($results[$key])); + } + $results['unflushed_log'] + = big_sub($results['log_bytes_written'], $results['log_bytes_flushed']); + $results['uncheckpointed_bytes'] + = big_sub($results['log_bytes_written'], $results['last_checkpoint']); + + +# foreach ($results as $key => $value) { +# echo(strtolower($key).":".strtolower($value)."\n"); +# } + + + return $results; +} + + +# ============================================================================ +# Returns a bigint from two ulint or a single hex number. This is tested in +# t/mysql_stats.php and copied, without tests, to ss_get_by_ssh.php. 
+# ============================================================================ +function make_bigint ($hi, $lo = null) { + debug(array($hi, $lo)); + if (is_null($lo) ) { + # Assume it is a hex string representation. + return base_convert($hi, 16, 10); + } + else { + $hi = $hi ? $hi : '0'; # Handle empty-string or whatnot + $lo = $lo ? $lo : '0'; + return big_add(big_multiply($hi, 4294967296), $lo); + } +} + +# ============================================================================ +# Extracts the numbers from a string. You can't reliably do this by casting to +# an int, because numbers that are bigger than PHP's int (varies by platform) +# will be truncated. And you can't use sprintf(%u) either, because the maximum +# value that will return on some platforms is 4022289582. So this just handles +# them as a string instead. It extracts digits until it finds a non-digit and +# quits. This is tested in t/mysql_stats.php and copied, without tests, to +# ss_get_by_ssh.php. +# ============================================================================ +function to_int ( $str ) { + debug($str); + global $debug; + preg_match('{(\d+)}', $str, $m); + if (isset($m[1]) ) { + return $m[1]; + } + elseif ($debug ) { + print_r(debug_backtrace()); + } + else { + return 0; + } +} + +# ============================================================================ +# Wrap mysql_query in error-handling, and instead of returning the result, +# return an array of arrays in the result. 
+# ============================================================================ +function run_query($sql, $conn) { + global $debug; + debug($sql); + $result = @mysql_query($sql, $conn); + if ($debug ) { + $error = @mysql_error($conn); + if ($error ) { + debug(array($sql, $error)); + die("SQLERR $error in $sql"); + } + } + $array = array(); + while ( $row = @mysql_fetch_array($result) ) { + $array[] = $row; + } + debug(array($sql, $array)); + return $array; +} + +# ============================================================================ +# Safely increments a value that might be null. +# ============================================================================ +function increment(&$arr, $key, $howmuch) { + debug(array($key, $howmuch)); + if (array_key_exists($key, $arr) && isset($arr[$key]) ) { + $arr[$key] = big_add($arr[$key], $howmuch); + } + else { + $arr[$key] = $howmuch; + } +} + +# ============================================================================ +# Multiply two big integers together as accurately as possible with reasonable +# effort. This is tested in t/mysql_stats.php and copied, without tests, to +# ss_get_by_ssh.php. $force is for testability. +# ============================================================================ +function big_multiply ($left, $right, $force = null) { + if (function_exists("gmp_mul") && (is_null($force) || $force == 'gmp') ) { + debug(array('gmp_mul', $left, $right)); + return gmp_strval( gmp_mul( $left, $right )); + } + elseif (function_exists("bcmul") && (is_null($force) || $force == 'bc') ) { + debug(array('bcmul', $left, $right)); + return bcmul( $left, $right ); + } + else { # Or $force == 'something else' + debug(array('sprintf', $left, $right)); + return sprintf("%.0f", $left * $right); + } +} + +# ============================================================================ +# Subtract two big integers as accurately as possible with reasonable effort. 
+# This is tested in t/mysql_stats.php and copied, without tests, to +# ss_get_by_ssh.php. $force is for testability. +# ============================================================================ +function big_sub ($left, $right, $force = null) { + debug(array($left, $right)); + if (is_null($left) ) { $left = 0; } + if (is_null($right) ) { $right = 0; } + if (function_exists("gmp_sub") && (is_null($force) || $force == 'gmp')) { + debug(array('gmp_sub', $left, $right)); + return gmp_strval( gmp_sub( $left, $right )); + } + elseif (function_exists("bcsub") && (is_null($force) || $force == 'bc')) { + debug(array('bcsub', $left, $right)); + return bcsub( $left, $right ); + } + else { # Or $force == 'something else' + debug(array('to_int', $left, $right)); + return to_int($left - $right); + } +} + +# ============================================================================ +# Add two big integers together as accurately as possible with reasonable +# effort. This is tested in t/mysql_stats.php and copied, without tests, to +# ss_get_by_ssh.php. $force is for testability. +# ============================================================================ +function big_add ($left, $right, $force = null) { + if (is_null($left) ) { $left = 0; } + if (is_null($right) ) { $right = 0; } + if (function_exists("gmp_add") && (is_null($force) || $force == 'gmp')) { + debug(array('gmp_add', $left, $right)); + return gmp_strval( gmp_add( $left, $right )); + } + elseif (function_exists("bcadd") && (is_null($force) || $force == 'bc')) { + debug(array('bcadd', $left, $right)); + return bcadd( $left, $right ); + } + else { # Or $force == 'something else' + debug(array('to_int', $left, $right)); + return to_int($left + $right); + } +} + +# ============================================================================ +# Writes to a debugging log. 
+# ============================================================================ +function debug($val) { + global $debug_log; + if (!$debug_log ) { + return; + } + if ($fp = fopen($debug_log, 'a+') ) { + $trace = debug_backtrace(); + $calls = array(); + $i = 0; + $line = 0; + $file = ''; + foreach ( debug_backtrace() as $arr ) { + if ($i++ ) { + $calls[] = "$arr[function]() at $file:$line"; + } + $line = array_key_exists('line', $arr) ? $arr['line'] : '?'; + $file = array_key_exists('file', $arr) ? $arr['file'] : '?'; + } + if (!count($calls) ) { + $calls[] = "at $file:$line"; + } + fwrite($fp, date('Y-m-d h:i:s') . ' ' . implode(' <- ', $calls)); + fwrite($fp, "\n" . var_export($val, TRUE) . "\n"); + fclose($fp); + } + else { # Disable logging + print("Warning: disabling debug logging to $debug_log\n"); + $debug_log = FALSE; + } +} + +?> diff --git a/snmp/nfs-stats.sh b/snmp/nfs-stats.sh old mode 100644 new mode 100755 diff --git a/snmp/nginx-stats b/snmp/nginx-stats new file mode 100755 index 000000000..1cedca5ba --- /dev/null +++ b/snmp/nginx-stats @@ -0,0 +1,37 @@ +#!/usr/bin/env python2 +import urllib2 +import re + + +data = urllib2.urlopen('http://127.0.0.1/nginx-status').read() + +params = {} + +for line in data.split("\n"): + smallstat = re.match(r"\s?Reading:\s(.*)\sWriting:\s(.*)\sWaiting:\s(.*)$", line) + req = re.match(r"\s+(\d+)\s+(\d+)\s+(\d+)", line) + if smallstat: + params["Reading"] = smallstat.group(1) + params["Writing"] = smallstat.group(2) + params["Waiting"] = smallstat.group(3) + elif req: + params["Requests"] = req.group(3) + else: + pass + + +dataorder = [ + "Active", + "Reading", + "Writing", + "Waiting", + "Requests" + ] + + +for param in dataorder: + if param == "Active": + Active = int(params["Reading"]) + int(params["Writing"]) + int(params["Waiting"]) + print Active + else: + print params[param] diff --git a/snmp/postfix-queues b/snmp/postfix-queues new file mode 100755 index 000000000..dc1951cc1 --- /dev/null +++ 
b/snmp/postfix-queues @@ -0,0 +1,13 @@ +#!/bin/bash + +#Written by Valec 2006. Steal and share. +#Get postfix queue lengths + +#extend mailq /opt/observer/scripts/getmailq.sh + +QUEUES="incoming active deferred hold" + +for i in $QUEUES; do + COUNT=`qshape $i | grep TOTAL | awk '{print $2}'` + printf "$COUNT\n" +done diff --git a/snmp/powerdns.php b/snmp/powerdns.php new file mode 100755 index 000000000..14103124c --- /dev/null +++ b/snmp/powerdns.php @@ -0,0 +1,76 @@ +#!/usr/bin/env php + + +// START SETTINGS /// +$pdnscontrol = '/usr/bin/pdns_control'; +// END SETTINGS /// + +// DO NOT EDIT UNDER THIS LINE +// +$cmd = shell_exec($pdnscontrol.' show \*'); +$vars = array(); +$vars = explode(',', $cmd); + + +function doSNMP($vars) { + foreach ($vars as $item => $value) { + $value = trim($value); + if (!empty($value)) { + echo $value."\n"; + } + } + +}//end doSNMP() + +function doSNMPv2($vars) { + $pdns = array(); + foreach ($vars as $item => $value) { + if (!empty($value)) { + $temp = explode('=', $value); + if (isset($temp[1])) { + $pdns[$temp[0]] = $temp[1]; + } + } + } + + $var = array(); + $var['corrupt-packets'] = (isset($pdns['corrupt-packets']) ? $pdns['corrupt-packets'] : 'U'); + $var['deferred-cache-inserts'] = (isset($pdns['deferred-cache-inserts']) ? $pdns['deferred-cache-inserts'] : 'U'); + $var['deferred-cache-lookup'] = (isset($pdns['deferred-cache-lookup']) ? $pdns['deferred-cache-lookup'] : 'U'); + $var['latency'] = (isset($pdns['latency']) ? $pdns['latency'] : 'U'); + $var['packetcache-hit'] = (isset($pdns['packetcache-hit']) ? $pdns['packetcache-hit'] : 'U'); + $var['packetcache-miss'] = (isset($pdns['packetcache-miss']) ? $pdns['packetcache-miss'] : 'U'); + $var['packetcache-size'] = (isset($pdns['packetcache-size']) ? $pdns['packetcache-size'] : 'U'); + $var['qsize-q'] = (isset($pdns['qsize-q']) ? $pdns['qsize-q'] : 'U'); + $var['query-cache-hit'] = (isset($pdns['query-cache-hit']) ? 
$pdns['query-cache-hit'] : 'U'); + $var['query-cache-miss'] = (isset($pdns['query-cache-miss']) ? $pdns['query-cache-miss'] : 'U'); + $var['recursing-answers'] = (isset($pdns['recursing-answers']) ? $pdns['recursing-answers'] : 'U'); + $var['recursing-questions'] = (isset($pdns['recursing-questions']) ? $pdns['recursing-questions'] : 'U'); + $var['servfail-packets'] = (isset($pdns['servfail-packets']) ? $pdns['servfail-packets'] : 'U'); + $var['tcp-answers'] = (isset($pdns['tcp-answers']) ? $pdns['tcp-answers'] : 'U'); + $var['tcp-queries'] = (isset($pdns['tcp-queries']) ? $pdns['tcp-queries'] : 'U'); + $var['timedout-packets'] = (isset($pdns['timedout-packets']) ? $pdns['timedout-packets'] : 'U'); + $var['udp-answers'] = (isset($pdns['udp-answers']) ? $pdns['udp-answers'] : 'U'); + $var['udp-queries'] = (isset($pdns['udp-queries']) ? $pdns['udp-queries'] : 'U'); + $var['udp4-answers'] = (isset($pdns['udp4-answers']) ? $pdns['udp4-answers'] : 'U'); + $var['udp4-queries'] = (isset($pdns['udp4-queries']) ? $pdns['udp4-queries'] : 'U'); + $var['udp6-answers'] = (isset($pdns['udp6-answers']) ? $pdns['udp6-answers'] : 'U'); + $var['udp6-queries'] = (isset($pdns['udp6-queries']) ? 
$pdns['udp6-queries'] : 'U'); + foreach ($var as $item => $count) { + echo $count."\n"; + } + +}//end doSNMPv2() + + +doSNMPv2($vars); diff --git a/snmp/raspberry.sh b/snmp/raspberry.sh old mode 100644 new mode 100755 diff --git a/snmp/shoutcast.default.conf b/snmp/shoutcast.default.conf new file mode 100644 index 000000000..d38367bab --- /dev/null +++ b/snmp/shoutcast.default.conf @@ -0,0 +1,3 @@ +server1:8050 +server2:8000 +server2:8010 diff --git a/snmp/shoutcast.php b/snmp/shoutcast.php new file mode 100755 index 000000000..4c588355a --- /dev/null +++ b/snmp/shoutcast.php @@ -0,0 +1,122 @@ +#!/usr/bin/env php + + /// + /////////////////////////////////////////////////////////////////////////////////////// + + + // START SETTINGS /// + + $config = "/opt/librenms/scripts/shoutcast.conf"; + $cache = "/opt/librenms/scripts/shoutcast.cache"; + + // END SETTINGS /// + + + /// + // DO NOT EDIT BENETH THIS LINE + /// + /////////////////////////////////////////////////////////////////////////////////////// + + /* Do NOT run this script through a web browser */ + if (!isset($_SERVER["argv"][0]) || isset($_SERVER['REQUEST_METHOD']) || isset($_SERVER['REMOTE_ADDR'])) { + die('This script is only meant to run at the command line.'); + } + + $cmd = (isset($_SERVER['argv'][1]) ? $_SERVER['argv'][1] : ""); + + function get_data($host, $port) { + $fp = @fsockopen($host, $port, $errno, $errstr, 5); + if(!$fp) { $connect = 0; } + if (!isset($connect)) { + fputs($fp, "GET /7.html HTTP/1.0\r\n" + . "User-Agent: All In One - SHOUTcast Stats Parser" + . 
" (Mozilla Compatible)\r\n\r\n"); + while (!feof($fp)) { + $rawdata = fgets($fp, 1024); + } + fclose($fp); + } + preg_match('/body>(.*)<\/body/', $rawdata, $matches); + $res = explode(',', $matches[1], 7); + $res[7] = $host; + $res[8] = $port; + return $res; + } + + function get_list($config) { + if (file_exists($config)) { + $servers = file($config); + $data = array(); + foreach ($servers as $item=>$server) { + list($host, $port) = explode(":", $server, 2); + array_push($data, get_data(trim($host), trim($port))); + } + return $data; + } + } + + function doSNMPv2($vars) { + $res = array(); + foreach ($vars as $items=>$server) { + $var = array(); + $var['bitrate'] = (isset($server['5']) ? (($server['5'] / 8) * 1000) : "0"); + //$var['bitrate'] = (isset($server['5']) ? ($server['5'] * 1024) : "0"); + $var['traf_in'] = (isset($server['1']) ? ($var['bitrate'] * $server['1']) : "0"); + $var['traf_out'] = (isset($server['0']) ? ($var['bitrate'] * $server['0']) : "0"); + $var['current'] = (isset($server['0']) ? $server['0'] : "0"); + $var['status'] = (isset($server['1']) ? $server['1'] : "0"); + $var['peak'] = (isset($server['2']) ? $server['2'] : "0"); + $var['max'] = (isset($server['3']) ? $server['3'] : "0"); + $var['unique'] = (isset($server['4']) ? $server['4'] : "0"); + $host = (isset($server['7']) ? $server['7'] : "unknown"); + $port = (isset($server['8']) ? 
$server['8'] : "unknown"); + $tmp = $host.":".$port; + foreach ($var as $item=>$value) { + $tmp .= ";".$value; + } + array_push($res, $tmp); + } + return $res; + } + + function makeCacheFile($data, $cache) { + $fp = fopen($cache, 'w'); + foreach ($data as $item=>$value) { + fwrite($fp, $value."\n"); + } + fclose($fp); + } + + function readCacheFile($cache) { + if (file_exists($cache)) { + $data = file($cache); + foreach ($data as $item=>$value) { + echo trim($value)."\n"; + } + } + } + + if ($cmd == "makeCache") { + $servers = get_list($config); + $data = doSNMPv2($servers); + makeCacheFile($data, $cache); + } else { + readCacheFile($cache); + } + +?> From 3e436594bae5af16fdc76c785dcde8031166c6f0 Mon Sep 17 00:00:00 2001 From: Tatermen Date: Sun, 28 Aug 2016 20:06:04 +0100 Subject: [PATCH 054/497] Freeswitch (#53) feature: Added freeswitch support --- agent-local/freeswitch | 46 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 46 insertions(+) create mode 100755 agent-local/freeswitch diff --git a/agent-local/freeswitch b/agent-local/freeswitch new file mode 100755 index 000000000..81efd43ed --- /dev/null +++ b/agent-local/freeswitch @@ -0,0 +1,46 @@ +#!/bin/bash + +FSCLI=/usr/local/freeswitch/bin/fs_cli + +inRe="Inbound\(Failed\/Total\): ([0-9]+)\/([0-9]+)" +outRe="Outbound\(Failed\/Total\):([0-9]+)\/([0-9]+)" + +if [ -f $FSCLI ]; +then + $FSCLI -x "status" > /dev/null + if [ $? -ne 0 ]; then + # Freeswitch not running, silently exit. 
+ exit 0 + fi + + PEAK=`$FSCLI -x "status" | grep 'per Sec' | grep -Po 'last 5min \d+' | cut -d ' ' -f 3` + CALLCOUNT=`$FSCLI -x "show calls count" | grep -Po '^\d+'` + CHANNELCOUNT=`$FSCLI -x "show channels count" | grep -Po '^\d+'` + GATEWAYS=`$FSCLI -x "sofia status gateway"` + if [[ $GATEWAYS =~ $inRe ]]; then + INFAILED=${BASH_REMATCH[1]} + INTOTAL=${BASH_REMATCH[2]} + else + INFAILED=0 + INTOTAL=0 + fi + if [[ $GATEWAYS =~ $outRe ]]; then + OUTFAILED=${BASH_REMATCH[1]} + OUTTOTAL=${BASH_REMATCH[2]} + else + OUTFAILED=0 + OUTTOTAL=0 + fi + + echo "<<>>" + echo "Calls=$CALLCOUNT" + echo "Channels=$CHANNELCOUNT" + echo "Peak=$PEAK" + echo "InFailed=$INFAILED" + echo "InTotal=$INTOTAL" + echo "OutFailed=$OUTFAILED" + echo "OutTotal=$OUTTOTAL" + +else + exit 0 +fi From a910bc944a42c36dcad07f127cb0efa6d61473cb Mon Sep 17 00:00:00 2001 From: crcro Date: Wed, 7 Sep 2016 22:37:23 +0300 Subject: [PATCH 055/497] app: ntp-server (#55) * ntp-server app using shell only, tested with 4.2.8p8 * fix for higher stratum value * change the description in comment to reflect latest webui push --- snmp/ntp-server.sh | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) create mode 100755 snmp/ntp-server.sh diff --git a/snmp/ntp-server.sh b/snmp/ntp-server.sh new file mode 100755 index 000000000..dbf0fb99b --- /dev/null +++ b/snmp/ntp-server.sh @@ -0,0 +1,36 @@ +#!/usr/bin/env bash +################################################################ +# copy this script to somewhere like /opt and make chmod +x it # +# edit your snmpd.conf and include # +# extend ntp-server /opt/ntp-server.sh # +# restart snmpd and activate the app for desired host # +# please make sure you have the path/binaries below # +################################################################ +# Binaries and paths required # +################################################################ +BIN_NTPQ='/usr/sbin/ntpq' +BIN_GREP='/usr/bin/grep' +BIN_TR='/usr/bin/tr' +BIN_CUT='/usr/bin/cut' 
+BIN_SED='/usr/bin/sed' +################################################################ +# Don't change anything unless you know what are you doing # +################################################################ +CMD0=`$BIN_NTPQ -c rv | $BIN_GREP -Eow "stratum=[0-9]+" | $BIN_CUT -d "=" -f 2` +echo $CMD0 + +CMD1=`$BIN_NTPQ -c rv | $BIN_GREP 'jitter' | $BIN_TR '\n' ' '` +IFS=', ' read -r -a array <<< "$CMD1" + +for value in 2 3 4 5 6 +do + echo ${array["$value"]} | $BIN_CUT -d "=" -f 2 +done + +CMD2=`$BIN_NTPQ -c iostats localhost | $BIN_TR -d ' ' | $BIN_TR '\n' ','` +IFS=',' read -r -a array <<< "$CMD2" + +for value in 0 1 2 3 5 6 7 8 +do + echo ${array["$value"]} | $BIN_SED -e 's/[^0-9]/ /g' -e 's/^ *//g' -e 's/ *$//g' +done From 8cc504183bbafe727cd056ad8ceb1b70a03f767f Mon Sep 17 00:00:00 2001 From: crcro Date: Wed, 7 Sep 2016 22:37:31 +0300 Subject: [PATCH 056/497] ntp-client app using shell only, tested with ntpq 4.2.8p8 (#54) --- snmp/ntp-client.sh | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) create mode 100755 snmp/ntp-client.sh diff --git a/snmp/ntp-client.sh b/snmp/ntp-client.sh new file mode 100755 index 000000000..447c1f796 --- /dev/null +++ b/snmp/ntp-client.sh @@ -0,0 +1,25 @@ +#!/usr/bin/env bash +################################################################ +# copy this script to somewhere like /opt and make chmod +x it # +# edit your snmpd.conf and include # +# extend ntp-client /opt/ntp-client.sh # +# restart snmpd and activate the app for desired host # +# please make sure you have the path/binaries below # +################################################################ +# Binaries and paths required # +################################################################ +BIN_NTPQ='/usr/sbin/ntpq' +BIN_GREP='/usr/bin/grep' +BIN_TR='/usr/bin/tr' +BIN_CUT='/usr/bin/cut' +################################################################ +# Don't change anything unless you know what are you doing # 
+################################################################ +CMD1=`$BIN_NTPQ -c rv | $BIN_GREP 'jitter' | $BIN_TR '\n' ' '` +IFS=', ' read -r -a array <<< "$CMD1" + +for value in 2 3 4 5 6 +do + echo ${array["$value"]} | $BIN_CUT -d "=" -f 2 +done + From 7a820d4d6bfba49ce74f51b18f6a28d08125496c Mon Sep 17 00:00:00 2001 From: vectr0n Date: Fri, 9 Sep 2016 02:16:28 -0400 Subject: [PATCH 057/497] Update hddtemp to include hddtemp -w option (#56) hddtemp gives inconsistent values in it's current state, after some debugging I was able to resolve the issue by passing -w to the hddtemp command, this will wake-up the drive if it is in a sleep state to gather information. --- agent-local/hddtemp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/agent-local/hddtemp b/agent-local/hddtemp index eec057955..eb749cb0a 100755 --- a/agent-local/hddtemp +++ b/agent-local/hddtemp @@ -19,7 +19,7 @@ hddtemp=`which hddtemp 2>/dev/null` if [ "${hddtemp}" != "" ]; then if [ -x "${hddtemp}" ]; then - content=`${hddtemp} -q ${disks} 2>/dev/null | awk -F": " 'BEGIN{ ORS="" }{ print "|"$1"|"$2"|"$3"|";} ' | sed 's/°/\|/g';` + content=`${hddtemp} -w -q ${disks} 2>/dev/null | awk -F": " 'BEGIN{ ORS="" }{ print "|"$1"|"$2"|"$3"|";} ' | sed 's/°/\|/g';` if [ "${content}" != "" ]; then echo '<<>>' echo ${content} From e8047f7fa8e871cfe744c7a64e448726cbeccf1e Mon Sep 17 00:00:00 2001 From: crcro Date: Fri, 9 Sep 2016 15:36:01 +0300 Subject: [PATCH 058/497] snmp-extend-ups-nut --- snmp/ups-nut.sh | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) create mode 100755 snmp/ups-nut.sh diff --git a/snmp/ups-nut.sh b/snmp/ups-nut.sh new file mode 100755 index 000000000..321ca9805 --- /dev/null +++ b/snmp/ups-nut.sh @@ -0,0 +1,25 @@ +#!/usr/bin/env bash +################################################################ +# copy this script to somewhere like /opt and make chmod +x it # +# edit your snmpd.conf and include # +# extend ups-nut /opt/ups-nut.sh # +# restart snmpd 
and activate the app for desired host # +# please make sure you have the path/binaries below # +################################################################ +# Binaries and paths required # +################################################################ +BIN_UPSC='/usr/bin/upsc' +UPSC_CMD='APCUPS' +BIN_SED='/usr/bin/sed' +BIN_TR='/usr/bin/tr' +BIN_CUT='/usr/bin/cut' +################################################################ +# Don't change anything unless you know what are you doing # +################################################################ +CMD1=`$BIN_UPSC $UPSC_CMD | $BIN_SED "1 d" | $BIN_TR '\n' '|' | $BIN_TR -d ' '` +IFS='|' read -r -a array <<< "$CMD1" + +for value in 0 1 5 8 11 12 25 26 31 +do + echo ${array["$value"]} | $BIN_CUT -d ":" -f 2 +done From edf24665aaa66385d06ce58cc261776ac55423b3 Mon Sep 17 00:00:00 2001 From: crcro Date: Sat, 10 Sep 2016 04:48:55 +0300 Subject: [PATCH 059/497] remove obsolete ntp-client.php script --- snmp/ntp-client.php | 79 --------------------------------------------- 1 file changed, 79 deletions(-) delete mode 100755 snmp/ntp-client.php diff --git a/snmp/ntp-client.php b/snmp/ntp-client.php deleted file mode 100755 index 9332791af..000000000 --- a/snmp/ntp-client.php +++ /dev/null @@ -1,79 +0,0 @@ -#!/usr/bin/env php - -// - -// START SETTINGS /// -$ntpq = '/usr/sbin/ntpq'; -// Change this to true if you have clk_jitter, sys_jitter in the ntpq -c rv output -$newstats_style = false; -// END SETTINGS /// - -// DO NOT EDIT UNDER THIS LINE - -$cmd = shell_exec($ntpq." -c rv | grep '^offset'"); -if ($newstats_style) { - $cmd2 = shell_exec($ntpq." -c rv | grep '^clk_wander'"); -} -else { - $cmd2 = shell_exec($ntpq." 
-c rv | grep '^stability'"); -} - -$vars = array(); -$vars2 = array(); -$vars = explode(',', $cmd); -$vars2 = explode(',', $cmd2); - - -function doSNMPv2($vars, $vars2, $newstats_style) -{ - $ntp = array(); - foreach ($vars as $item => $value) { - if (!empty($value)) { - $temp = explode('=', $value); - if (isset($temp[1])) { - $ntp[trim($temp[0])] = trim($temp[1]); - } - } - } - - foreach ($vars2 as $item => $value) { - if (!empty($value)) { - $temp = explode('=', $value); - if (isset($temp[1])) { - $ntp[trim($temp[0])] = trim($temp[1]); - } - } - } - - $var = array(); - $var['offset'] = (isset($ntp['offset']) ? $ntp['offset'] : 'U'); - $var['frequency'] = (isset($ntp['frequency']) ? $ntp['frequency'] : 'U'); - if ($newstats_style) { - $var['jitter'] = (isset($ntp['clk_jitter']) ? $ntp['clk_jitter'] : 'U'); - $var['noise'] = (isset($ntp['sys_jitter']) ? $ntp['sys_jitter'] : 'U'); - $var['stability'] = (isset($ntp['clk_wander']) ? $ntp['clk_wander'] : 'U'); - } - else { - $var['jitter'] = (isset($ntp['jitter']) ? $ntp['jitter'] : 'U'); - $var['noise'] = (isset($ntp['noise']) ? $ntp['noise'] : 'U'); - $var['stability'] = (isset($ntp['stability']) ? 
$ntp['stability'] : 'U'); - } - - foreach ($var as $item => $count) { - echo $count."\n"; - } - -}//end doSNMPv2() - - -doSNMPv2($vars, $vars2, $newstats_style); From c085fcbe4732a1975e23a01ca4fde5852c331db4 Mon Sep 17 00:00:00 2001 From: crcro Date: Sat, 10 Sep 2016 04:50:33 +0300 Subject: [PATCH 060/497] remove obsolete ntpd-server.php --- snmp/ntpd-server.php | 83 -------------------------------------------- 1 file changed, 83 deletions(-) delete mode 100755 snmp/ntpd-server.php diff --git a/snmp/ntpd-server.php b/snmp/ntpd-server.php deleted file mode 100755 index aba7b1ffe..000000000 --- a/snmp/ntpd-server.php +++ /dev/null @@ -1,83 +0,0 @@ -#!/usr/bin/env php - - -// START SETTINGS /// -$ntpq = '/usr/sbin/ntpq'; -$ntpdc = '/usr/sbin/ntpdc'; -// Change this to true if you have clk_jitter, sys_jitter in the ntpq -c rv output -$newstats_style = false; -// END SETTINGS /// - -// DO NOT EDIT UNDER THIS LINE - -$cmd = shell_exec($ntpq.' -c rv'); -$cmd2 = shell_exec($ntpdc.' -c iostats'); -$vars = array(); -$vars2 = array(); -$vars = explode(',', $cmd); -$vars2 = str_replace(' ', '', $cmd2); -$vars2 = explode("\n", $vars2); - - -function doSNMPv2($vars, $vars2, $newstats_style) -{ - $ntpd = array(); - foreach ($vars as $item => $value) { - if (!empty($value)) { - $temp = explode('=', $value); - if (isset($temp[1])) { - $ntpd[trim($temp[0])] = trim($temp[1]); - } - } - } - - foreach ($vars2 as $item => $value) { - if (!empty($value)) { - $temp = explode(':', $value); - if (isset($temp[1])) { - $ntpd[trim($temp[0])] = trim($temp[1]); - } - } - } - - $var = array(); - $var['stratum'] = (isset($ntpd['stratum']) ? $ntpd['stratum'] : 'U'); - $var['offset'] = (isset($ntpd['offset']) ? $ntpd['offset'] : 'U'); - $var['frequency'] = (isset($ntpd['frequency']) ? $ntpd['frequency'] : 'U'); - if ($newstats_style) { - $var['jitter'] = (isset($ntpd['clk_jitter']) ? $ntpd['clk_jitter'] : 'U'); - $var['noise'] = (isset($ntpd['sys_jitter']) ? 
$ntpd['sys_jitter'] : 'U'); - $var['stability'] = (isset($ntpd['clk_wander']) ? $ntpd['clk_wander'] : 'U'); - } - else { - $var['jitter'] = (isset($ntpd['jitter']) ? $ntpd['jitter'] : 'U'); - $var['noise'] = (isset($ntpd['noise']) ? $ntpd['noise'] : 'U'); - $var['stability'] = (isset($ntpd['stability']) ? $ntpd['stability'] : 'U'); - } - - $var['uptime'] = (isset($ntpd['timesincereset']) ? $ntpd['timesincereset'] : 'U'); - $var['buffer_recv'] = (isset($ntpd['receivebuffers']) ? $ntpd['receivebuffers'] : 'U'); - $var['buffer_free'] = (isset($ntpd['freereceivebuffers']) ? $ntpd['freereceivebuffers'] : 'U'); - $var['buffer_used'] = (isset($ntpd['usedreceivebuffers']) ? $ntpd['usedreceivebuffers'] : 'U'); - $var['packets_drop'] = (isset($ntpd['droppedpackets']) ? $ntpd['droppedpackets'] : 'U'); - $var['packets_ignore'] = (isset($ntpd['ignoredpackets']) ? $ntpd['ignoredpackets'] : 'U'); - $var['packets_recv'] = (isset($ntpd['receivedpackets']) ? $ntpd['receivedpackets'] : 'U'); - $var['packets_sent'] = (isset($ntpd['packetssent']) ? 
$ntpd['packetssent'] : 'U'); - foreach ($var as $item => $count) { - echo $count."\n"; - } - -}//end doSNMPv2() - - -doSNMPv2($vars, $vars2, $newstats_style); From 0b9feacde8c0bf2dd3bde08bce55972fa6ddd461 Mon Sep 17 00:00:00 2001 From: crcro Date: Sat, 10 Sep 2016 19:08:03 +0300 Subject: [PATCH 061/497] os-updates.sh clean (#59) --- snmp/os-updates.sh | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/snmp/os-updates.sh b/snmp/os-updates.sh index 09d5bbb96..5132e2de0 100755 --- a/snmp/os-updates.sh +++ b/snmp/os-updates.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash ################################################################ # copy this script to somewhere like /opt and make chmod +x it # # edit your snmpd.conf and include # @@ -7,6 +7,7 @@ ################################################################ BIN_WC='/usr/bin/wc' CMD_WC='-l' +BIN_GREP='/usr/bin/grep' BIN_ZYPPER='/usr/bin/zypper' CMD_ZYPPER='lu' BIN_YUM='/usr/bin/yum' @@ -18,7 +19,7 @@ CMD_PACMAN='-Sup' if [ -f $BIN_APT ]; then # Debian / Ubuntu - UPDATES=`$BIN_APT $CMD_APT | grep 'Inst' | $BIN_WC $CMD_WC` + UPDATES=`$BIN_APT $CMD_APT | $BIN_GREP 'Inst' | $BIN_WC $CMD_WC` echo $UPDATES; elif [ -f $BIN_YUM ]; then # CentOS / Redhat From f74f93f244b379768c25e7e6fe266bf70e3942dc Mon Sep 17 00:00:00 2001 From: crcro Date: Mon, 12 Sep 2016 11:43:34 +0300 Subject: [PATCH 062/497] update 1 ntp server --- snmp/ntp-server.sh | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/snmp/ntp-server.sh b/snmp/ntp-server.sh index dbf0fb99b..08c8d1a3a 100755 --- a/snmp/ntp-server.sh +++ b/snmp/ntp-server.sh @@ -2,13 +2,15 @@ ################################################################ # copy this script to somewhere like /opt and make chmod +x it # # edit your snmpd.conf and include # -# extend ntp-server /opt/ntp-server.sh # +# extend ntpdserver /opt/ntp-server.sh # # restart snmpd and activate the app for desired host # # please make sure you have the 
path/binaries below # ################################################################ # Binaries and paths required # ################################################################ +BIN_NTPD='/usr/sbin/ntpd' BIN_NTPQ='/usr/sbin/ntpq' +BIN_NTPDC='/usr/sbin/ntpdc' BIN_GREP='/usr/bin/grep' BIN_TR='/usr/bin/tr' BIN_CUT='/usr/bin/cut' @@ -16,6 +18,8 @@ BIN_SED='/usr/bin/sed' ################################################################ # Don't change anything unless you know what are you doing # ################################################################ +VER=`$BIN_NTPD --version` + CMD0=`$BIN_NTPQ -c rv | $BIN_GREP -Eow "stratum=[0-9]+" | $BIN_CUT -d "=" -f 2` echo $CMD0 @@ -27,7 +31,13 @@ do echo ${array["$value"]} | $BIN_CUT -d "=" -f 2 done -CMD2=`$BIN_NTPQ -c iostats localhost | $BIN_TR -d ' ' | $BIN_TR '\n' ','` +if [[ "$VER" =~ '4.2.8p8' ]] +then + CMD2=`$BIN_NTPQ -c iostats localhost | $BIN_TR -d ' ' | $BIN_TR '\n' ','` +else + CMD2=`$BIN_NTPDC -c iostats | $BIN_TR -d ' ' | $BIN_TR '\n' ','` +fi + IFS=',' read -r -a array <<< "$CMD2" for value in 0 1 2 3 5 6 7 8 From 1029c1bced0e3329cd8e59ae0b2c9f24f3204be5 Mon Sep 17 00:00:00 2001 From: crcro Date: Mon, 12 Sep 2016 11:45:14 +0300 Subject: [PATCH 063/497] header fix --- snmp/ntp-server.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/ntp-server.sh b/snmp/ntp-server.sh index 08c8d1a3a..79d7174d1 100755 --- a/snmp/ntp-server.sh +++ b/snmp/ntp-server.sh @@ -2,7 +2,7 @@ ################################################################ # copy this script to somewhere like /opt and make chmod +x it # # edit your snmpd.conf and include # -# extend ntpdserver /opt/ntp-server.sh # +# extend ntp-server /opt/ntp-server.sh # # restart snmpd and activate the app for desired host # # please make sure you have the path/binaries below # ################################################################ From eb119c16f72ab2634422ba22ef16d3b568f311c3 Mon Sep 17 00:00:00 2001 From: crcro Date: Mon, 
12 Sep 2016 11:52:13 +0300 Subject: [PATCH 064/497] better handling default case --- snmp/ntp-server.sh | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/snmp/ntp-server.sh b/snmp/ntp-server.sh index 79d7174d1..6134214e6 100755 --- a/snmp/ntp-server.sh +++ b/snmp/ntp-server.sh @@ -31,11 +31,11 @@ do echo ${array["$value"]} | $BIN_CUT -d "=" -f 2 done -if [[ "$VER" =~ '4.2.8p8' ]] +if [[ "$VER" =~ '4.2.6p5' ]] then - CMD2=`$BIN_NTPQ -c iostats localhost | $BIN_TR -d ' ' | $BIN_TR '\n' ','` -else CMD2=`$BIN_NTPDC -c iostats | $BIN_TR -d ' ' | $BIN_TR '\n' ','` +else + CMD2=`$BIN_NTPQ -c iostats localhost | $BIN_TR -d ' ' | $BIN_TR '\n' ','` fi IFS=',' read -r -a array <<< "$CMD2" From ba5c18cb6c55227ed230210791f0335c3dd50d33 Mon Sep 17 00:00:00 2001 From: crcro Date: Mon, 12 Sep 2016 12:48:17 +0300 Subject: [PATCH 065/497] cleaner code --- snmp/ntp-server.sh | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/snmp/ntp-server.sh b/snmp/ntp-server.sh index 6134214e6..506b2f8d3 100755 --- a/snmp/ntp-server.sh +++ b/snmp/ntp-server.sh @@ -33,10 +33,11 @@ done if [[ "$VER" =~ '4.2.6p5' ]] then - CMD2=`$BIN_NTPDC -c iostats | $BIN_TR -d ' ' | $BIN_TR '\n' ','` + USECMD=`echo $BIN_NTPDC -c iostats` else - CMD2=`$BIN_NTPQ -c iostats localhost | $BIN_TR -d ' ' | $BIN_TR '\n' ','` + USECMD=`echo $BIN_NTPQ -c iostats localhost` fi +CMD2=`$USECMD | $BIN_TR -d ' ' | $BIN_TR '\n' ','` IFS=',' read -r -a array <<< "$CMD2" From a9cf25075572c9f42ded7e507ec890442a9589d2 Mon Sep 17 00:00:00 2001 From: crcro Date: Sat, 24 Sep 2016 20:30:09 +0300 Subject: [PATCH 066/497] snmp-extend: ups-nut update 1 (#63) * new code for better matching, snmp-extend compliance * removed unused vars * extra fixes * removed the need of tmp file * removed charge_low, deemed useless by user * removed values that are not plottable * readded ds --- snmp/ups-nut.sh | 27 +++++++++++++++++---------- 1 file changed, 17 insertions(+), 10 deletions(-) diff --git 
a/snmp/ups-nut.sh b/snmp/ups-nut.sh index 321ca9805..2b67ef6f4 100755 --- a/snmp/ups-nut.sh +++ b/snmp/ups-nut.sh @@ -1,25 +1,32 @@ #!/usr/bin/env bash ################################################################ -# copy this script to somewhere like /opt and make chmod +x it # -# edit your snmpd.conf and include # -# extend ups-nut /opt/ups-nut.sh # +# copy this script to /etc/snmp/ and make it executable: # +# chmod +x ups-nut.sh # +# ------------------------------------------------------------ # +# edit your snmpd.conf and include: # +# extend ups-nut /etc/snmp/ups-nut.sh # +#--------------------------------------------------------------# # restart snmpd and activate the app for desired host # +#--------------------------------------------------------------# # please make sure you have the path/binaries below # ################################################################ -# Binaries and paths required # -################################################################ BIN_UPSC='/usr/bin/upsc' UPSC_CMD='APCUPS' -BIN_SED='/usr/bin/sed' +BIN_CAT='/usr/bin/cat' +BIN_GREP='/usr/bin/grep' BIN_TR='/usr/bin/tr' BIN_CUT='/usr/bin/cut' ################################################################ # Don't change anything unless you know what are you doing # ################################################################ -CMD1=`$BIN_UPSC $UPSC_CMD | $BIN_SED "1 d" | $BIN_TR '\n' '|' | $BIN_TR -d ' '` -IFS='|' read -r -a array <<< "$CMD1" +TMP=`$BIN_UPSC $UPSC_CMD 2>/dev/null` -for value in 0 1 5 8 11 12 25 26 31 +for value in "battery.charge:[0-9]+" "battery.low:[0-9]+" "battery.runtime:[0-9]+" "battery.voltage:[0-9.]+" "battery.voltage.nominal:[0-9]+" "input.voltage.nominal:[0-9.]+" "input.voltage:[0-9.]+" "ups.load:[0-9]+" do - echo ${array["$value"]} | $BIN_CUT -d ":" -f 2 + OUT=`echo "$TMP" | $BIN_TR -d ' ' | $BIN_GREP -Eow $value | $BIN_CUT -d ":" -f 2` + if [ -n "$OUT" ]; then + echo $OUT + else + echo "Unknown" + fi done From 
a76be13463039b308ebfc1fa0a0d1b5b23033825 Mon Sep 17 00:00:00 2001 From: crcro Date: Sun, 25 Sep 2016 16:28:00 +0300 Subject: [PATCH 067/497] snmp-extend: ups-apcups (#58) * snmp-extend-ups-apcups * rewrite of apc ups * header fix * header fix --- snmp/ups-apcups.sh | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) create mode 100755 snmp/ups-apcups.sh diff --git a/snmp/ups-apcups.sh b/snmp/ups-apcups.sh new file mode 100755 index 000000000..0e41a14e1 --- /dev/null +++ b/snmp/ups-apcups.sh @@ -0,0 +1,30 @@ +#!/usr/bin/env bash +################################################################ +# copy this script to /etc/snmp/ and make it executable: # +# chmod +x /etc/snmp/ups-apcups.sh # +# ------------------------------------------------------------ # +# edit your snmpd.conf and include: # +# extend ups-apcups /etc/snmp/ups-apcups.sh # +#--------------------------------------------------------------# +# restart snmpd and activate the app for desired host # +#--------------------------------------------------------------# +# please make sure you have the path/binaries below # +################################################################ +BIN_APCS='/sbin/apcaccess' +BIN_TR='/usr/bin/tr' +BIN_CUT='/usr/bin/cut' +BIN_GREP='/usr/bin/grep' +################################################################ +# Don't change anything unless you know what are you doing # +################################################################ +TMP=`$BIN_APCS 2>/dev/null` + +for value in "LINEV:[0-9]+" "LOADPCT:[0-9.]+" "BCHARGE:[0-9.]+" "TIMELEFT:[0-9.]+" "^BATTV:[0-9.]+" "NOMINV:[0-9]+" "NOMBATTV:[0-9.]+" +do + OUT=`echo "$TMP" | $BIN_TR -d ' ' | $BIN_GREP -Eo $value | $BIN_CUT -d ":" -f 2` + if [ -n "$OUT" ]; then + echo $OUT + else + echo "Unknown" + fi +done \ No newline at end of file From 113e30a8b5949a878246fe7a1af8bb43002d7651 Mon Sep 17 00:00:00 2001 From: crcro Date: Sun, 25 Sep 2016 16:28:37 +0300 Subject: [PATCH 068/497] snmp-extend: os-updates 
(#65) * reverted back to os-release checks, added arch pacman * fixed file name --- snmp/os-updates.sh | 77 ++++++++++++++++++++++++++-------------------- 1 file changed, 44 insertions(+), 33 deletions(-) diff --git a/snmp/os-updates.sh b/snmp/os-updates.sh index 5132e2de0..e983b3c43 100755 --- a/snmp/os-updates.sh +++ b/snmp/os-updates.sh @@ -1,13 +1,18 @@ #!/usr/bin/env bash ################################################################ -# copy this script to somewhere like /opt and make chmod +x it # -# edit your snmpd.conf and include # -# extend osupdate /opt/os-updates.sh # +# copy this script to /etc/snmp/ and make it executable: # +# chmod +x /etc/snmp/os-updates.sh # +# ------------------------------------------------------------ # +# edit your snmpd.conf and include: # +# extend osupdate /opt/os-updates.sh # +#--------------------------------------------------------------# # restart snmpd and activate the app for desired host # +#--------------------------------------------------------------# +# please make sure you have the path/binaries below # ################################################################ +BIN_AWK='/usr/bin/awk' BIN_WC='/usr/bin/wc' CMD_WC='-l' -BIN_GREP='/usr/bin/grep' BIN_ZYPPER='/usr/bin/zypper' CMD_ZYPPER='lu' BIN_YUM='/usr/bin/yum' @@ -17,34 +22,40 @@ CMD_APT='-s upgrade' BIN_PACMAN='/usr/bin/pacman' CMD_PACMAN='-Sup' -if [ -f $BIN_APT ]; then - # Debian / Ubuntu - UPDATES=`$BIN_APT $CMD_APT | $BIN_GREP 'Inst' | $BIN_WC $CMD_WC` - echo $UPDATES; -elif [ -f $BIN_YUM ]; then - # CentOS / Redhat - UPDATES=`$BIN_YUM $CMD_YUM | $BIN_WC $CMD_WC` - if [ $UPDATES -gt 1 ]; then - echo $(($UPDATES-1)); - else - echo "0"; - fi -elif [ -f $BIN_ZYPPER ]; then - # OpenSUSE - UPDATES=`$BIN_ZYPPER $CMD_ZYPPER | $BIN_WC $CMD_WC` - if [ $UPDATES -gt 3 ]; then - echo $(($UPDATES-3)); - else - echo "0"; - fi -elif [ -f $BIN_PACMAN ]; then - # Arch - UPDATES=`$BIN_PACMAN $CMD_PACMAN | $BIN_WC $CMD_WC` - if [ $UPDATES -gt 1 ]; then - echo 
$(($UPDATES-1)); - else - echo "0"; - fi +################################################################ +# Don't change anything unless you know what are you doing # +################################################################ +if [ -f /etc/os-release ]; then + OS=`$BIN_AWK -F= '/^ID=/{print $2}' /etc/os-release` + if [ $OS == "opensuse" ]; then + UPDATES=`$BIN_ZYPPER $CMD_ZYPPER | $BIN_WC $CMD_WC` + if [ $UPDATES -gt 3 ]; then + echo $(($UPDATES-3)); + else + echo "0"; + fi + elif [ $OS == "\"centos\"" ]; then + UPDATES=`$BIN_YUM $CMD_YUM | $BIN_WC $CMD_WC` + if [ $UPDATES -gt 6 ]; then + echo $(($UPDATES-6)); + else + echo "0"; + fi + elif [ $OS == "ubuntu" ]; then + UPDATES=`$BIN_APT $CMD_APT | $BIN_WC $CMD_WC` + if [ $UPDATES -gt 1 ]; then + echo $(($UPDATES-1)); + else + echo "0"; + fi + elif [ $OS == "arch" ]; then + UPDATES=`$BIN_PACMAN $CMD_PACMAN | $BIN_WC $CMD_WC` + if [ $UPDATES -gt 1 ]; then + echo $(($UPDATES-1)); + else + echo "0"; + fi + fi else - echo "0"; + echo "0"; fi From 20e0518a120ca95e1ee685d6c70ca5fdd3f5f727 Mon Sep 17 00:00:00 2001 From: crcro Date: Mon, 3 Oct 2016 21:27:56 +0300 Subject: [PATCH 069/497] fix conflict (#66) --- snmp/os-updates.sh | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/snmp/os-updates.sh b/snmp/os-updates.sh index e983b3c43..0c49ea907 100755 --- a/snmp/os-updates.sh +++ b/snmp/os-updates.sh @@ -12,13 +12,14 @@ ################################################################ BIN_AWK='/usr/bin/awk' BIN_WC='/usr/bin/wc' +BIN_GREP='/bin/grep' CMD_WC='-l' BIN_ZYPPER='/usr/bin/zypper' CMD_ZYPPER='lu' BIN_YUM='/usr/bin/yum' CMD_YUM='-q check-update' BIN_APT='/usr/bin/apt-get' -CMD_APT='-s upgrade' +CMD_APT='-qq -s upgrade' BIN_PACMAN='/usr/bin/pacman' CMD_PACMAN='-Sup' @@ -42,11 +43,11 @@ if [ -f /etc/os-release ]; then echo "0"; fi elif [ $OS == "ubuntu" ]; then - UPDATES=`$BIN_APT $CMD_APT | $BIN_WC $CMD_WC` + UPDATES=`$BIN_APT $CMD_APT | $BIN_GREP 'Inst' | $BIN_WC $CMD_WC` if [ 
$UPDATES -gt 1 ]; then - echo $(($UPDATES-1)); + echo $UPDATES; else - echo "0"; + echo "0"; fi elif [ $OS == "arch" ]; then UPDATES=`$BIN_PACMAN $CMD_PACMAN | $BIN_WC $CMD_WC` @@ -59,3 +60,4 @@ if [ -f /etc/os-release ]; then else echo "0"; fi + From 7c9372d263008e57746f559aa2cc6e582ca87a0f Mon Sep 17 00:00:00 2001 From: "Tuxis Internet Engineering V.O.F" Date: Wed, 5 Oct 2016 11:06:48 +0200 Subject: [PATCH 070/497] fix: a dirty hack to prevent failing of stats when the cluster is rebuilding (#68) because Ceph returns '-inf' which the json decompiler doesn't seem to get.. --- agent-local/ceph | 6 +++--- debian/changelog | 7 +++++++ 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/agent-local/ceph b/agent-local/ceph index 60966aa27..c9bd1a0c1 100755 --- a/agent-local/ceph +++ b/agent-local/ceph @@ -18,7 +18,7 @@ from subprocess import check_output import json def cephdf(): - cephdf = check_output(["/usr/bin/ceph", "-f", "json", "df"]) + cephdf = check_output(["/usr/bin/ceph", "-f", "json", "df"]).replace('-inf', '0') s = json.loads(cephdf) print("c:%i:%i:%i" % (s['stats']['total_bytes'], s['stats']['total_used_bytes'], s['stats']['total_avail_bytes'])) @@ -31,13 +31,13 @@ def cephdf(): def osdperf(): - osdperf = check_output(["/usr/bin/ceph", "-f", "json", "osd", "perf"]) + osdperf = check_output(["/usr/bin/ceph", "-f", "json", "osd", "perf"]).replace('-inf', '0') for o in json.loads(osdperf)['osd_perf_infos']: print("osd.%s:%i:%i" % (o['id'], o['perf_stats']['apply_latency_ms'], o['perf_stats']['commit_latency_ms'])) def poolstats(): - poolstats = check_output(["/usr/bin/ceph", "-f", "json", "osd", "pool", "stats"]) + poolstats = check_output(["/usr/bin/ceph", "-f", "json", "osd", "pool", "stats"]).replace('-inf', '0') for p in json.loads(poolstats): try: diff --git a/debian/changelog b/debian/changelog index a0676a941..39a11a1e3 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,10 @@ +librenms-agent (1.0.6) stable; urgency=low + + - 
Fix a dirty hack to prevent failing of stats when the cluster is rebuilding + because Ceph returns '-inf' which the json decompiler doesn't seem to get.. + + -- Mark Schouten Tue, 05 Oct 2016 10:57:00 +0200 + librenms-agent (1.0.5) stable; urgency=low - Include a PowerDNS Authoritative agent From d5f61b76013badd781d0de9c82c2b98482852e1f Mon Sep 17 00:00:00 2001 From: Karl Shea Date: Sat, 8 Oct 2016 15:26:07 -0500 Subject: [PATCH 071/497] Agent: script to collect data from GPSD (#69) --- agent-local/gpsd | 53 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 53 insertions(+) create mode 100755 agent-local/gpsd diff --git a/agent-local/gpsd b/agent-local/gpsd new file mode 100755 index 000000000..cfb0b0b97 --- /dev/null +++ b/agent-local/gpsd @@ -0,0 +1,53 @@ +#!/usr/bin/env php +>>\n"; + + if (!empty($resp->tpv[0])) { + $tpv = $resp->tpv[0]; + print "mode:{$tpv->mode}\n"; + } + + if (!empty($resp->sky[0])) { + $sky = $resp->sky[0]; + + $sat_count = count($sky->satellites); + $sat_used = count(array_filter($sky->satellites, "satellite_used")); + + print "hdop:{$sky->hdop}\n"; + print "vdop:{$sky->vdop}\n"; + print "satellites:{$sat_count}\n"; + print "satellites_used:{$sat_used}\n"; + } + } +} + +function satellite_used($sat) { + return $sat->used; +} \ No newline at end of file From 423cd9b42f1fc326029b5f0431c8bd7c71403447 Mon Sep 17 00:00:00 2001 From: Mathias B Date: Thu, 17 Nov 2016 09:31:56 +0100 Subject: [PATCH 072/497] Add Debian support (#71) Before that only Ubuntu was supported, now Debian users can use this nice script too! 
--- snmp/os-updates.sh | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/snmp/os-updates.sh b/snmp/os-updates.sh index 0c49ea907..aef6eff06 100755 --- a/snmp/os-updates.sh +++ b/snmp/os-updates.sh @@ -42,6 +42,13 @@ if [ -f /etc/os-release ]; then else echo "0"; fi + elif [ $OS == "debian" ]; then + UPDATES=`$BIN_APT $CMD_APT | $BIN_GREP 'Inst' | $BIN_WC $CMD_WC` + if [ $UPDATES -gt 1 ]; then + echo $UPDATES; + else + echo "0"; + fi elif [ $OS == "ubuntu" ]; then UPDATES=`$BIN_APT $CMD_APT | $BIN_GREP 'Inst' | $BIN_WC $CMD_WC` if [ $UPDATES -gt 1 ]; then From 3bfbf3feeefdfc76e6e18bb2d03c0154a739cac9 Mon Sep 17 00:00:00 2001 From: arrmo Date: Sat, 26 Nov 2016 02:12:41 -0600 Subject: [PATCH 073/497] Update to Distro (to support dd-wrt) (#72) --- snmp/distro | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/snmp/distro b/snmp/distro index 11fc5d507..b8f15f835 100755 --- a/snmp/distro +++ b/snmp/distro @@ -77,6 +77,10 @@ elif [ "${OS}" = "Linux" ] ; then fi fi + if [ "`uname -a | awk '{print $(NF)}'`" = "DD-WRT" ] ; then + DIST="dd-wrt" + fi + if [ -n "${REV}" ] then OSSTR="${DIST} ${REV}" From b87c3203f0c7a25e77688afe25317d0f14948780 Mon Sep 17 00:00:00 2001 From: Paul Gear Date: Sat, 3 Dec 2016 14:07:02 +1000 Subject: [PATCH 074/497] Make ups-nut work on Debian Jessie This script was broken on Debian Jessie (and probably all other Debian-based distros, including Ubuntu). This commit removes the hard-coding of paths and uses $PATH per normal bash practice, and should work on a wider range of shell variants. 
--- snmp/ups-nut.sh | 35 ++++++++++++++--------------------- 1 file changed, 14 insertions(+), 21 deletions(-) diff --git a/snmp/ups-nut.sh b/snmp/ups-nut.sh index 2b67ef6f4..2ea738c23 100755 --- a/snmp/ups-nut.sh +++ b/snmp/ups-nut.sh @@ -1,29 +1,22 @@ #!/usr/bin/env bash ################################################################ -# copy this script to /etc/snmp/ and make it executable: # -# chmod +x ups-nut.sh # -# ------------------------------------------------------------ # -# edit your snmpd.conf and include: # -# extend ups-nut /etc/snmp/ups-nut.sh # -#--------------------------------------------------------------# -# restart snmpd and activate the app for desired host # -#--------------------------------------------------------------# -# please make sure you have the path/binaries below # +# Instructions: # +# 1. copy this script to /etc/snmp/ and make it executable: # +# chmod +x ups-nut.sh # +# 2. make sure UPS_NAME below matches the name of your UPS # +# 3. edit your snmpd.conf to include this line: # +# extend ups-nut /etc/snmp/ups-nut.sh # +# 4. restart snmpd on the host # +# 5. 
activate the app for the desired host in LibreNMS # ################################################################ -BIN_UPSC='/usr/bin/upsc' -UPSC_CMD='APCUPS' -BIN_CAT='/usr/bin/cat' -BIN_GREP='/usr/bin/grep' -BIN_TR='/usr/bin/tr' -BIN_CUT='/usr/bin/cut' -################################################################ -# Don't change anything unless you know what are you doing # -################################################################ -TMP=`$BIN_UPSC $UPSC_CMD 2>/dev/null` +UPS_NAME='apc' + +PATH=$PATH:/usr/bin:/bin +TMP=$(upsc $UPS_NAME 2>/dev/null) -for value in "battery.charge:[0-9]+" "battery.low:[0-9]+" "battery.runtime:[0-9]+" "battery.voltage:[0-9.]+" "battery.voltage.nominal:[0-9]+" "input.voltage.nominal:[0-9.]+" "input.voltage:[0-9.]+" "ups.load:[0-9]+" +for value in "battery\.charge: [0-9.]+" "battery\.(runtime\.)?low: [0-9]+" "battery\.runtime: [0-9]+" "battery\.voltage: [0-9.]+" "battery\.voltage\.nominal: [0-9]+" "input\.voltage\.nominal: [0-9.]+" "input\.voltage: [0-9.]+" "ups\.load: [0-9.]+" do - OUT=`echo "$TMP" | $BIN_TR -d ' ' | $BIN_GREP -Eow $value | $BIN_CUT -d ":" -f 2` + OUT=$(echo $TMP | grep -Eo "$value" | awk '{print $2}' | LANG=C sort | head -n 1) if [ -n "$OUT" ]; then echo $OUT else From 75eb57f37cf3fdba78cff92f9fe3cd0668a3442a Mon Sep 17 00:00:00 2001 From: Paul Gear Date: Sat, 3 Dec 2016 14:16:54 +1000 Subject: [PATCH 075/497] Restore previous default UPS name --- snmp/ups-nut.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/ups-nut.sh b/snmp/ups-nut.sh index 2ea738c23..cc04c8468 100755 --- a/snmp/ups-nut.sh +++ b/snmp/ups-nut.sh @@ -9,7 +9,7 @@ # 4. restart snmpd on the host # # 5. 
activate the app for the desired host in LibreNMS # ################################################################ -UPS_NAME='apc' +UPS_NAME='APCUPS' PATH=$PATH:/usr/bin:/bin TMP=$(upsc $UPS_NAME 2>/dev/null) From 4c218d0a6981a5776e92a694be4f6080dbdf77dc Mon Sep 17 00:00:00 2001 From: Jason Scalia Date: Wed, 7 Dec 2016 21:48:22 -0500 Subject: [PATCH 076/497] Added fedora support --- snmp/distro | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/snmp/distro b/snmp/distro index b8f15f835..482adcdd3 100755 --- a/snmp/distro +++ b/snmp/distro @@ -16,7 +16,11 @@ elif [ "${OS}" = "AIX" ] ; then elif [ "${OS}" = "Linux" ] ; then KERNEL=`uname -r` - if [ -f /etc/redhat-release ] ; then + if [ -f /etc/fedora-release ]; then + DIST=$(cat /etc/fedora-release | awk '{print $1}') + REV=`cat /etc/fedora-release | sed s/.*release\ // | sed s/\ .*//` + + elif [ -f /etc/redhat-release ] ; then DIST=$(cat /etc/redhat-release | awk '{print $1}') if [ "${DIST}" = "CentOS" ]; then DIST="CentOS" From 59ca14dbdae6841ef271acd0f1d8b3aa49f5c335 Mon Sep 17 00:00:00 2001 From: Jason Scalia Date: Wed, 7 Dec 2016 22:11:48 -0500 Subject: [PATCH 077/497] added fedora/dnf support --- snmp/os-updates.sh | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/snmp/os-updates.sh b/snmp/os-updates.sh index aef6eff06..bd31874a5 100755 --- a/snmp/os-updates.sh +++ b/snmp/os-updates.sh @@ -18,6 +18,8 @@ BIN_ZYPPER='/usr/bin/zypper' CMD_ZYPPER='lu' BIN_YUM='/usr/bin/yum' CMD_YUM='-q check-update' +BIN_DNF='/usr/bin/dnf' +CMD_DNF='-q check-update' BIN_APT='/usr/bin/apt-get' CMD_APT='-qq -s upgrade' BIN_PACMAN='/usr/bin/pacman' @@ -42,6 +44,13 @@ if [ -f /etc/os-release ]; then else echo "0"; fi + elif [ $OS == "fedora" ]; then + UPDATES=`$BIN_DNF $CMD_DNF | $BIN_WC $CMD_WC` + if [ $UPDATES -gt 6 ]; then + echo $(($UPDATES-6)); + else + echo "0"; + fi elif [ $OS == "debian" ]; then UPDATES=`$BIN_APT $CMD_APT | $BIN_GREP 'Inst' | $BIN_WC $CMD_WC` if [ $UPDATES -gt 1 ]; then From 
9168c19ce1b434f61d420eda672f6f51289b7647 Mon Sep 17 00:00:00 2001 From: Tony Murray Date: Wed, 14 Dec 2016 16:27:15 -0600 Subject: [PATCH 078/497] Support python2.6 for powerdns scripts fixes #67 --- agent-local/powerdns | 5 +++-- agent-local/powerdns-recursor | 3 ++- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/agent-local/powerdns b/agent-local/powerdns index acd251d78..913abcf90 100755 --- a/agent-local/powerdns +++ b/agent-local/powerdns @@ -1,6 +1,6 @@ #!/usr/bin/env python -from subprocess import check_output +from subprocess import Popen, PIPE vars = [ 'corrupt-packets', 'deferred-cache-inserts', 'deferred-cache-lookup', 'latency', 'packetcache-hit', 'packetcache-miss', 'packetcache-size', @@ -10,8 +10,9 @@ vars = [ 'corrupt-packets', 'deferred-cache-inserts', 'deferred-cache-lookup', 'udp4-queries', 'udp6-answers', 'udp6-queries' ] rvars = {} +cmd = ['/usr/bin/pdns_control', 'show', '*'] -for l in check_output(['/usr/bin/pdns_control', 'show', '*']).decode().rstrip().split(','): +for l in Popen(cmd, stdout=PIPE).communicate()[0].decode().rstrip().split(','): v = l.split('=') if len(v) > 1: rvars[v[0]] = v[1] diff --git a/agent-local/powerdns-recursor b/agent-local/powerdns-recursor index 7c5fbf2ad..0ac290489 100755 --- a/agent-local/powerdns-recursor +++ b/agent-local/powerdns-recursor @@ -1,7 +1,8 @@ #!/usr/bin/python import json, subprocess +from subprocess import Popen, PIPE -input = subprocess.check_output(['rec_control', 'get-all']) +input = Popen(['rec_control', 'get-all'], stdout=PIPE).communicate()[0] data = [] for line in input.splitlines(): From a0f779cf1aa7ff7ec0251352d10508dde04aeb3d Mon Sep 17 00:00:00 2001 From: crcro Date: Fri, 20 Jan 2017 10:47:30 +0200 Subject: [PATCH 079/497] snmp: exim-stats (#79) * exim-stats frozen mails * added total queue info --- snmp/exim-stats.sh | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) create mode 100644 snmp/exim-stats.sh diff --git a/snmp/exim-stats.sh 
b/snmp/exim-stats.sh new file mode 100644 index 000000000..4b430d549 --- /dev/null +++ b/snmp/exim-stats.sh @@ -0,0 +1,35 @@ +#!/usr/bin/env bash +# (C) 2017 Cercel Valentin +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either version 2 +# of the License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +################################################################# +# copy this script to /etc/snmp/ and make it executable: # +# chmod +x /etc/snmp/exim-stats.sh # +# ------------------------------------------------------------- # +# edit your snmpd.conf and include: # +# extend exim-stats /etc/snmp/exim-stats.sh # +# ------------------------------------------------------------- # +# restart snmpd and activate the app for desired host # +################################################################# +BIN_EXIM=`which exim` +BIN_GREP=`which grep` +BIN_WC=`which wc` +CFG_EXIM_1='-bp' +CFG_EXIM_2='-bpc' +CFG_GREP='frozen' +CFG_WC='-l' +################################################################# + +FROZEN=`$BIN_EXIM $CFG_EXIM_1 | $BIN_GREP $CFG_GREP | $BIN_WC $CFG_WC` +echo $FROZEN + +QUEUE=`$BIN_EXIM $CFG_EXIM_2` +echo $QUEUE \ No newline at end of file From 0da25991a0c635988589ad59e36d95940031d57b Mon Sep 17 00:00:00 2001 From: VVelox Date: Wed, 15 Feb 2017 11:37:18 -0600 Subject: [PATCH 080/497] add a single pool php-fpm net-snmpd extend poller --- snmp/phpfpm-sp | 148 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 148 insertions(+) create mode 100644 snmp/phpfpm-sp diff --git a/snmp/phpfpm-sp b/snmp/phpfpm-sp new file mode 100644 index 000000000..5466547e0 --- /dev/null +++ 
b/snmp/phpfpm-sp @@ -0,0 +1,148 @@ +#!/usr/local/bin/bash + +# add this to snmpd.conf as below... actual path to the script can vary +# extend phpfpmsp /root/snmp-extends/phpfpm-sp +# +# The location is is irrelevant. It just needs to be executable, +# have access to curl, and be reachable by the snmpd. +# +# You can check it via... +# snmpget -v 2c -c public localhost .1.3.6.1.4.1.8072.1.3.2.3.1.2.8.112.104.112.102.112.109.115.112 +# which should print out... actual values will very... +# NET-SNMP-EXTEND-MIB::nsExtendOutputFull."phpfpmsp" = STRING: www +# 11/Feb/2017:18:45:43 -0600 +# 189514 +# 178516 +# 0 +# 0 +# 0 +# 2 +# 1 +# 3 +# 20 +# 13 +# 0 +# +# In order the returned values are. +# +# pool +# start_time +# start_since +# accepted_conn +# listen_queue +# max_listen_queue +# listen_queue_len +# idle_processes +# active_processes +# total_processes +# max_active_processes +# max_children_reached +# slow_requests + + +# the URL to fetch, change as needed +url="http://localhost/status?full" +opts="" + +# netdata +# real-time performance and health monitoring, done right! +# (C) 2016 Costa Tsaousis +# GPL v3+ +# +# Contributed by @safeie with PR #276 +# Modified to work as a SNMP extend by Zane C. Bowers-Hadley + +declare -A phpfpm_urls=() +declare -A phpfpm_curl_opts=() + +# _update_every is a special variable - it holds the number of seconds +# between the calls of the _update() function +phpfpm_update_every= +phpfpm_priority=60000 + +declare -a phpfpm_response=() +phpfpm_pool="" +phpfpm_start_time="" +phpfpm_start_since=0 +phpfpm_accepted_conn=0 +phpfpm_listen_queue=0 +phpfpm_max_listen_queue=0 +phpfpm_listen_queue_len=0 +phpfpm_idle_processes=0 +phpfpm_active_processes=0 +phpfpm_total_processes=0 +phpfpm_max_active_processes=0 +phpfpm_max_children_reached=0 +phpfpm_slow_requests=0 + + +# local opts="${1}" url="${2}" + + phpfpm_response=($(curl -Ss ${opts} "${url}")) + [ $? 
-ne 0 -o "${#phpfpm_response[@]}" -eq 0 ] && exit 1 + + if [[ "${phpfpm_response[0]}" != "pool:" \ + || "${phpfpm_response[2]}" != "process" \ + || "${phpfpm_response[5]}" != "start" \ + || "${phpfpm_response[12]}" != "accepted" \ + || "${phpfpm_response[15]}" != "listen" \ + || "${phpfpm_response[16]}" != "queue:" \ + || "${phpfpm_response[26]}" != "idle" \ + || "${phpfpm_response[29]}" != "active" \ + || "${phpfpm_response[32]}" != "total" \ + ]] + then + echo "invalid response from phpfpm status server: ${phpfpm_response[*]}" + exit 1; + fi + + phpfpm_pool="${phpfpm_response[1]}" + phpfpm_start_time="${phpfpm_response[7]} ${phpfpm_response[8]}" + phpfpm_start_since="${phpfpm_response[11]}" + phpfpm_accepted_conn="${phpfpm_response[14]}" + phpfpm_listen_queue="${phpfpm_response[17]}" + phpfpm_max_listen_queue="${phpfpm_response[21]}" + phpfpm_listen_queue_len="${phpfpm_response[25]}" + phpfpm_idle_processes="${phpfpm_response[28]}" + phpfpm_active_processes="${phpfpm_response[31]}" + phpfpm_total_processes="${phpfpm_response[34]}" + phpfpm_max_active_processes="${phpfpm_response[38]}" + phpfpm_max_children_reached="${phpfpm_response[42]}" + if [ "${phpfpm_response[43]}" == "slow" ] + then + phpfpm_slow_requests="${phpfpm_response[45]}" + else + phpfpm_slow_requests="-1" + fi + + if [[ -z "${phpfpm_pool}" \ + || -z "${phpfpm_start_time}" \ + || -z "${phpfpm_start_since}" \ + || -z "${phpfpm_accepted_conn}" \ + || -z "${phpfpm_listen_queue}" \ + || -z "${phpfpm_max_listen_queue}" \ + || -z "${phpfpm_listen_queue_len}" \ + || -z "${phpfpm_idle_processes}" \ + || -z "${phpfpm_active_processes}" \ + || -z "${phpfpm_total_processes}" \ + || -z "${phpfpm_max_active_processes}" \ + || -z "${phpfpm_max_children_reached}" \ + ]] + then + echo "empty values got from phpfpm status server: ${phpfpm_response[*]}" + exit 1 + fi + +echo $phpfpm_pool +echo $phpfpm_start_time +echo $phpfpm_start_since +echo $phpfpm_accepted_conn +echo $phpfpm_listen_queue +echo 
$phpfpm_max_listen_queue +echo $phpfpm_listen_queue_len +echo $phpfpm_idle_processes +echo $phpfpm_active_processes +echo $phpfpm_total_processes +echo $phpfpm_max_active_processes +echo $phpfpm_max_children_reached +echo $phpfpm_slow_requests From 0733d924de5091ec1fd7ea493fe10a75d4cd8f86 Mon Sep 17 00:00:00 2001 From: VVelox Date: Thu, 16 Feb 2017 09:10:53 -0600 Subject: [PATCH 081/497] add a single pool php-fpm net-snmpd extend poller (#83) --- snmp/phpfpm-sp | 148 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 148 insertions(+) create mode 100644 snmp/phpfpm-sp diff --git a/snmp/phpfpm-sp b/snmp/phpfpm-sp new file mode 100644 index 000000000..5466547e0 --- /dev/null +++ b/snmp/phpfpm-sp @@ -0,0 +1,148 @@ +#!/usr/local/bin/bash + +# add this to snmpd.conf as below... actual path to the script can vary +# extend phpfpmsp /root/snmp-extends/phpfpm-sp +# +# The location is is irrelevant. It just needs to be executable, +# have access to curl, and be reachable by the snmpd. +# +# You can check it via... +# snmpget -v 2c -c public localhost .1.3.6.1.4.1.8072.1.3.2.3.1.2.8.112.104.112.102.112.109.115.112 +# which should print out... actual values will very... +# NET-SNMP-EXTEND-MIB::nsExtendOutputFull."phpfpmsp" = STRING: www +# 11/Feb/2017:18:45:43 -0600 +# 189514 +# 178516 +# 0 +# 0 +# 0 +# 2 +# 1 +# 3 +# 20 +# 13 +# 0 +# +# In order the returned values are. +# +# pool +# start_time +# start_since +# accepted_conn +# listen_queue +# max_listen_queue +# listen_queue_len +# idle_processes +# active_processes +# total_processes +# max_active_processes +# max_children_reached +# slow_requests + + +# the URL to fetch, change as needed +url="http://localhost/status?full" +opts="" + +# netdata +# real-time performance and health monitoring, done right! +# (C) 2016 Costa Tsaousis +# GPL v3+ +# +# Contributed by @safeie with PR #276 +# Modified to work as a SNMP extend by Zane C. 
Bowers-Hadley + +declare -A phpfpm_urls=() +declare -A phpfpm_curl_opts=() + +# _update_every is a special variable - it holds the number of seconds +# between the calls of the _update() function +phpfpm_update_every= +phpfpm_priority=60000 + +declare -a phpfpm_response=() +phpfpm_pool="" +phpfpm_start_time="" +phpfpm_start_since=0 +phpfpm_accepted_conn=0 +phpfpm_listen_queue=0 +phpfpm_max_listen_queue=0 +phpfpm_listen_queue_len=0 +phpfpm_idle_processes=0 +phpfpm_active_processes=0 +phpfpm_total_processes=0 +phpfpm_max_active_processes=0 +phpfpm_max_children_reached=0 +phpfpm_slow_requests=0 + + +# local opts="${1}" url="${2}" + + phpfpm_response=($(curl -Ss ${opts} "${url}")) + [ $? -ne 0 -o "${#phpfpm_response[@]}" -eq 0 ] && exit 1 + + if [[ "${phpfpm_response[0]}" != "pool:" \ + || "${phpfpm_response[2]}" != "process" \ + || "${phpfpm_response[5]}" != "start" \ + || "${phpfpm_response[12]}" != "accepted" \ + || "${phpfpm_response[15]}" != "listen" \ + || "${phpfpm_response[16]}" != "queue:" \ + || "${phpfpm_response[26]}" != "idle" \ + || "${phpfpm_response[29]}" != "active" \ + || "${phpfpm_response[32]}" != "total" \ + ]] + then + echo "invalid response from phpfpm status server: ${phpfpm_response[*]}" + exit 1; + fi + + phpfpm_pool="${phpfpm_response[1]}" + phpfpm_start_time="${phpfpm_response[7]} ${phpfpm_response[8]}" + phpfpm_start_since="${phpfpm_response[11]}" + phpfpm_accepted_conn="${phpfpm_response[14]}" + phpfpm_listen_queue="${phpfpm_response[17]}" + phpfpm_max_listen_queue="${phpfpm_response[21]}" + phpfpm_listen_queue_len="${phpfpm_response[25]}" + phpfpm_idle_processes="${phpfpm_response[28]}" + phpfpm_active_processes="${phpfpm_response[31]}" + phpfpm_total_processes="${phpfpm_response[34]}" + phpfpm_max_active_processes="${phpfpm_response[38]}" + phpfpm_max_children_reached="${phpfpm_response[42]}" + if [ "${phpfpm_response[43]}" == "slow" ] + then + phpfpm_slow_requests="${phpfpm_response[45]}" + else + phpfpm_slow_requests="-1" + fi + + if 
[[ -z "${phpfpm_pool}" \ + || -z "${phpfpm_start_time}" \ + || -z "${phpfpm_start_since}" \ + || -z "${phpfpm_accepted_conn}" \ + || -z "${phpfpm_listen_queue}" \ + || -z "${phpfpm_max_listen_queue}" \ + || -z "${phpfpm_listen_queue_len}" \ + || -z "${phpfpm_idle_processes}" \ + || -z "${phpfpm_active_processes}" \ + || -z "${phpfpm_total_processes}" \ + || -z "${phpfpm_max_active_processes}" \ + || -z "${phpfpm_max_children_reached}" \ + ]] + then + echo "empty values got from phpfpm status server: ${phpfpm_response[*]}" + exit 1 + fi + +echo $phpfpm_pool +echo $phpfpm_start_time +echo $phpfpm_start_since +echo $phpfpm_accepted_conn +echo $phpfpm_listen_queue +echo $phpfpm_max_listen_queue +echo $phpfpm_listen_queue_len +echo $phpfpm_idle_processes +echo $phpfpm_active_processes +echo $phpfpm_total_processes +echo $phpfpm_max_active_processes +echo $phpfpm_max_children_reached +echo $phpfpm_slow_requests From d3116b20177cc7d0dd13743e6763a2ced268dda7 Mon Sep 17 00:00:00 2001 From: rockyluke Date: Thu, 16 Feb 2017 16:12:01 +0100 Subject: [PATCH 082/497] Use command -v instead binary path (#80) --- snmp/ntp-client.sh | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/snmp/ntp-client.sh b/snmp/ntp-client.sh index 447c1f796..aa56f810d 100755 --- a/snmp/ntp-client.sh +++ b/snmp/ntp-client.sh @@ -7,11 +7,11 @@ # please make sure you have the path/binaries below # ################################################################ # Binaries and paths required # -################################################################ -BIN_NTPQ='/usr/sbin/ntpq' -BIN_GREP='/usr/bin/grep' -BIN_TR='/usr/bin/tr' -BIN_CUT='/usr/bin/cut' +################################################################ +BIN_NTPQ="$(command -v ntpq)" +BIN_GREP="$(command -v grep)" +BIN_TR="$(command -v tr)" +BIN_CUT="$(command -v cut)" ################################################################ # Don't change anything unless you know what are you doing # 
################################################################ From c07cf27ae00fe6e6d91a91d62314d2be66d10d20 Mon Sep 17 00:00:00 2001 From: VVelox Date: Thu, 16 Feb 2017 10:50:46 -0600 Subject: [PATCH 083/497] add fail2ban snmpd extend script --- snmp/fail2ban | 15 +++++++++++++++ 1 file changed, 15 insertions(+) create mode 100644 snmp/fail2ban diff --git a/snmp/fail2ban b/snmp/fail2ban new file mode 100644 index 000000000..4566e789c --- /dev/null +++ b/snmp/fail2ban @@ -0,0 +1,15 @@ +#!/bin/sh + +# Addthis to snmpd.conf as below. +# extend fail2ban /etc/snmp/fail2ban +# +# Also please verify your fail to ban instalation for proper table/chain names. + +if [ `uname` = "FreeBSD" ]; then + /sbin/pfctl -t fail2ban -T show | /usr/bin/grep -c . +fi +if [ `uname` = "Linux" ]; then + iptables -L -n | grep -c f2b\- +fi + +exit 0; From cd875a0ca5bec0c1ad562106d4cc422534dc8c52 Mon Sep 17 00:00:00 2001 From: Robert Verspuy Date: Fri, 17 Feb 2017 01:46:13 +0100 Subject: [PATCH 084/497] Fixed correct amount of pipeline symbols when degrees symbol is missing. (#85) When the script is called through xinetd/check_mk (on my system), there is no degree symbol, but a space. 
Changed the script to handle both correctly --- agent-local/hddtemp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/agent-local/hddtemp b/agent-local/hddtemp index eb749cb0a..18e3bdbad 100755 --- a/agent-local/hddtemp +++ b/agent-local/hddtemp @@ -19,7 +19,7 @@ hddtemp=`which hddtemp 2>/dev/null` if [ "${hddtemp}" != "" ]; then if [ -x "${hddtemp}" ]; then - content=`${hddtemp} -w -q ${disks} 2>/dev/null | awk -F": " 'BEGIN{ ORS="" }{ print "|"$1"|"$2"|"$3"|";} ' | sed 's/°/\|/g';` + content=`${hddtemp} -w -q ${disks} 2>/dev/null | awk -F": " 'BEGIN{ ORS="" }{ print "|"$1"|"$2"|"$3"|";} ' | sed 's/[° ]C|/|C|/g' | sed 's/[° ]F|/|F|/g'` if [ "${content}" != "" ]; then echo '<<>>' echo ${content} From 4473c12d91d331b4f2ee14a27103287afe0b120c Mon Sep 17 00:00:00 2001 From: Florian Beer Date: Fri, 17 Feb 2017 16:37:00 +0100 Subject: [PATCH 085/497] Update shebang With the original shebang this script didn't work on Debian and Ubuntu machines. Using `/usr/bin/env bash` makes the script more portable. --- snmp/phpfpm-sp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/phpfpm-sp b/snmp/phpfpm-sp index 5466547e0..2ae5a5e95 100644 --- a/snmp/phpfpm-sp +++ b/snmp/phpfpm-sp @@ -1,4 +1,4 @@ -#!/usr/local/bin/bash +#!/usr/bin/env bash # add this to snmpd.conf as below... actual path to the script can vary # extend phpfpmsp /root/snmp-extends/phpfpm-sp From 9300a76abbce92122d4daa94a6c1cb01c96507dd Mon Sep 17 00:00:00 2001 From: VVelox Date: Fri, 17 Feb 2017 10:32:27 -0600 Subject: [PATCH 086/497] update to check fail2ban and f2b --- snmp/fail2ban | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/snmp/fail2ban b/snmp/fail2ban index 4566e789c..f4b396975 100644 --- a/snmp/fail2ban +++ b/snmp/fail2ban @@ -9,7 +9,9 @@ if [ `uname` = "FreeBSD" ]; then /sbin/pfctl -t fail2ban -T show | /usr/bin/grep -c . 
fi if [ `uname` = "Linux" ]; then - iptables -L -n | grep -c f2b\- + f2b1=`iptables -L -n | grep -c f2b\-` + f2b2=`iptables -L -n | grep -c fail2ban\-` + expr $f2b1 + $f2b2 fi exit 0; From 909453afd9c37442133023ecb9550ca67006f489 Mon Sep 17 00:00:00 2001 From: VVelox Date: Fri, 17 Feb 2017 10:33:02 -0600 Subject: [PATCH 087/497] don't assume it appends the jail name --- snmp/fail2ban | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/snmp/fail2ban b/snmp/fail2ban index f4b396975..6eeb9f3a1 100644 --- a/snmp/fail2ban +++ b/snmp/fail2ban @@ -9,8 +9,8 @@ if [ `uname` = "FreeBSD" ]; then /sbin/pfctl -t fail2ban -T show | /usr/bin/grep -c . fi if [ `uname` = "Linux" ]; then - f2b1=`iptables -L -n | grep -c f2b\-` - f2b2=`iptables -L -n | grep -c fail2ban\-` + f2b1=`iptables -L -n | grep -c f2b\` + f2b2=`iptables -L -n | grep -c fail2ban\` expr $f2b1 + $f2b2 fi From 47a5465499929eb0aaabeb0a3658e58aae49e89b Mon Sep 17 00:00:00 2001 From: VVelox Date: Fri, 17 Feb 2017 23:08:25 -0600 Subject: [PATCH 088/497] whoops, forgot to remove \ as well --- snmp/fail2ban | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/snmp/fail2ban b/snmp/fail2ban index 6eeb9f3a1..817aaa35c 100644 --- a/snmp/fail2ban +++ b/snmp/fail2ban @@ -9,8 +9,8 @@ if [ `uname` = "FreeBSD" ]; then /sbin/pfctl -t fail2ban -T show | /usr/bin/grep -c . 
fi if [ `uname` = "Linux" ]; then - f2b1=`iptables -L -n | grep -c f2b\` - f2b2=`iptables -L -n | grep -c fail2ban\` + f2b1=`iptables -L -n | grep -c f2b` + f2b2=`iptables -L -n | grep -c fail2ban` expr $f2b1 + $f2b2 fi From 2fdc884008a010e31f500fa480c840d6ef4d8193 Mon Sep 17 00:00:00 2001 From: VVelox Date: Fri, 17 Feb 2017 23:08:53 -0600 Subject: [PATCH 089/497] remove pointless exit --- snmp/fail2ban | 2 -- 1 file changed, 2 deletions(-) diff --git a/snmp/fail2ban b/snmp/fail2ban index 817aaa35c..46c7c554e 100644 --- a/snmp/fail2ban +++ b/snmp/fail2ban @@ -13,5 +13,3 @@ if [ `uname` = "Linux" ]; then f2b2=`iptables -L -n | grep -c fail2ban` expr $f2b1 + $f2b2 fi - -exit 0; From 8d361cd5a6c05c69703f14d8292cd143bbd4354f Mon Sep 17 00:00:00 2001 From: VVelox Date: Sat, 18 Feb 2017 00:09:12 -0600 Subject: [PATCH 090/497] move this over to perl and properly check iptables --- snmp/fail2ban | 68 ++++++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 57 insertions(+), 11 deletions(-) diff --git a/snmp/fail2ban b/snmp/fail2ban index 46c7c554e..d2d5b7195 100644 --- a/snmp/fail2ban +++ b/snmp/fail2ban @@ -1,15 +1,61 @@ -#!/bin/sh +#!/usr/local/bin/perl # Addthis to snmpd.conf as below. # extend fail2ban /etc/snmp/fail2ban # -# Also please verify your fail to ban instalation for proper table/chain names. - -if [ `uname` = "FreeBSD" ]; then - /sbin/pfctl -t fail2ban -T show | /usr/bin/grep -c . -fi -if [ `uname` = "Linux" ]; then - f2b1=`iptables -L -n | grep -c f2b` - f2b2=`iptables -L -n | grep -c fail2ban` - expr $f2b1 + $f2b2 -fi +# Please verify that the tables below are correct for your installation + +my @linuxChains=('failban','f2b'); +my $freebsdPFtable='fail2ban'; + +# You should not have to touch anything below this. 
+ +my $os=`uname`; + +if ( $os =~ '^FreeBSD' ){ + print `/sbin/pfctl -t $freebsdPFtable -T show | /usr/bin/grep -c .`; + exit; +}; + +if ( $os =~ '^Linux' ){ + my $iptables=`iptables -L -n`; + + my @iptablesA=split( /\n/, $iptables ); + + #check each line + my $int=0; + my $banned=0; + my $count=0; + while( defined( $iptablesA[$int] ) ){ + my $line=$iptablesA[$int]; + + #stop counting if we have a blank line + if ( $line =~ /^$/ ){ + $count=0; + } + + #count /^REJECT/ lines, if we are counting + if ( ( $line =~ /^REJECT/ ) && ( $count ) ){ + $banned++; + } + + #check if this is a chain we should count + if ( $line =~ /^Chain/ ){ + my $linuxChainsInt=0; + # check if any of the specified names hit and if so start counting + while( defined( $linuxChains[$linuxChainsInt] ) ){ + my $chain=$linuxChains[$linuxChainsInt]; + if ( $line =~ /^Chain $chain/ ){ + $count=1; + } + + $linuxChainsInt++; + } + } + + $int++; + } + + print $banned."\n"; +} + From 0d9e14a7850ee8c8e2c2b2dae134d904905dcf88 Mon Sep 17 00:00:00 2001 From: VVelox Date: Sun, 19 Feb 2017 23:41:51 -0600 Subject: [PATCH 091/497] use fail2ban-client instead --- snmp/fail2ban | 94 +++++++++++++++++++++------------------------------ 1 file changed, 38 insertions(+), 56 deletions(-) diff --git a/snmp/fail2ban b/snmp/fail2ban index d2d5b7195..1500fd6ec 100644 --- a/snmp/fail2ban +++ b/snmp/fail2ban @@ -1,61 +1,43 @@ #!/usr/local/bin/perl -# Addthis to snmpd.conf as below. +# Add this to your snmpd.conf as below. # extend fail2ban /etc/snmp/fail2ban -# -# Please verify that the tables below are correct for your installation - -my @linuxChains=('failban','f2b'); -my $freebsdPFtable='fail2ban'; - -# You should not have to touch anything below this. 
- -my $os=`uname`; - -if ( $os =~ '^FreeBSD' ){ - print `/sbin/pfctl -t $freebsdPFtable -T show | /usr/bin/grep -c .`; - exit; -}; - -if ( $os =~ '^Linux' ){ - my $iptables=`iptables -L -n`; - - my @iptablesA=split( /\n/, $iptables ); - - #check each line - my $int=0; - my $banned=0; - my $count=0; - while( defined( $iptablesA[$int] ) ){ - my $line=$iptablesA[$int]; - - #stop counting if we have a blank line - if ( $line =~ /^$/ ){ - $count=0; - } - - #count /^REJECT/ lines, if we are counting - if ( ( $line =~ /^REJECT/ ) && ( $count ) ){ - $banned++; - } - - #check if this is a chain we should count - if ( $line =~ /^Chain/ ){ - my $linuxChainsInt=0; - # check if any of the specified names hit and if so start counting - while( defined( $linuxChains[$linuxChainsInt] ) ){ - my $chain=$linuxChains[$linuxChainsInt]; - if ( $line =~ /^Chain $chain/ ){ - $count=1; - } - - $linuxChainsInt++; - } - } - - $int++; - } - - print $banned."\n"; + +#make sure this path is correct +my $f2bc="/usr/local/bin/fail2ban-client"; + +## +## you should not have to touch anything below this +## +use strict; +use warnings; + +#gets a list of jails +my $jailsOutput=`$f2bc status`; +my @jailsOutputA=split(/\n/, $jailsOutput); +my ( $jailsS )=grep( /Jail\ list/, @jailsOutputA ); +$jailsS=~s/.*\://; +$jailsS=~s/\s//g; +my @jails=split(/\,/, $jailsS); + +#process jail +my $int=0; +my $total=0; +my $toReturn=''; +while(defined($jails[$int])){ + + #get the total for this jail + my $jailStatusOutput=`fail2ban-client status $jails[$int]`; + my @jailStatusOutputA=split(/\n/, $jailStatusOutput); + my ( $jailTotal )=grep(/Currently\ banned\:/, @jailStatusOutputA); + $jailTotal=~s/.*\://; + $jailTotal=~s/\s//g; + + #tally the total and add this jail to the list + $total=$total+$jailTotal; + $toReturn=$toReturn.$jails[$int].' 
'.$jailTotal."\n"; + + $int++; } +print $total."\n".$toReturn; From 655099ce7a80a13ea34d8b1c7dab8481e3a79636 Mon Sep 17 00:00:00 2001 From: VVelox Date: Mon, 20 Feb 2017 03:49:50 -0600 Subject: [PATCH 092/497] now requires cron usage --- snmp/fail2ban | 94 ++++++++++++++++++++++++++++++++++++--------------- 1 file changed, 67 insertions(+), 27 deletions(-) diff --git a/snmp/fail2ban b/snmp/fail2ban index 1500fd6ec..3c5212609 100644 --- a/snmp/fail2ban +++ b/snmp/fail2ban @@ -2,42 +2,82 @@ # Add this to your snmpd.conf as below. # extend fail2ban /etc/snmp/fail2ban +# +# Then add to your cron tab... +# */3 * * * * /root/snmp-extends/fail2ban.pl -u #make sure this path is correct my $f2bc="/usr/local/bin/fail2ban-client"; +# The cache file to use. +my $cache='/var/cache/fail2ban'; + + ## ## you should not have to touch anything below this ## use strict; use warnings; +use Getopt::Std; + +$Getopt::Std::STANDARD_HELP_VERSION = 1; +sub main::VERSION_MESSAGE { + print "fail2ban-client SNMP extend 0.0.0\n"; +}; -#gets a list of jails -my $jailsOutput=`$f2bc status`; -my @jailsOutputA=split(/\n/, $jailsOutput); -my ( $jailsS )=grep( /Jail\ list/, @jailsOutputA ); -$jailsS=~s/.*\://; -$jailsS=~s/\s//g; -my @jails=split(/\,/, $jailsS); - -#process jail -my $int=0; -my $total=0; -my $toReturn=''; -while(defined($jails[$int])){ - - #get the total for this jail - my $jailStatusOutput=`fail2ban-client status $jails[$int]`; - my @jailStatusOutputA=split(/\n/, $jailStatusOutput); - my ( $jailTotal )=grep(/Currently\ banned\:/, @jailStatusOutputA); - $jailTotal=~s/.*\://; - $jailTotal=~s/\s//g; - - #tally the total and add this jail to the list - $total=$total+$jailTotal; - $toReturn=$toReturn.$jails[$int].' '.$jailTotal."\n"; - - $int++; + +sub main::HELP_MESSAGE { + print "\n". 
+ "-u Update '".$cache."'\n"; } -print $total."\n".$toReturn; +#gets the options +my %opts=(); +getopts('u', \%opts); + +if (defined( $opts{u} )){ + + #gets a list of jails + my $jailsOutput=`$f2bc status`; + my @jailsOutputA=split(/\n/, $jailsOutput); + my ( $jailsS )=grep( /Jail\ list/, @jailsOutputA ); + $jailsS=~s/.*\://; + $jailsS=~s/\s//g; + my @jails=split(/\,/, $jailsS); + + #process jail + my $int=0; + my $total=0; + my $toReturn=''; + while(defined($jails[$int])){ + + #get the total for this jail + my $jailStatusOutput=`fail2ban-client status $jails[$int]`; + my @jailStatusOutputA=split(/\n/, $jailStatusOutput); + my ( $jailTotal )=grep(/Currently\ banned\:/, @jailStatusOutputA); + $jailTotal=~s/.*\://; + $jailTotal=~s/\s//g; + + #tally the total and add this jail to the list + $total=$total+$jailTotal; + $toReturn=$toReturn.$jails[$int].' '.$jailTotal."\n"; + + $int++; + } + + open(my $writefh, ">", $cache) or die "Can't open '".$cache."'"; + print $writefh $total."\n".$toReturn; + close($writefh); + + exit 0; +} + + +my $old=''; +open(my $readfh, "<", $cache) or die "Can't open '".$cache."'"; +# if this is over 2048, something is most likely wrong +read($readfh , $old , 10240); +close($readfh); +print $old; + + From ba1bb32a84a8c14e4bca4cb3a28c55de75b778a5 Mon Sep 17 00:00:00 2001 From: VVelox Date: Mon, 20 Feb 2017 03:50:59 -0600 Subject: [PATCH 093/497] correct a comment --- snmp/fail2ban | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/fail2ban b/snmp/fail2ban index 3c5212609..033e5c131 100644 --- a/snmp/fail2ban +++ b/snmp/fail2ban @@ -75,7 +75,7 @@ if (defined( $opts{u} )){ my $old=''; open(my $readfh, "<", $cache) or die "Can't open '".$cache."'"; -# if this is over 2048, something is most likely wrong +# if this is over 10240, something is most likely wrong read($readfh , $old , 10240); close($readfh); print $old; From 41ca0f321129a8c76f610ec81c3f8d07a73c608d Mon Sep 17 00:00:00 2001 From: VVelox Date: Mon, 20 Feb 2017 
13:18:50 -0600 Subject: [PATCH 094/497] track both firewall and fail2ban-client --- snmp/fail2ban | 64 ++++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 61 insertions(+), 3 deletions(-) diff --git a/snmp/fail2ban b/snmp/fail2ban index 033e5c131..4e22c6733 100644 --- a/snmp/fail2ban +++ b/snmp/fail2ban @@ -12,6 +12,9 @@ my $f2bc="/usr/local/bin/fail2ban-client"; # The cache file to use. my $cache='/var/cache/fail2ban'; +# Please verify that the tables below are correct for your installation +my @linuxChains=('failban','f2b'); +my $freebsdPFtable='fail2ban'; ## ## you should not have to touch anything below this @@ -64,9 +67,65 @@ if (defined( $opts{u} )){ $int++; } + + ## + ## process the firewall + ## + + my $os=`uname`; + + my $firewalled=0; + if ( $os =~ '^FreeBSD' ){ + $firewalled=`/sbin/pfctl -t $freebsdPFtable -T show | /usr/bin/grep -c .`; + chomp($firewalled); + }; + + if ( $os =~ '^Linux' ){ + my $iptables=`iptables -L -n`; + my @iptablesA=split( /\n/, $iptables ); + + #check each line + my $int=0; + my $firewalled=0; + my $count=0; + while( defined( $iptablesA[$int] ) ){ + my $line=$iptablesA[$int]; + + #stop counting if we have a blank line + if ( $line =~ /^$/ ){ + $count=0; + } + + #count /^REJECT/ lines, if we are counting + if ( ( $line =~ /^REJECT/ ) && ( $count ) ){ + $firewalled++; + } + + #check if this is a chain we should count + if ( $line =~ /^Chain/ ){ + my $linuxChainsInt=0; + # check if any of the specified names hit and if so start counting + while( defined( $linuxChains[$linuxChainsInt] ) ){ + my $chain=$linuxChains[$linuxChainsInt]; + if ( $line =~ /^Chain $chain/ ){ + $count=1; + } + + $linuxChainsInt++; + } + } + + $int++; + } + + } + + ## + ## render the output + ## open(my $writefh, ">", $cache) or die "Can't open '".$cache."'"; - print $writefh $total."\n".$toReturn; + print $writefh $total."\n".$firewalled."\n".$toReturn; close($writefh); exit 0; @@ -75,9 +134,8 @@ if (defined( $opts{u} )){ my $old=''; open(my 
$readfh, "<", $cache) or die "Can't open '".$cache."'"; -# if this is over 10240, something is most likely wrong +# if this is over 2048, something is most likely wrong read($readfh , $old , 10240); close($readfh); print $old; - From 28f225dcfe4e050d7f523d59ff46e8fe83298963 Mon Sep 17 00:00:00 2001 From: VVelox Date: Wed, 22 Feb 2017 21:42:53 -0600 Subject: [PATCH 095/497] Update fail2ban --- snmp/fail2ban | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/fail2ban b/snmp/fail2ban index 4e22c6733..3bb362b2f 100644 --- a/snmp/fail2ban +++ b/snmp/fail2ban @@ -1,4 +1,4 @@ -#!/usr/local/bin/perl +#!/usr/bin/env perl # Add this to your snmpd.conf as below. # extend fail2ban /etc/snmp/fail2ban From f9a66931c94601dd2392d6498258a8f9c39fc206 Mon Sep 17 00:00:00 2001 From: VVelox Date: Wed, 22 Feb 2017 21:58:03 -0600 Subject: [PATCH 096/497] make caching optional --- snmp/fail2ban | 44 +++++++++++++++++++++++++++++--------------- 1 file changed, 29 insertions(+), 15 deletions(-) diff --git a/snmp/fail2ban b/snmp/fail2ban index 3bb362b2f..a744eae1c 100644 --- a/snmp/fail2ban +++ b/snmp/fail2ban @@ -3,13 +3,13 @@ # Add this to your snmpd.conf as below. # extend fail2ban /etc/snmp/fail2ban # -# Then add to your cron tab... +# Then add to your cron tab, if you wish to use caching. # */3 * * * * /root/snmp-extends/fail2ban.pl -u #make sure this path is correct my $f2bc="/usr/local/bin/fail2ban-client"; -# The cache file to use. +# The cache file to use, if using caching. my $cache='/var/cache/fail2ban'; # Please verify that the tables below are correct for your installation @@ -38,6 +38,24 @@ sub main::HELP_MESSAGE { my %opts=(); getopts('u', \%opts); +#if set to 1, no cache will be written and it will be printed instead +my $noWrite=0; + +if ( ! 
defined( $opts{u} ) ){ + if ( -f $cache ){ + my $old=''; + open(my $readfh, "<", $cache) or die "Can't open '".$cache."'"; + # if this is over 2048, something is most likely wrong + read($readfh , $old , 10240); + close($readfh); + print $old; + }else{ + $opts{u}=1; + $noWrite=1; + + } +} + if (defined( $opts{u} )){ #gets a list of jails @@ -121,21 +139,17 @@ if (defined( $opts{u} )){ } - ## + ## ## render the output ## - open(my $writefh, ">", $cache) or die "Can't open '".$cache."'"; - print $writefh $total."\n".$firewalled."\n".$toReturn; - close($writefh); + if ( ! $noWrite ){ + open(my $writefh, ">", $cache) or die "Can't open '".$cache."'"; + print $writefh $total."\n".$firewalled."\n".$toReturn; + close($writefh); + }else{ + print $total."\n".$firewalled."\n".$toReturn; + } + exit 0; } - - -my $old=''; -open(my $readfh, "<", $cache) or die "Can't open '".$cache."'"; -# if this is over 2048, something is most likely wrong -read($readfh , $old , 10240); -close($readfh); -print $old; - From 2ff9de89f2fc1062d83d2461a025a0b84db552f2 Mon Sep 17 00:00:00 2001 From: VVelox Date: Thu, 23 Feb 2017 08:40:59 -0600 Subject: [PATCH 097/497] misc. path cleanups --- snmp/fail2ban | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/snmp/fail2ban b/snmp/fail2ban index a744eae1c..522e0cbba 100644 --- a/snmp/fail2ban +++ b/snmp/fail2ban @@ -4,10 +4,10 @@ # extend fail2ban /etc/snmp/fail2ban # # Then add to your cron tab, if you wish to use caching. -# */3 * * * * /root/snmp-extends/fail2ban.pl -u +# */3 * * * * /etc/snmp/fail2ban.pl -u #make sure this path is correct -my $f2bc="/usr/local/bin/fail2ban-client"; +my $f2bc="/usr/bin/env fail2ban-client"; # The cache file to use, if using caching. 
my $cache='/var/cache/fail2ban'; @@ -100,7 +100,7 @@ if (defined( $opts{u} )){ }; if ( $os =~ '^Linux' ){ - my $iptables=`iptables -L -n`; + my $iptables=`/usr/bin/env iptables -L -n`; my @iptablesA=split( /\n/, $iptables ); #check each line From 7f41e14159db5f56ae0d654f23229f2079d96499 Mon Sep 17 00:00:00 2001 From: VVelox Date: Thu, 23 Feb 2017 08:45:04 -0600 Subject: [PATCH 098/497] poke the user about setting a iptables path as well --- snmp/fail2ban | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/snmp/fail2ban b/snmp/fail2ban index 522e0cbba..129b8a5e4 100644 --- a/snmp/fail2ban +++ b/snmp/fail2ban @@ -9,6 +9,9 @@ #make sure this path is correct my $f2bc="/usr/bin/env fail2ban-client"; +#make sure this path is correct +my $iptables="/usr/bin/env iptables"; + # The cache file to use, if using caching. my $cache='/var/cache/fail2ban'; @@ -100,7 +103,7 @@ if (defined( $opts{u} )){ }; if ( $os =~ '^Linux' ){ - my $iptables=`/usr/bin/env iptables -L -n`; + my $iptables=`$iptables -L -n`; my @iptablesA=split( /\n/, $iptables ); #check each line From cf980ed891fe552002b6d7087fc2ae5f1cc8a24c Mon Sep 17 00:00:00 2001 From: VVelox Date: Thu, 23 Feb 2017 08:46:18 -0600 Subject: [PATCH 099/497] don't reuse the variable $iptables --- snmp/fail2ban | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/snmp/fail2ban b/snmp/fail2ban index 129b8a5e4..de1bd0907 100644 --- a/snmp/fail2ban +++ b/snmp/fail2ban @@ -10,7 +10,7 @@ my $f2bc="/usr/bin/env fail2ban-client"; #make sure this path is correct -my $iptables="/usr/bin/env iptables"; +my $iptablesPath="/usr/bin/env iptables"; # The cache file to use, if using caching. 
my $cache='/var/cache/fail2ban'; @@ -103,7 +103,7 @@ if (defined( $opts{u} )){ }; if ( $os =~ '^Linux' ){ - my $iptables=`$iptables -L -n`; + my $iptables=`$iptablesPath -L -n`; my @iptablesA=split( /\n/, $iptables ); #check each line From 3b0287bc0c9420686115e436c959698bb3eaf01b Mon Sep 17 00:00:00 2001 From: VVelox Date: Thu, 23 Feb 2017 09:13:59 -0600 Subject: [PATCH 100/497] use $f2bc for getting jail status now and not just only overall status --- snmp/fail2ban | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/fail2ban b/snmp/fail2ban index de1bd0907..f1b785834 100644 --- a/snmp/fail2ban +++ b/snmp/fail2ban @@ -76,7 +76,7 @@ if (defined( $opts{u} )){ while(defined($jails[$int])){ #get the total for this jail - my $jailStatusOutput=`fail2ban-client status $jails[$int]`; + my $jailStatusOutput=`$f2bc status $jails[$int]`; my @jailStatusOutputA=split(/\n/, $jailStatusOutput); my ( $jailTotal )=grep(/Currently\ banned\:/, @jailStatusOutputA); $jailTotal=~s/.*\://; From 27b4b7100d402319a125823cc27ed9b0a1c2d4c7 Mon Sep 17 00:00:00 2001 From: VVelox Date: Thu, 23 Feb 2017 09:54:38 -0600 Subject: [PATCH 101/497] if cache older than 360 seconds, don't use it --- snmp/fail2ban | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/snmp/fail2ban b/snmp/fail2ban index f1b785834..18934e294 100644 --- a/snmp/fail2ban +++ b/snmp/fail2ban @@ -45,7 +45,12 @@ getopts('u', \%opts); my $noWrite=0; if ( ! defined( $opts{u} ) ){ - if ( -f $cache ){ + my ($dev,$ino,$mode,$nlink,$uid,$gid,$rdev,$size, + $atime,$mtime,$ctime,$blksize,$blocks) = stat($cache); + my $age=time-$mtime; + + + if (( -f $cache )&&( $age < 360 )){ my $old=''; open(my $readfh, "<", $cache) or die "Can't open '".$cache."'"; # if this is over 2048, something is most likely wrong @@ -55,7 +60,6 @@ if ( ! 
defined( $opts{u} ) ){ }else{ $opts{u}=1; $noWrite=1; - } } From f8bca7d9833aefb303935780a868b1a2486edebf Mon Sep 17 00:00:00 2001 From: Tony Murray Date: Fri, 24 Feb 2017 11:02:19 -0600 Subject: [PATCH 102/497] Redefining $firewalled --- snmp/fail2ban | 1 - 1 file changed, 1 deletion(-) diff --git a/snmp/fail2ban b/snmp/fail2ban index 18934e294..4076a62f2 100644 --- a/snmp/fail2ban +++ b/snmp/fail2ban @@ -112,7 +112,6 @@ if (defined( $opts{u} )){ #check each line my $int=0; - my $firewalled=0; my $count=0; while( defined( $iptablesA[$int] ) ){ my $line=$iptablesA[$int]; From a696473006e4d7b977019e1b3891dc8379b2d876 Mon Sep 17 00:00:00 2001 From: Tony Murray Date: Fri, 24 Feb 2017 11:09:21 -0600 Subject: [PATCH 103/497] Update fail2ban --- snmp/fail2ban | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/snmp/fail2ban b/snmp/fail2ban index 4076a62f2..95cf9e31c 100644 --- a/snmp/fail2ban +++ b/snmp/fail2ban @@ -47,10 +47,8 @@ my $noWrite=0; if ( ! defined( $opts{u} ) ){ my ($dev,$ino,$mode,$nlink,$uid,$gid,$rdev,$size, $atime,$mtime,$ctime,$blksize,$blocks) = stat($cache); - my $age=time-$mtime; - - if (( -f $cache )&&( $age < 360 )){ + if (( -f $cache ) && defined( $mtime ) && ( (time-$mtime) < 360 )){ my $old=''; open(my $readfh, "<", $cache) or die "Can't open '".$cache."'"; # if this is over 2048, something is most likely wrong From 677e59a76fbfb23d34df7f7ad5665b4aae402043 Mon Sep 17 00:00:00 2001 From: VVelox Date: Wed, 1 Mar 2017 12:35:06 -0600 Subject: [PATCH 104/497] add detailed Postfix poller (#92) * add detailed postfix poller * env perl --- snmp/postfixdetailed | 524 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 524 insertions(+) create mode 100644 snmp/postfixdetailed diff --git a/snmp/postfixdetailed b/snmp/postfixdetailed new file mode 100644 index 000000000..20d58946e --- /dev/null +++ b/snmp/postfixdetailed @@ -0,0 +1,524 @@ +#!/usr/bin/env perl + +# add this to your snmpd.conf file as below +# extend postfixdetailed 
/etc/snmp/postfixdetailed + +# The cache file to use. +my $cache='/var/cache/postfixdetailed'; + +# the location of pflogsumm +my $pflogsumm='/usr/bin/env pflogsumm'; + +#totals +# 847 received = received +# 852 delivered = delivered +# 0 forwarded = forwarded +# 3 deferred (67 deferrals)= deferred +# 0 bounced = bounced +# 593 rejected (41%) = rejected +# 0 reject warnings = rejectw +# 0 held = held +# 0 discarded (0%) = discarded + +# 16899k bytes received = bytesr +# 18009k bytes delivered = bytesd +# 415 senders = senders +# 266 sending hosts/domains = sendinghd +# 15 recipients = recipients +# 9 recipient hosts/domains = recipienthd + +######message deferral detail +#Connection refused = deferralcr +#Host is down = deferralhid + +########message reject detail +#Client host rejected = chr +#Helo command rejected: need fully-qualified hostname = hcrnfqh +#Sender address rejected: Domain not found = sardnf +#Sender address rejected: not owned by user = sarnobu +#blocked using = bu +#Recipient address rejected: User unknown = raruu +#Helo command rejected: Invalid name = hcrin +#Sender address rejected: need fully-qualified address = sarnfqa +#Recipient address rejected: Domain not found = rardnf +#Recipient address rejected: need fully-qualified address = rarnfqa +#Improper use of SMTP command pipelining = iuscp +#Message size exceeds fixed limit = msefl +#Server configuration error = sce +#Server configuration problem = scp +#unknown reject reason = urr + +my $old=''; + +#reads in the old data if it exists +if ( -f $cache ){ + open(my $fh, "<", $cache) or die "Can't open '".$cache."'"; + # if this is over 2048, something is most likely wrong + read($fh , $old , 2048); + close($fh); +} + +my ( $received, + $delivered, + $forwarded, + $deferred, + $bounced, + $rejected, + $rejectw, + $held, + $discarded, + $bytesr, + $bytesd, + $senders, + $sendinghd, + $recipients, + $recipienthd, + $deferralcr, + $deferralhid, + $chr, + $hcrnfqh, + $sardnf, + $sarnobu, + $bu, + 
$raruu, + $hcrin, + $sarnfqa, + $rardnf, + $rarnfqa, + $iuscp, + $sce, + $scp, + $urr) = split ( /\n/, $old ); + +if ( ! defined( $received ) ){ $received=0; } +if ( ! defined( $delivered ) ){ $delivered=0; } +if ( ! defined( $forwarded ) ){ $forwarded=0; } +if ( ! defined( $deferred ) ){ $deferred=0; } +if ( ! defined( $bounced ) ){ $bounced=0; } +if ( ! defined( $rejected ) ){ $rejected=0; } +if ( ! defined( $rejectw ) ){ $rejectw=0; } +if ( ! defined( $held ) ){ $held=0; } +if ( ! defined( $discarded ) ){ $discarded=0; } +if ( ! defined( $bytesr ) ){ $bytesr=0; } +if ( ! defined( $bytesd ) ){ $bytesd=0; } +if ( ! defined( $senders ) ){ $senders=0; } +if ( ! defined( $sendinghd ) ){ $sendinghd=0; } +if ( ! defined( $recipients ) ){ $recipients=0; } +if ( ! defined( $recipienthd ) ){ $recipienthd=0; } +if ( ! defined( $deferralcr ) ){ $deferralcr=0; } +if ( ! defined( $deferralhid ) ){ $deferralhid=0; } +if ( ! defined( $chr ) ){ $chr=0; } +if ( ! defined( $hcrnfqh ) ){ $hcrnfqh=0; } +if ( ! defined( $sardnf ) ){ $sardnf=0; } +if ( ! defined( $sarnobu ) ){ $sarnobu=0; } +if ( ! defined( $bu ) ){ $bu=0; } +if ( ! defined( $raruu ) ){ $raruu=0; } +if ( ! defined( $hcrin ) ){ $hcrin=0; } +if ( ! defined( $sarnfqa ) ){ $sarnfqa=0; } +if ( ! defined( $rardnf ) ){ $rardnf=0; } +if ( ! defined( $rarnfqa ) ){ $rarnfqa=0; } +if ( ! defined( $iuscp ) ){ $iuscp=0; } +if ( ! defined( $sce ) ){ $sce=0; } +if ( ! defined( $scp ) ){ $scp=0; } +if ( ! 
defined( $urr ) ){ $urr=0; } + +#init current variables +my $receivedC=0; +my $deliveredC=0; +my $forwardedC=0; +my $deferredC=0; +my $bouncedC=0; +my $rejectedC=0; +my $rejectwC=0; +my $heldC=0; +my $discardedC=0; +my $bytesrC=0; +my $bytesdC=0; +my $sendersC=0; +my $sendinghdC=0; +my $recipientsC=0; +my $recipienthdC=0; +my $deferralcrC=0; +my $deferralhidC=0; +my $chrC=0; +my $hcrnfqhC=0; +my $sardnfC=0; +my $sarnobuC=0; +my $buC=0; +my $raruuC=0; +my $hcrinC=0; +my $sarnfqaC=0; +my $rardnfC=0; +my $rarnfqaC=0; +my $iuscpC=0; +my $sceC=0; +my $scpC=0; +my $urrC=0; + +sub newValue{ + my $old=$_[0]; + my $new=$_[1]; + + #if new is undefined, just default to 0... this should never happen + if ( !defined( $new ) ){ + warn('New not defined'); + return 0; + } + + #sets it to 0 if old is not defined + if ( !defined( $old ) ){ + warn('Old not defined'); + $old=0; + } + + #make sure they are both numberic and if not set to zero + if( $old !~ /^[0123456789]*$/ ){ + warn('Old not numeric'); + $old=0; + } + if( $new !~ /^[0123456789]*$/ ){ + warn('New not numeric'); + $new=0; + } + + #log rotation happened + if ( $old > $new ){ + return $new; + }; + + return $new - $old; +} + + +my $output=`$pflogsumm /var/log/maillog`; + +#holds RBL values till the end when it is compared to the old one +my $buNew=0; + +#holds the current values for checking later +my $current=''; + +my @outputA=split( /\n/, $output ); +my $int=0; +while ( defined( $outputA[$int] ) ){ + my $line=$outputA[$int]; + + $line=~s/^ *//; + $line=~s/ +/ /g; + $line=~s/\)$//; + + my $handled=0; + + #received line + if ( ( $line =~ /[0123456789] received$/ ) && ( ! $handled ) ){ + $line=~s/ .*//; + $receivedC=$line; + $received=newValue( $received, $line ); + $handled=1; + } + + #delivered line + if ( ( $line =~ /[0123456789] delivered$/ ) && ( ! 
$handled ) ){ + $line=~s/ .*//; + $deliveredC=$line; + $delivered=newValue( $delivered, $line ); + $handled=1; + } + + #forward line + if ( ( $line =~ /[0123456789] forwarded$/ ) && ( ! $handled ) ){ + $line=~s/ .*//; + $forwardedC=$line; + $forwarded=newValue( $forwarded, $line ); + $handled=1; + } + + #defereed line + if ( ( $line =~ /[0123456789] deferred \(/ ) && ( ! $handled ) ){ + $line=~s/ .*//; + $deferredC=$line; + $deferred=newValue( $deferred, $line ); + $handled=1; + } + + #bounced line + if ( ( $line =~ /[0123456789] bounced$/ ) && ( ! $handled ) ){ + $line=~s/ .*//; + $bouncedC=$line; + $bounced=newValue( $bounced, $line ); + $handled=1; + } + + #rejected line + if ( ( $line =~ /[0123456789] rejected \(/ ) && ( ! $handled ) ){ + $line=~s/ .*//; + $rejectedC=$line; + $rejected=newValue( $rejected, $line ); + $handled=1; + } + + #reject warning line + if ( ( $line =~ /[0123456789] reject warnings/ ) && ( ! $handled ) ){ + $line=~s/ .*//; + $rejectwC=$line; + $rejectw=newValue( $rejectw, $line ); + $handled=1; + } + + #held line + if ( ( $line =~ /[0123456789] held$/ ) && ( ! $handled ) ){ + $line=~s/ .*//; + $heldC=$line; + $held=newValue( $held, $line ); + $handled=1; + } + + #discarded line + if ( ( $line =~ /[0123456789] discarded \(/ ) && ( ! $handled ) ){ + $line=~s/ .*//; + $discardedC=$line; + $discarded=newValue( $discarded, $line ); + $handled=1; + } + + #bytes received line + if ( ( $line =~ /[0123456789kM] bytes received$/ ) && ( ! $handled ) ){ + $line=~s/ .*//; + $line=~s/k/000/; + $line=~s/M/000000/; + $bytesrC=$line; + $bytesr=newValue( $bytesr, $line ); + $handled=1; + } + + #bytes delivered line + if ( ( $line =~ /[0123456789kM] bytes delivered$/ ) && ( ! $handled ) ){ + $line=~s/ .*//; + $line=~s/k/000/; + $line=~s/M/000000/; + $bytesdC=$line; + $bytesd=newValue( $bytesd, $line ); + $handled=1; + } + + #senders line + if ( ( $line =~ /[0123456789] senders$/ ) && ( ! 
$handled ) ){ + $line=~s/ .*//; + $sendersC=$line; + $senders=newValue( $senders, $line ); + $handled=1; + } + + #sendering hosts/domains line + if ( ( $line =~ /[0123456789] sending hosts\/domains$/ ) && ( ! $handled ) ){ + $line=~s/ .*//; + $sendinghdC=$line; + $sendinghd=newValue( $sendinghd, $line ); + $handled=1; + } + + #recipients line + if ( ( $line =~ /[0123456789] recipients$/ ) && ( ! $handled ) ){ + $line=~s/ .*//; + $recipientsC=$line; + $recipients=newValue( $recipients, $line ); + $handled=1; + } + + #recipients line + if ( ( $line =~ /[0123456789] recipient hosts\/domains$/ ) && ( ! $handled ) ){ + $line=~s/ .*//; + $recipienthdC=$line; + $recipienthd=newValue( $recipienthd, $line ); + $handled=1; + } + + # deferrals connectios refused + if ( ( $line =~ /[0123456789] 25\: Connection refused$/ ) && ( ! $handled ) ){ + $line=~s/ .*//; + $deferralcrC=$line; + $deferralcr=newValue( $deferralcr, $line ); + $handled=1; + } + + # deferrals Host is down + if ( ( $line =~ /Host is down$/ ) && ( ! $handled ) ){ + $line=~s/ .*//; + $deferralcrC=$line; + $deferralcr=newValue( $deferralcr, $line ); + $handled=1; + } + + # Client host rejected + if ( ( $line =~ /Client host rejected/ ) && ( ! $handled ) ){ + $line=~s/.*\: //g; + $chrC=$line; + $chr=newValue( $chr, $line ); + $handled=1; + } + + #Helo command rejected: need fully-qualified hostname + if ( ( $line =~ /Helo command rejected\: need fully\-qualified hostname/ ) && ( ! $handled ) ){ + $line=~s/.*\: //g; + $hcrnfqhC=$line; + $hcrnfqh=newValue( $hcrnfqh, $line ); + $handled=1; + } + + #Sender address rejected: Domain not found + if ( ( $line =~ /Sender address rejected\: Domain not found/ ) && ( ! $handled ) ){ + $line=~s/.*\: //g; + $sardnfC=$line; + $sardnf=newValue( $sardnf, $line ); + $handled=1; + } + + #Sender address rejected: not owned by user + if ( ( $line =~ /Sender address rejected\: not owned by user/ ) && ( ! 
$handled ) ){ + $line=~s/.*\: //g; + $sarnobuC=$line; + $sarnobu=newValue( $sarnobu, $line ); + $handled=1; + } + + #blocked using + # These lines are RBLs so there will be more than one. + # Use $buNew to add them all up. + if ( ( $line =~ /blocked using/ ) && ( ! $handled ) ){ + $line=~s/.*\: //g; + $buNew=$buNew + $line; + $handled=1; + } + + #Recipient address rejected: User unknown + if ( ( $line =~ /Recipient address rejected\: User unknown/ ) && ( ! $handled ) ){ + $line=~s/.*\: //g; + $raruuC=$line; + $raruu=newValue( $raruu, $line ); + $handled=1; + } + + #Helo command rejected: Invalid name + if ( ( $line =~ /Helo command rejected\: Invalid name/ ) && ( ! $handled ) ){ + $line=~s/.*\: //g; + $hcrinC=$line; + $hcrin=newValue( $hcrin, $line ); + } + + #Sender address rejected: need fully-qualified address + if ( ( $line =~ /Sender address rejected\: need fully-qualified address/ ) && ( ! $handled ) ){ + $line=~s/.*\: //g; + $sarnfqaC=$line; + $sarnfqa=newValue( $sarnfqa, $line ); + } + + #Recipient address rejected: Domain not found + if ( ( $line =~ /Recipient address rejected\: Domain not found/ ) && ( ! $handled ) ){ + $line=~s/.*\: //g; + $rardnfC=$line; + $rardnf=newValue( $rardnf, $line ); + } + + #Improper use of SMTP command pipelining + if ( ( $line =~ /Improper use of SMTP command pipelining/ ) && ( ! $handled ) ){ + $line=~s/.*\: //g; + $iuoscpC=$line; + $iuoscp=newValue( $iuoscp, $line ); + } + + #Message size exceeds fixed limit + if ( ( $line =~ /Message size exceeds fixed limit/ ) && ( ! $handled ) ){ + $line=~s/.*\: //g; + $mseflC=$line; + $msefl=newValue( $msefl, $line ); + } + + #Server configuration error + if ( ( $line =~ /Server configuration error/ ) && ( ! $handled ) ){ + $line=~s/.*\: //g; + $sceC=$line; + $sce=newValue( $sce, $line ); + } + + #Server configuration problem + if ( ( $line =~ /Server configuration problem/ ) && ( ! 
$handled ) ){ + $line=~s/.*\: //g; + $scpC=$line; + $scp=newValue( $scp, $line ); + } + + $int++; +} + +# final RBL total +$bu=newValue( $bu, $buNew ); + +my $data=$received."\n". + $delivered."\n". + $forwarded."\n". + $deferred."\n". + $bounced."\n". + $rejected."\n". + $rejectw."\n". + $held."\n". + $discarded."\n". + $bytesr."\n". + $bytesd."\n". + $senders."\n". + $sendinghd."\n". + $recipients."\n". + $recipienthd."\n". + $deferralcr."\n". + $deferralhid."\n". + $chr."\n". + $hcrnfqh."\n". + $sardnf."\n". + $sarnobu."\n". + $bu."\n". + $raruu."\n". + $hcrin."\n". + $sarnfqa."\n". + $rardnf."\n". + $rarnfqa."\n". + $iuscp."\n". + $sce."\n". + $scp."\n". + $urr."\n"; + +print $data; + +my $current=$receivedC."\n". + $deliveredC."\n". + $forwardedC."\n". + $deferredC."\n". + $bouncedC."\n". + $rejectedC."\n". + $rejectwC."\n". + $heldC."\n". + $discardedC."\n". + $bytesrC."\n". + $bytesdC."\n". + $sendersC."\n". + $sendinghdC."\n". + $recipientsC."\n". + $recipienthdC."\n". + $deferralcrC."\n". + $deferralhidC."\n". + $chrC."\n". + $hcrnfqhC."\n". + $sardnfC."\n". + $sarnobuC."\n". + $buNew."\n". + $raruuC."\n". + $hcrinC."\n". + $sarnfqaC."\n". + $rardnfC."\n". + $rarnfqaC."\n". + $iuscpC."\n". + $sceC."\n". + $scpC."\n". + $urrC."\n"; + +open(my $fh, ">", $cache) or die "Can't open '".$cache."'"; +print $fh $current; +close($fh); From 56a51d709227951beab9f2a145fd7e9cd0f0c60a Mon Sep 17 00:00:00 2001 From: VVelox Date: Wed, 1 Mar 2017 17:46:13 -0600 Subject: [PATCH 105/497] add Postgres SNMP extend (#91) * add Postgres SNMP extend * minor comment cleanups * use env for check_postgres.pl * quote the string --- snmp/postgres | 131 ++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 131 insertions(+) create mode 100644 snmp/postgres diff --git a/snmp/postgres b/snmp/postgres new file mode 100644 index 000000000..5897d0618 --- /dev/null +++ b/snmp/postgres @@ -0,0 +1,131 @@ +#!/bin/sh +#Copyright (c) 2017, Zane C. Bowers-Hadley +#All rights reserved. 
+# +#Redistribution and use in source and binary forms, with or without modification, +#are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +#ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +#WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +#IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, +#INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +#BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +#LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +#OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +#THE POSSIBILITY OF SUCH DAMAGE. + +#set the user here to use +#be sure to set up the user in .pgpass for the user snmpd is running as +DBuser=pgsql + +# You may want to disable totalling for the postgres DB as that can make the total graphs artificially noisy. 
+# 1 = don't total stats for the DB postgres +# 0 = include postgres in the totals +ignorePG=1; + +#make sure the paths are right for your system +cpg='/usr/bin/env check_postgres.pl' + +$cpg -u $DBuser --action dbstats | awk -F ' ' ' + +BEGIN{ + backends=0; + commits=0; + rollbacks=0; + read=0; + hit=0; + idxscan=0; + idxtupread=0; + idxtupfetch=0; + idxblksread=0; + idxblkshit=0; + seqscan=0; + seqtupread=0; + ret=0; + fetch=0; + ins=0; + upd=0; + del=0; + db=""; + ignorePG='$ignorePG'; + toAdd=1; +} +{ + gsub(/dbname\:/, ""); + gsub(/backends\:/, ""); + gsub(/commits\:/, ""); + gsub(/rollbacks\:/, ""); + gsub(/idxscan\:/, ""); + gsub(/idxtupread\:/, ""); + gsub(/idxtupfetch\:/, ""); + gsub(/idxblksread\:/, ""); + gsub(/idxblkshit\:/, ""); + gsub(/seqscan\:/, ""); + gsub(/seqtupread\:/, ""); + gsub(/ret\:/, ""); + gsub(/fetch\:/, ""); + gsub(/ins\:/, ""); + gsub(/upd\:/, ""); + gsub(/del\:/, ""); + #must be processed last or they step on other gsub + gsub(/read\:/, ""); + gsub(/hit\:/, ""); + + if ( $18 == "postgres" ){ + if ( ignorePG == 1 ){ toAdd=0 } + } + + if ( toAdd == 1 ){ + backends=backends+$1; + commits=commits+$2; + rollbacks=rollbacks+$3; + idxscan=idxscan+$6; + idxtupread=idxtupread+$7; + idxtupfetch=idxtupfetch+$8; + idxblksread=idxblksread+$9; + idxblkshit=idxblkshit+$10; + seqscan=seqscan+$11; + seqtupread=seqtupread+$12; + ret=ret+$13; + fetch=fetch+$14; + ins=ins+$15; + upd=upd+$16; + del=del+$17; + read=read+$4; + hit=hit+$5; + } + + db=db$1" "$2" "$3" "$4" "$5" "$6" "$7" "$8" "$9" "$10" "$11" "$12" "$13" "$14" "$15" "$16" "$17" "$18"\n"; + + toAdd=1; +} +END{ + print backends; + print commits; + print rollbacks; + print read; + print hit; + print idxscan; + print idxtupread; + print idxtupfetch; + print idxblksread; + print idxblkshit; + print seqscan; + print seqtupread; + print ret; + print fetch; + print ins; + print upd; + print del; + print db; +} +' + From c6a5a479a32b3ca3a0a7110b5641a04ce81ccbfd Mon Sep 17 00:00:00 2001 From: VVelox 
Date: Fri, 3 Mar 2017 08:49:15 -0600 Subject: [PATCH 106/497] FreeBSD NFS extends (#90) * add the FreeBSD NFS client and server extends * white space cleanup * white space cleanup --- snmp/fbsdnfsclient | 181 +++++++++++++++++++++++++++++++++++++++++++++ snmp/fbsdnfsserver | 151 +++++++++++++++++++++++++++++++++++++ 2 files changed, 332 insertions(+) create mode 100644 snmp/fbsdnfsclient create mode 100644 snmp/fbsdnfsserver diff --git a/snmp/fbsdnfsclient b/snmp/fbsdnfsclient new file mode 100644 index 000000000..f41c7b606 --- /dev/null +++ b/snmp/fbsdnfsclient @@ -0,0 +1,181 @@ +#!/usr/local/bin/perl + +# Add this to snmpd.conf as below. +# extend fbsdnfsclient /etc/snmp/fbsdnfsclient + +my $nfsstatOutput=`/usr/bin/nfsstat`; +my @nfsstatOutputA=split( /\n/, $nfsstatOutput ); +my $int=0; + +my ( + $Getattr, + $Setattr, + $Lookup, + $Readlink, + $Read, + $Write, + $Create, + $Remove, + $Rename, + $Link, + $Symlink, + $Mkdir, + $Rmdir, + $Readdir, + $RdirPlus, + $Access, + $Mknod, + $Fsstat, + $Fsinfo, + $PathConf, + $Commit, + $TimedOut, + $Invalid, + $XReplies, + $Retries, + $Requests, + $AttrHits, + $AttrMisses, + $LkupHits, + $LkupMisses, + $BioRHits, + $BioRMisses, + $BioWHits, + $BioWMisses, + $BioRLHits, + $BioRLMisses, + $BioDHits, + $BioDMisses, + $DirEHits, + $DirEMisses, + $AccsHits, + $AccsMisses, + ); + +while( defined( $nfsstatOutputA[$int] ) ){ + $nfsstatOutputA[$int]=~s/^ +//; + $nfsstatOutputA[$int]=~s/ +/ /g; + + if ( $int == 3 ){ + ( + $Getattr, + $Setattr, + $Lookup, + $Readlink, + $Read, + $Write, + $Create, + $Remove, + )=split( /\ /, $nfsstatOutputA[$int] ); + + } + + if ( $int == 5 ){ + ( + $Rename, + $Link, + $Symlink, + $Mkdir, + $Rmdir, + $Readdir, + $RdirPlus, + $Access, + )=split( /\ /, $nfsstatOutputA[$int] ); + + } + + if ( $int == 7 ){ + ( + $Mknod, + $Fsstat, + $Fsinfo, + $PathConf, + $Commit, + )=split( /\ /, $nfsstatOutputA[$int] ); + + } + + if ( $int == 10 ){ + ( + $TimedOut, + $Invalid, + $XReplies, + $Retries, + $Requests, + 
)=split( /\ /, $nfsstatOutputA[$int] ); + + } + + if ( $int == 13 ){ + ( + $AttrHits, + $AttrMisses, + $LkupHits, + $LkupMisses, + $BioRHits, + $BioRMisses, + $BioWHits, + $BioWMisses, + )=split( /\ /, $nfsstatOutputA[$int] ); + + } + + if ( $int == 15 ){ + ( + $BioRLHits, + $BioRLMisses, + $BioDHits, + $BioDMisses, + $DirEHits, + $DirEMisses, + $AccsHits, + $AccsMisses, + )=split( /\ /, $nfsstatOutputA[$int] ); + + } + + $int++; +} + +print $Getattr."\n". + $Setattr."\n". + $Lookup."\n". + $Readlink."\n". + $Read."\n". + $Write."\n". + $Create."\n". + $Remove."\n". + $Rename."\n". + $Link."\n". + $Symlink."\n". + $Mkdir."\n". + $Rmdir."\n". + $Readdir."\n". + $RdirPlus."\n". + $Access."\n". + $Mknod."\n". + $Fsstat."\n". + $Fsinfo."\n". + $PathConf."\n". + $Commit."\n". + $TimedOut."\n". + $Invalid."\n". + $XReplies."\n". + $Retries."\n". + $Requests."\n". + $AttrHits."\n". + $AttrMisses."\n". + $LkupHits."\n". + $LkupMisses."\n". + $BioRHits."\n". + $BioRMisses."\n". + $BioWHits."\n". + $BioWMisses."\n". + $BioRLHits."\n". + $BioRLMisses."\n". + $BioDHits."\n". + $BioDMisses."\n". + $DirEHits."\n". + $DirEMisses."\n". + $AccsHits."\n". + $AccsMisses."\n"; diff --git a/snmp/fbsdnfsserver b/snmp/fbsdnfsserver new file mode 100644 index 000000000..4664cfa61 --- /dev/null +++ b/snmp/fbsdnfsserver @@ -0,0 +1,151 @@ +#!/usr/local/bin/perl + +# Add this to snmpd.conf as below. 
+# extend fbsdnfsserver /etc/snmp/fbsdnfsserver + +my $nfsstatOutput=`/usr/bin/nfsstat`; +my @nfsstatOutputA=split( /\n/, $nfsstatOutput ); +my $int=0; + +my ( + $Getattr, + $Setattr, + $Lookup, + $Readlink, + $Read, + $Write, + $Create, + $Remove, + $Rename, + $Link, + $Symlink, + $Mkdir, + $Rmdir, + $Readdir, + $RdirPlus, + $Access, + $Mknod, + $Fsstat, + $Fsinfo, + $PathConf, + $Commit, + $RetFailed, + $Faults, + $Inprog, + $Idem, + $Nonidem, + $Misses, + $WriteOps, + $WriteRPC, + $Opsaved + ); + +while( defined( $nfsstatOutputA[$int] ) ){ + $nfsstatOutputA[$int]=~s/^ +//; + $nfsstatOutputA[$int]=~s/ +/ /g; + + if ( $int == 19 ){ + ( + $Getattr, + $Setattr, + $Lookup, + $Readlink, + $Read, + $Write, + $Create, + $Remove, + )=split( /\ /, $nfsstatOutputA[$int] ); + + } + + if ( $int == 21 ){ + ( + $Rename, + $Link, + $Symlink, + $Mkdir, + $Rmdir, + $Readdir, + $RdirPlus, + $Access + )=split( /\ /, $nfsstatOutputA[$int] ); + + } + + if ( $int == 23 ){ + ( + $Mknod, + $Fsstat, + $Fsinfo, + $PathConf, + $Commit + )=split( /\ /, $nfsstatOutputA[$int] ); + + } + + if ( $int == 25 ){ + ( + $RetFailed + )=split( /\ /, $nfsstatOutputA[$int] ); + + } + + if ( $int == 27 ){ + ( + $Faults + )=split( /\ /, $nfsstatOutputA[$int] ); + + } + + if ( $int == 30 ){ + ( + $Inprog, + $Idem, + $Nonidem, + $Misses + )=split( /\ /, $nfsstatOutputA[$int] ); + + } + + if ( $int == 33 ){ + ( + $WriteOps, + $WriteRPC, + $Opsaved + )=split( /\ /, $nfsstatOutputA[$int] ); + + } + + $int++; +} + +print $Getattr."\n". + $Setattr."\n". + $Lookup."\n". + $Readlink."\n". + $Read."\n". + $Write."\n". + $Create."\n". + $Remove."\n". + $Rename."\n". + $Link."\n". + $Symlink."\n". + $Mkdir."\n". + $Rmdir."\n". + $Readdir."\n". + $RdirPlus."\n". + $Access."\n". + $Mknod."\n". + $Fsstat."\n". + $Fsinfo."\n". + $PathConf."\n". + $Commit."\n". + $RetFailed."\n". + $Faults."\n". + $Inprog."\n". + $Idem."\n". + $Nonidem."\n". + $Misses."\n". + $WriteOps."\n". + $WriteRPC."\n". 
+ $Opsaved."\n"; From 856eb8dfac3423066676fc9404391cd361417a16 Mon Sep 17 00:00:00 2001 From: VVelox Date: Fri, 3 Mar 2017 12:55:55 -0600 Subject: [PATCH 107/497] add squid snmp extend (#93) --- snmp/squid | 74 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 74 insertions(+) create mode 100644 snmp/squid diff --git a/snmp/squid b/snmp/squid new file mode 100644 index 000000000..633db4170 --- /dev/null +++ b/snmp/squid @@ -0,0 +1,74 @@ +#!/bin/sh + +# Add this to snmpd.conf as below. +# extend squid /etc/snmp/squid + +# To get this working smoothly and securely, you can add the items below to your squid.conf. +# acl snmppublic snmp_community public +# snmp_port 3401 +# snmp_access allow snmppublic localhost +# snmp_access deny all + + +# set this as being equal to the value of 'acl snmppublic snmp_community' in squid.conf +community='public' + +# set this as being equal to the value of 'snmp_port' in squid.conf +port='3401' + +# the full path to snmpwalk +snmpwalk='/usr/bin/env snmpwalk' + +## +## Nothing Should Need Changed Below Here +## + +# cacheMemMaxSize Integer32 +# cacheSwapMaxSize Integer32 +# cacheSwapHighWM Integer32 +# cacheSwapLowWM Integer32 +$snmpwalk -O qv -c $community -v 2c localhost:$port 1.3.6.1.4.1.3495.1.2.5 + +# cacheSysPageFaults Counter32 +# cacheSysNumReads Counter32 +# cacheMemUsage Integer32 +# cacheCpuTime Integer32 +# cacheCpuUsage Integer32 +# cacheMaxResSize Integer32 +# cacheNumObjCount Gauge32 +# cacheCurrentLRUExpiration Timeticks +# Storage LRU Expiration Age +# cacheCurrentUnlinkRequests Gauge32 +# cacheCurrentUnusedFDescrCnt Gauge32 +# cacheCurrentResFileDescrCnt Gauge32 +# cacheCurrentFileDescrCnt Gauge32 +# cacheCurrentFileDescrMax Gauge32 +$snmpwalk -O qv -c $community -v 2c localhost:$port 1.3.6.1.4.1.3495.1.3.1 + +# cacheProtoClientHttpRequests Counter32 +# cacheHttpHits Counter32 +# cacheHttpErrors Counter32 +# cacheHttpInKb Counter32 +# cacheHttpOutKb Counter32 +# cacheIcpPktsSent Counter32 +# 
cacheIcpPktsRecv Counter32 +# cacheIcpKbSent Counter32 +# cacheIcpKbRecv Counter32 +# cacheServerRequests Integer32 +# cacheServerErrors Integer32 +# cacheServerInKb Counter32 +# cacheServerOutKb Counter32 +# cacheCurrentSwapSize Gauge32 +# cacheClients Gauge32 +$snmpwalk -O qv -c $community -v 2c localhost:$port 1.3.6.1.4.1.3495.1.3.2.1 + +# cacheRequestHitRatio.1 Integer32 +# cacheRequestHitRatio.5 Integer32 +# cacheRequestHitRatio.60 Integer32 +$snmpwalk -O qv -c $community -v 2c localhost:$port 1.3.6.1.4.1.3495.1.3.2.2.1.9 + +# cacheRequestByteRatio.1 Integer32 +# cacheRequestByteRatio.5 Integer32 +# cacheRequestByteRatio.60 Integer32 +$snmpwalk -O qv -c $community -v 2c localhost:$port 1.3.6.1.4.1.3495.1.3.2.2.1.10 + From 857a5351ad2a8fb320ee6e9eeffc4cf0ecb73778 Mon Sep 17 00:00:00 2001 From: VVelox Date: Fri, 3 Mar 2017 14:41:38 -0600 Subject: [PATCH 108/497] add Nvidia SNMP extend poller (#94) * add Nvidia SNMP extend * update the extend path * now support more than 4 GPUs this will now support how ever many GPUs are installed on a system... Just double checked and it appears nvidia-smi dmon only reports up to 4 GPUs at a time... so if we have more than 4, begin checking they exist and if so print them --- snmp/nvidia | 39 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) create mode 100644 snmp/nvidia diff --git a/snmp/nvidia b/snmp/nvidia new file mode 100644 index 000000000..d9d73a755 --- /dev/null +++ b/snmp/nvidia @@ -0,0 +1,39 @@ +#!/bin/sh + +# Add this to snmpd.conf as below. +# extend nvidia /etc/snmps/nvidia + +# Please verify the following paths are correct +nvidiasmi='/usr/bin/env nvidia-smi' +grep='/usr/bin/env grep' +sed='/usr/bin/env sed' + +## +## Nothing below here should need touched. 
+## + +## gpu pwr temp sm mem enc dec mclk pclk pviol tviol fb bar1 sbecc dbecc pci rxpci txpci +## Idx W C % % % % MHz MHz % bool MB MB errs errs errs MB/s MB/s +# 0 1 43 3 2 0 0 2700 862 0 0 462 4 - - 0 26 3 +$nvidiasmi dmon -c 1 -s pucvmet | $grep -v ^# | $sed 's/^ *//' | $sed 's/ */,/g' | $sed 's/-/0/g' + +lines=`$nvidiasmi dmon -c 1 -s pucvmet | $grep -v ^# | $sed 's/^ *//' | $sed 's/ */,/g' | $sed 's/-/0/g' | wc -l` + +# if we are less than 5 then all GPUs were printed +if [ $lines -lt 5 ]; then + exit 0; +fi + +gpu=5 +loop=1 +while [ $loop -eq 1 ] +do + $nvidiasmi dmon -c 1 -i $gpu > /dev/null + if [ $? -eq 0 ]; then + $nvidiasmi dmon -c 1 -s pucvmet -i $gpu | $grep -v ^# | $sed 's/^ *//' | $sed 's/ */,/g' | $sed 's/-/0/g' + else + loop=0 + fi + + gpu=`expr $gpu + 1` +done From 56a75ac6a7bab549d4599ec9865eaea7f5ba79c4 Mon Sep 17 00:00:00 2001 From: VVelox Date: Tue, 7 Mar 2017 23:40:09 -0600 Subject: [PATCH 109/497] clean up snmp/mysql_stats.php and make it a proper snmpd extend script now (#99) * cleanup and make it something that can properly be invoked via cli * blank the user/pass/host bits increasing the chances it will work out of the box * Update mysql_stats.php * Update mysql_stats.php * Update mysql_stats.php * Update mysql_stats.php * Rename mysql_stats.php to mysql --- snmp/{mysql_stats.php => mysql} | 23 +++++++---------------- 1 file changed, 7 insertions(+), 16 deletions(-) rename snmp/{mysql_stats.php => mysql} (98%) diff --git a/snmp/mysql_stats.php b/snmp/mysql similarity index 98% rename from snmp/mysql_stats.php rename to snmp/mysql index 89522a61e..811192c9b 100755 --- a/snmp/mysql_stats.php +++ b/snmp/mysql @@ -1,13 +1,12 @@ +#!/usr/bin/env php Date: Tue, 7 Mar 2017 23:48:15 -0600 Subject: [PATCH 110/497] Update mysql --- snmp/mysql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/mysql b/snmp/mysql index 811192c9b..698ac535d 100755 --- a/snmp/mysql +++ b/snmp/mysql @@ -1,7 +1,7 @@ #!/usr/bin/env php Date: Wed, 8 
Mar 2017 09:51:04 +0100 Subject: [PATCH 111/497] Enable ipv6 in Xinetd (#100) * Fix indenting and enable IPv6 in Xinetd * Fix changelog * Typo --- check_mk_xinetd | 37 +++++++++++++++++++------------------ debian/changelog | 7 +++++++ 2 files changed, 26 insertions(+), 18 deletions(-) diff --git a/check_mk_xinetd b/check_mk_xinetd index 9fefd584c..fcc44d935 100644 --- a/check_mk_xinetd +++ b/check_mk_xinetd @@ -24,26 +24,27 @@ service check_mk { - type = UNLISTED - port = 6556 - socket_type = stream - protocol = tcp - wait = no - user = root - server = /usr/bin/check_mk_agent + type = UNLISTED + port = 6556 + socket_type = stream + protocol = tcp + bind = :: + wait = no + user = root + server = /usr/bin/check_mk_agent - # If you use fully redundant monitoring and poll the client - # from more then one monitoring servers in parallel you might - # want to use the agent cache wrapper: - #server = /usr/bin/check_mk_caching_agent + # If you use fully redundant monitoring and poll the client + # from more then one monitoring servers in parallel you might + # want to use the agent cache wrapper: + #server = /usr/bin/check_mk_caching_agent - # configure the IP address(es) of your Nagios server here: - #only_from = 127.0.0.1 10.0.20.1 10.0.20.2 + # configure the IP address(es) of your Nagios server here: + #only_from = 127.0.0.1 10.0.20.1 10.0.20.2 - # Don't be too verbose. Don't log every check. This might be - # commented out for debugging. If this option is commented out - # the default options will be used for this service. - log_on_success = + # Don't be too verbose. Don't log every check. This might be + # commented out for debugging. If this option is commented out + # the default options will be used for this service. 
+ log_on_success = - disable = no + disable = no } diff --git a/debian/changelog b/debian/changelog index 39a11a1e3..222de759c 100644 --- a/debian/changelog +++ b/debian/changelog @@ -1,3 +1,10 @@ +librenms-agent (1.0.7) stable; urgency=low + + - New upstream versions + - Enable IPv6 for Xinetd + + -- Mark Schouten Wed, 08 Mar 2017 00:36:00 +0200 + librenms-agent (1.0.6) stable; urgency=low - Fix a dirty hack to prevent failing of stats when the cluster is rebuilding From bad68744f903e8c2494360523894faef16882570 Mon Sep 17 00:00:00 2001 From: Tony Murray Date: Fri, 10 Mar 2017 06:29:19 -0600 Subject: [PATCH 112/497] Update mysql script to php7 version... (#104) * Update mysql script to php7 version... * Update mysql --- snmp/mysql | 78 +++++++++++++++++++++++++++++++----------------------- 1 file changed, 45 insertions(+), 33 deletions(-) diff --git a/snmp/mysql b/snmp/mysql index 698ac535d..8a2d05a97 100755 --- a/snmp/mysql +++ b/snmp/mysql @@ -6,6 +6,8 @@ # Enter the correct information to connect to your mysql server in mysql.cnf or below. +### This script requires php-cli and php-mysql packages + # ============================================================================ # This program is copyright (c) 2007 Baron Schwartz. Feedback and improvements # are welcome. @@ -36,15 +38,20 @@ if (!array_key_exists('SCRIPT_FILENAME', $_SERVER) # ============================================================================ # CONFIGURATION # ============================================================================ +# Instead of defining parameters here, you can define them in another +# file named the same as this file, with a .cnf extension. +# ============================================================================ -$mysql_user = 'root'; +$mysql_user = ''; $mysql_pass = ''; $mysql_host = 'localhost'; $mysql_port = 3306; $mysql_ssl = FALSE; # Whether to use SSL to connect to MySQL. -$heartbeat = ''; # db.tbl in case you use mk-heartbeat from Maatkit. 
-$cache_dir = '/tmp'; # If set, this uses caching to avoid multiple calls. -$poll_time = 300; # Adjust to match your polling interval. + +$heartbeat = ''; # db.tbl in case you use mk-heartbeat from Maatkit. +$cache_dir = '/tmp'; # If set, this uses caching to avoid multiple calls. +$cache_time = 30; # How long to cache data. + $chk_options = array ( 'innodb' => true, # Do you want to check InnoDB statistics? 'master' => true, # Do you want to check binary logging? @@ -64,8 +71,12 @@ $version = "1.1.7"; # ============================================================================ # Include settings from an external config file (issue 39). # ============================================================================ + if (file_exists(__FILE__ . '.cnf' ) ) { require(__FILE__ . '.cnf'); +} else { + echo("No ".__FILE__ . ".cnf found!\n"); + exit(); } # Make this a happy little script even when there are errors. @@ -87,18 +98,18 @@ function error_handler($errno, $errstr, $errfile, $errline) { # ============================================================================ # Set up the stuff we need to be called by the script server. # ============================================================================ -if ($use_ss ) { - if (file_exists( dirname(__FILE__) . "/../include/global.php") ) { - # See issue 5 for the reasoning behind this. - debug("including " . dirname(__FILE__) . "/../include/global.php"); - include_once(dirname(__FILE__) . "/../include/global.php"); - } - elseif (file_exists( dirname(__FILE__) . "/../include/config.php" ) ) { - # Some Cacti installations don't have global.php. - debug("including " . dirname(__FILE__) . "/../include/config.php"); - include_once(dirname(__FILE__) . "/../include/config.php"); - } -} +#if ($use_ss ) { +# if (file_exists( dirname(__FILE__) . "/../include/global.php") ) { +# # See issue 5 for the reasoning behind this. +# debug("including " . dirname(__FILE__) . 
"/../include/global.php"); +# include_once(dirname(__FILE__) . "/../include/global.php"); +# } +# elseif (file_exists( dirname(__FILE__) . "/../include/config.php" ) ) { +# # Some Cacti installations don't have global.php. +# debug("including " . dirname(__FILE__) . "/../include/config.php"); +# include_once(dirname(__FILE__) . "/../include/config.php"); +# } +#} # ============================================================================ # Make sure we can also be called as a script. @@ -179,7 +190,7 @@ function usage($message) { $usage = << --items [OPTION] +Usage: ./mysql --host --items [OPTION] --host Hostname to connect to; use host:port syntax to specify a port Use :/path/to/socket if you want to connect via a UNIX socket @@ -241,7 +252,7 @@ function parse_cmdline( $args ) { # ============================================================================ function ss_get_mysql_stats( $options ) { # Process connection options and connect to MySQL. - global $debug, $mysql_user, $mysql_pass, $heartbeat, $cache_dir, $poll_time, + global $debug, $mysql_user, $mysql_pass, $heartbeat, $cache_dir, $cache_time, $chk_options, $mysql_host, $mysql_port, $mysql_ssl; # Connect to MySQL. @@ -253,26 +264,26 @@ function ss_get_mysql_stats( $options ) { $heartbeat = isset($options['heartbeat']) ? $options['heartbeat'] : $heartbeat; # If there is a port, or if it's a non-standard port, we add ":$port" to the # hostname. - $host_str = $host - . $port != 3306 ? ":$port" : ''; + $host_str = $host.($port != 3306 ? 
":$port" : ''); debug(array('connecting to', $host_str, $user, $pass)); - if (!extension_loaded('mysql') ) { + if (!extension_loaded('mysqli') ) { debug("The MySQL extension is not loaded"); die("The MySQL extension is not loaded"); } if ($mysql_ssl || (isset($options['mysql_ssl']) && $options['mysql_ssl']) ) { - $conn = mysql_connect($host_str, $user, $pass, true, MYSQL_CLIENT_SSL); + $conn = ((($GLOBALS["___mysqli_ston"] = mysqli_init()) && (mysqli_real_connect($GLOBALS["___mysqli_ston"], $host_str, + $user, $pass, NULL, 3306, NULL, MYSQLI_CLIENT_SSL))) ? $GLOBALS["___mysqli_ston"] : FALSE); } else { - $conn = mysql_connect($host_str, $user, $pass); + $conn = ($GLOBALS["___mysqli_ston"] = mysqli_connect($host_str, $user, $pass)); } if (!$conn ) { - die("MySQL: " . mysql_error()); + die("MySQL: " . ((is_object($GLOBALS["___mysqli_ston"])) ? mysqli_error($GLOBALS["___mysqli_ston"]) : + (($___mysqli_res = mysqli_connect_error()) ? $___mysqli_res : false))); } $sanitized_host = str_replace(array(":", "/"), array("", "_"), $host); - $cache_file = "$cache_dir/$sanitized_host-mysql_cacti_stats.txt" - . $port != 3306 ? ":$port" : ''; + $cache_file = "$cache_dir/agent-local-mysql"; debug("Cache file is $cache_file"); # First, check the cache. @@ -282,7 +293,7 @@ function ss_get_mysql_stats( $options ) { $locked = flock($fp, 1); # LOCK_SH if ($locked ) { if (filesize($cache_file) > 0 - && filectime($cache_file) + ($poll_time/2) > time() + && filectime($cache_file) + ($cache_time) > time() && ($arr = file($cache_file)) ) {# The cache file is good to use. debug("Using the cache file"); @@ -297,7 +308,7 @@ function ss_get_mysql_stats( $options ) { # another process ran and updated it. Let's see if we can just # return the data now: if (filesize($cache_file) > 0 - && filectime($cache_file) + ($poll_time/2) > time() + && filectime($cache_file) + ($cache_time) > time() && ($arr = file($cache_file)) ) {# The cache file is good to use. 
debug("Using the cache file"); @@ -639,6 +650,7 @@ function ss_get_mysql_stats( $options ) { 'binary_log_space' => 'cz', 'innodb_locked_tables' => 'd0', 'innodb_lock_structs' => 'd1', + 'State_closing_tables' => 'd2', 'State_copying_to_tmp_table' => 'd3', 'State_end' => 'd4', @@ -655,6 +667,7 @@ function ss_get_mysql_stats( $options ) { 'State_writing_to_net' => 'df', 'State_none' => 'dg', 'State_other' => 'dh', + 'Handler_commit' => 'di', 'Handler_delete' => 'dj', 'Handler_discover' => 'dk', @@ -670,6 +683,7 @@ function ss_get_mysql_stats( $options ) { 'Handler_savepoint_rollback' => 'du', 'Handler_update' => 'dv', 'Handler_write' => 'dw', + # Some InnoDB stats added later... 'innodb_tables_in_use' => 'dx', 'innodb_lock_wait_secs' => 'dy', @@ -1107,16 +1121,16 @@ function to_int ( $str ) { function run_query($sql, $conn) { global $debug; debug($sql); - $result = @mysql_query($sql, $conn); + $result = @mysqli_query( $conn, $sql); if ($debug ) { - $error = @mysql_error($conn); + $error = @((is_object($conn)) ? mysqli_error($conn) : (($___mysqli_res = mysqli_connect_error()) ? $___mysqli_res : false)); if ($error ) { debug(array($sql, $error)); die("SQLERR $error in $sql"); } } $array = array(); - while ( $row = @mysql_fetch_array($result) ) { + while ( $row = @mysqli_fetch_array($result) ) { $array[] = $row; } debug(array($sql, $array)); @@ -1234,5 +1248,3 @@ function debug($val) { $debug_log = FALSE; } } - -?> From e80d6c2fa24e4e72092cc28cfb345b6777586643 Mon Sep 17 00:00:00 2001 From: VVelox Date: Sun, 19 Mar 2017 13:03:59 -0500 Subject: [PATCH 113/497] add unbound SNMP extend script (#102) --- snmp/unbound | 13 +++++++++++++ 1 file changed, 13 insertions(+) create mode 100644 snmp/unbound diff --git a/snmp/unbound b/snmp/unbound new file mode 100644 index 000000000..622d48488 --- /dev/null +++ b/snmp/unbound @@ -0,0 +1,13 @@ +#!/bin/sh + +# Add this to snmpd.conf as below. +# extend unbound /root/snmp-extends/unbound + +# Set the path to unbound-control. 
+unbountctl='/usr/bin/env unbound-control' + +## +## You should not need to change anything below. +## + +$unbountctl stats From f6f18d513a560b745f9e88aabb5d9c7c315e6e83 Mon Sep 17 00:00:00 2001 From: VVelox Date: Wed, 22 Mar 2017 09:28:57 -0500 Subject: [PATCH 114/497] add SMART SNMP extend script (#101) * add SMART SNMP extend * cleanup default disk examples * correct a small typo * add option caching support * add checking selftest log and nolonger zeros non-existent IDs * now uses a config file * add the ability to guess at the config * properly remove device entries with partitions now and avoid adding dupes in a better manner * now have smartctl scan as well to see if it missed anything * note why ses and pass are ignored * properly use the cache file in the config now * actually use the cache now --- snmp/smart | 344 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 344 insertions(+) create mode 100755 snmp/smart diff --git a/snmp/smart b/snmp/smart new file mode 100755 index 000000000..3dd273ccf --- /dev/null +++ b/snmp/smart @@ -0,0 +1,344 @@ +#!/usr/bin/env perl +#Copyright (c) 2017, Zane C. Bowers-Hadley +#All rights reserved. +# +#Redistribution and use in source and binary forms, with or without modification, +#are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +#ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +#WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+#IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, +#INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +#BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +#LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +#OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +#THE POSSIBILITY OF SUCH DAMAGE. + +=for comment + +Add this to snmpd.conf like below. + + extend smart /etc/snmp/smart + +Then add to root's cron tab, if you have more than a few disks. + + */3 * * * * /etc/snmp/smart -u + +You will also need to create the config file, which defaults to the same path as the script, +but with .config appended. So if the script is located at /etc/snmp/smart, the config file +will be /etc/snmp/smart.config. Alternatively you can also specific a config via -c. + +Anything starting with a # is comment. The format for variables is $variable=$value. Empty +lines are ignored. Spaces and tabes at either the start or end of a line are ignored. Any +line with out a = or # are treated as a disk. + + #This is a comment + cache=/var/cache/smart + smartctl=/usr/bin/env smartctl + ada0 + ada1 + +The variables are as below. + + cache = The path to the cache file to use. Default: /var/cache/smart + smartctl = The path to use for smartctl. Default: /usr/bin/env smartctl + +If you want to guess at the configuration, call it with -g and it will print out what it thinks +it should be. + +=cut + +## +## You should not need to touch anything below here. +## +my $cache='/var/cache/smart'; +my $smartctl='/usr/bin/env smartctl'; +my @disks; + +use warnings; +use strict; +use Getopt::Std; + +$Getopt::Std::STANDARD_HELP_VERSION = 1; +sub main::VERSION_MESSAGE { + print "SMART SNMP extend 0.0.0\n"; +}; + + +sub main::HELP_MESSAGE { + print "\n". + "-u Update '".$cache."'\n". 
+ "-g Guess at the config and print it to STDOUT.\n". + "-c The config file to use.\n"; +} + +#gets the options +my %opts=(); +getopts('ugc:', \%opts); + +# guess if asked +if ( defined( $opts{g} ) ){ + + #get what path to use for smartctl + $smartctl=`which smartctl`; + chomp($smartctl); + if ( $? != 0 ){ + warn("'which smartctl' failed with a exit code of $?"); + exit 1; + } + + #try to touch the default cache location and warn if it can't be done + system('touch '.$cache.'>/dev/null'); + if ( $? != 0 ){ + $cache='#Could not touch '.$cache. "You will need to manually set it\n". + "cache=?\n"; + }else{ + $cache='cache='.$cache."\n"; + } + + my %found_disks; + + #check for drives named /dev/sd* + my @matches=glob('/dev/sd*'); + @matches=grep(!/[0-9]/, @matches); + my $matches_int=0; + while ( defined( $matches[$matches_int] ) ){ + my $device=$matches[$matches_int]; + system( $smartctl.' -A '.$device.' > /dev/null' ); + if ( $? == 0 ){ + $device =~ s/\/dev\///; + $found_disks{$device}=1; + } + + $matches_int++; + } + + #check for drives named /dev/ada* + @matches=glob('/dev/ada*'); + @matches=grep(!/[ps]/, @matches); + $matches_int=0; + while ( defined( $matches[$matches_int] ) ){ + my $device=$matches[$matches_int]; + system( $smartctl.' -A '.$device.' > /dev/null' ); + if ( $? == 0 ){ + $device =~ s/\/dev\///; + $found_disks{$device}=1; + } + + $matches_int++; + } + + #check for drives named /dev/da* + @matches=glob('/dev/da*'); + @matches=grep(!/[ps]/, @matches); + $matches_int=0; + while ( defined( $matches[$matches_int] ) ){ + my $device=$matches[$matches_int]; + system( $smartctl.' -A '.$device.' > /dev/null' ); + if ( $? 
== 0 ){ + $device =~ s/\/dev\///; + $found_disks{$device}=1; + } + + $matches_int++; + } + + #have smartctl scan and see if it finds anythings not get found + my $scan_output=`$smartctl --scan-open`; + my @scan_outputA=split(/\n/, $scan_output); + @scan_outputA=grep(!/ses[0-9]/, @scan_outputA); # not a disk, but may or may not have SMART attributes + @scan_outputA=grep(!/pass[0-9]/, @scan_outputA); # very likely a duplicate and a disk under another name + $matches_int=0; + while ( defined( $scan_outputA[$matches_int] ) ){ + my $device=$scan_outputA[$matches_int]; + $device =~ s/ .*//; + system( $smartctl.' -A '.$device.' > /dev/null' ); + if ( $? == 0 ){ + $device =~ s/\/dev\///; + $found_disks{$device}=1; + } + + $matches_int++; + } + + print 'smartctl='.$smartctl."\n". + $cache. + join( "\n", keys(%found_disks) )."\n"; + + exit 0; +} + +#get which config file to use +my $config=$0.'.config'; +if ( defined( $opts{c} ) ){ + $config=$opts{c}; +} + +#reads the config file, optionally +my $config_file=''; +open(my $readfh, "<", $config) or die "Can't open '".$config."'"; +read($readfh , $config_file , 1000000); +close($readfh); + +#parse the config file and remove comments and empty lines +my @configA=split(/\n/, $config_file); +@configA=grep(!/^$/, @configA); +@configA=grep(!/^\#/, @configA); +@configA=grep(!/^[\s\t]*$/, @configA); +my $configA_int=0; +while ( defined( $configA[$configA_int] ) ){ + my $line=$configA[$configA_int]; + $line=~s/^[\t\s]+//; + $line=~s/[\t\s]+$//; + + my ( $var, $val )=split(/=/, $line, 2); + + if ( $var eq 'cache' ){ + $cache=$val; + } + + if ( $var eq 'smartctl' ){ + $smartctl=$val; + } + + if ( !defined( $val ) ){ + push(@disks, $var); + } + + $configA_int++; +} + +#if set to 1, no cache will be written and it will be printed instead +my $noWrite=0; + +# if no -u, it means we are being called from snmped +if ( ! 
defined( $opts{u} ) ){ + # if the cache file exists, print it, otherwise assume one is not being used + if ( -f $cache ){ + my $old=''; + open(my $readfh, "<", $cache) or die "Can't open '".$cache."'"; + read($readfh , $old , 1000000); + close($readfh); + print $old; + exit 0; + }else{ + $opts{u}=1; + $noWrite=1; + } +} + +my $toReturn=''; +my $int=0; +while ( defined($disks[$int]) ) { + my $disk=$disks[$int]; + my $output=`$smartctl -A /dev/$disk`; + + my %IDs=( '5'=>'null', + '10'=>'null', + '173'=>'null', + '177'=>'null', + '183'=>'null', + '184'=>'null', + '187'=>'null', + '188'=>'null', + '190'=>'null', + '194'=>'null', + '196'=>'null', + '197'=>'null', + '198'=>'null', + '199'=>'null', + '231'=>'null', + '233'=>'null', + ); + + my @outputA=split( /\n/, $output ); + my $outputAint=0; + while ( defined($outputA[$outputAint]) ) { + my $line=$outputA[$outputAint]; + $line=~s/^ +//; + $line=~s/ +/ /g; + + if ( $line =~ /^[0123456789]+ / ) { + my @lineA=split(/\ /, $line, 10); + my $raw=$lineA[9]; + my $id=$lineA[0]; + + # single int raw values + if ( + ( $id == 5 ) || + ( $id == 10 ) || + ( $id == 173 ) || + ( $id == 177 ) || + ( $id == 183 ) || + ( $id == 184 ) || + ( $id == 187 ) || + ( $id == 198 ) || + ( $id == 199 ) || + ( $id == 231 ) || + ( $id == 233 ) + ) { + $IDs{$id}=$raw; + } + + # 188, Command_Timeout + if ( $id == 188 ) { + my $total=0; + my @rawA=split( /\ /, $raw ); + my $rawAint=0; + while ( defined( $rawA[$rawAint] ) ) { + $total=$total+$rawA[$rawAint]; + $rawAint++; + } + + } + + # 190, airflow temp + # 194, temp + if ( + ( $id == 190 ) || + ( $id == 194 ) + ) { + my ( $temp )=split(/\ /, $raw); + $IDs{$id}=$temp; + } + + } + + $outputAint++; + } + + #get the selftest logs + $output=`$smartctl -l selftest /dev/$disk`; + @outputA=split( /\n/, $output ); + my $completed=scalar grep(/Completed without error/, @outputA); + my $interrupted=scalar grep(/Interrupted/, @outputA); + my $read_failure=scalar grep(/read failure/, @outputA); + my 
$unknown_failure=scalar grep(/unknown failure/, @outputA); + my $extended=scalar grep(/Extended/, @outputA); + my $short=scalar grep(/Short/, @outputA); + my $conveyance=scalar grep(/Conveyance/, @outputA); + my $selective=scalar grep(/Selective/, @outputA); + + + $toReturn=$toReturn.$disk.','.$IDs{'5'}.','.$IDs{'10'}.','.$IDs{'173'}.','.$IDs{'177'}.','.$IDs{'183'}.','.$IDs{'184'}.','.$IDs{'187'}.','.$IDs{'188'} + .','.$IDs{'190'} .','.$IDs{'194'}.','.$IDs{'196'}.','.$IDs{'197'}.','.$IDs{'198'}.','.$IDs{'199'}.','.$IDs{'231'}.','.$IDs{'233'}.','. + $completed.','.$interrupted.','.$read_failure.','.$unknown_failure.','.$extended.','.$short.','.$conveyance.','.$selective."\n"; + + $int++; +} + +if ( ! $noWrite ){ + open(my $writefh, ">", $cache) or die "Can't open '".$cache."'"; + print $writefh $toReturn; + close($writefh); +}else{ + print $toReturn; +} From 87fe0bd192897432fd590351a2b75004f85b3929 Mon Sep 17 00:00:00 2001 From: Florian Beer Date: Thu, 30 Mar 2017 00:00:26 +0200 Subject: [PATCH 115/497] Postfix app bug fixes (#105) * Postfix app bug fixes - add missing DS - fix some totals * Move new variable to end of output --- snmp/postfixdetailed | 39 ++++++++++++++++++++++++++++++--------- 1 file changed, 30 insertions(+), 9 deletions(-) diff --git a/snmp/postfixdetailed b/snmp/postfixdetailed index 20d58946e..9bf71e9c7 100644 --- a/snmp/postfixdetailed +++ b/snmp/postfixdetailed @@ -86,6 +86,7 @@ my ( $received, $rardnf, $rarnfqa, $iuscp, + $msefl, $sce, $scp, $urr) = split ( /\n/, $old ); @@ -118,6 +119,7 @@ if ( ! defined( $sarnfqa ) ){ $sarnfqa=0; } if ( ! defined( $rardnf ) ){ $rardnf=0; } if ( ! defined( $rarnfqa ) ){ $rarnfqa=0; } if ( ! defined( $iuscp ) ){ $iuscp=0; } +if ( ! defined( $msefl ) ){ $msefl=0; } if ( ! defined( $sce ) ){ $sce=0; } if ( ! defined( $scp ) ){ $scp=0; } if ( ! 
defined( $urr ) ){ $urr=0; } @@ -151,6 +153,7 @@ my $sarnfqaC=0; my $rardnfC=0; my $rarnfqaC=0; my $iuscpC=0; +my $mseflC=0; my $sceC=0; my $scpC=0; my $urrC=0; @@ -185,16 +188,22 @@ sub newValue{ if ( $old > $new ){ return $new; }; - + return $new - $old; } my $output=`$pflogsumm /var/log/maillog`; +#holds client host rejected values till the end when it is compared to the old one +my $chrNew=0; + #holds RBL values till the end when it is compared to the old one my $buNew=0; +# holds recipient address rejected values till the end when it is compared to the old one +my $raruuNew=0; + #holds the current values for checking later my $current=''; @@ -344,16 +353,15 @@ while ( defined( $outputA[$int] ) ){ # deferrals Host is down if ( ( $line =~ /Host is down$/ ) && ( ! $handled ) ){ $line=~s/ .*//; - $deferralcrC=$line; - $deferralcr=newValue( $deferralcr, $line ); + $deferralhidC=$line; + $deferralhid=newValue( $deferralhid, $line ); $handled=1; } # Client host rejected if ( ( $line =~ /Client host rejected/ ) && ( ! $handled ) ){ $line=~s/.*\: //g; - $chrC=$line; - $chr=newValue( $chr, $line ); + $chrNew=$chrNew + $line; $handled=1; } @@ -393,8 +401,7 @@ while ( defined( $outputA[$int] ) ){ #Recipient address rejected: User unknown if ( ( $line =~ /Recipient address rejected\: User unknown/ ) && ( ! $handled ) ){ $line=~s/.*\: //g; - $raruuC=$line; - $raruu=newValue( $raruu, $line ); + $raruuNew=$raruuNew + $line; $handled=1; } @@ -447,12 +454,24 @@ while ( defined( $outputA[$int] ) ){ $scp=newValue( $scp, $line ); } + #unknown reject reason + if ( ( $line =~ /unknown reject reason/ ) && ( ! $handled ) ){ + $line=~s/.*\: //g; + $urrC=$line; + $urr=newValue( $urr, $line ); + } $int++; } +# final client host rejected total +$chr=newValue( $chr, $chrNew ); + # final RBL total $bu=newValue( $bu, $buNew ); +# final recipient address rejected total +$raruu=newValue( $raruu, $raruuNew ); + my $data=$received."\n". $delivered."\n". $forwarded."\n". 
@@ -484,6 +503,7 @@ my $data=$received."\n". $sce."\n". $scp."\n". $urr."\n"; + $msefl."\n". print $data; @@ -504,17 +524,18 @@ my $current=$receivedC."\n". $recipienthdC."\n". $deferralcrC."\n". $deferralhidC."\n". - $chrC."\n". + $chrNew."\n". $hcrnfqhC."\n". $sardnfC."\n". $sarnobuC."\n". $buNew."\n". - $raruuC."\n". + $raruuNew."\n". $hcrinC."\n". $sarnfqaC."\n". $rardnfC."\n". $rarnfqaC."\n". $iuscpC."\n". + $mseflC."\n". $sceC."\n". $scpC."\n". $urrC."\n"; From 5dcd3bfa217dab70caa16463a1e322c943526a62 Mon Sep 17 00:00:00 2001 From: Tony Murray Date: Wed, 29 Mar 2017 19:11:23 -0500 Subject: [PATCH 116/497] Copy powerdns-recursor to snmp and remove <<>> header (#111) --- snmp/powerdns-recursor | 12 ++++++++++++ 1 file changed, 12 insertions(+) create mode 100755 snmp/powerdns-recursor diff --git a/snmp/powerdns-recursor b/snmp/powerdns-recursor new file mode 100755 index 000000000..d673738bf --- /dev/null +++ b/snmp/powerdns-recursor @@ -0,0 +1,12 @@ +#!/usr/bin/python +import json, subprocess +from subprocess import Popen, PIPE + +input = Popen(['rec_control', 'get-all'], stdout=PIPE).communicate()[0] +data = [] + +for line in input.splitlines(): + item = line.split() + data.append({'name': item[0].decode(), 'value': int(item[1].decode())}) + +print(json.dumps(data)) From 5242c61bbaabae2180725fd0f2abecdefa4174cd Mon Sep 17 00:00:00 2001 From: Philip Rosenberg-Watt Date: Thu, 6 Apr 2017 03:24:36 -0600 Subject: [PATCH 117/497] fix: Update proxmox agent to use new Perl module (#88) PVE::API2Client is deprecated in Proxmox 4.4-6. Agent now requires installation of libpve-apiclient-perl via apt. This commit fixes #81. 
--- agent-local/proxmox | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/agent-local/proxmox b/agent-local/proxmox index 09f0fb3bd..83184144b 100755 --- a/agent-local/proxmox +++ b/agent-local/proxmox @@ -19,7 +19,7 @@ use constant { }; use strict; -use PVE::API2Client; +use PVE::APIClient::LWP; use PVE::AccessControl; use PVE::INotify; use Data::Dumper; @@ -29,14 +29,14 @@ my $hostname = PVE::INotify::read_file("hostname"); my $ticket = PVE::AccessControl::assemble_ticket('root@pam'); my $csrftoken = PVE::AccessControl::assemble_csrf_prevention_token('root@pam'); -my $conn = PVE::API2Client->new( +my $conn = PVE::APIClient::LWP->new( ticket => $ticket, csrftoken => $csrftoken, ); my $clustername; -foreach my $child (@{$conn->get("/api2/json/cluster/status")->{'data'}}) { +foreach my $child (@{$conn->get("/api2/json/cluster/status")}) { if ($child->{'type'} eq "cluster") { $clustername = $child->{'name'}; } @@ -50,19 +50,19 @@ print "<<>>\n"; print "$clustername\n"; -foreach my $vm (@{$conn->get("/api2/json/nodes/$hostname/netstat")->{'data'}}) { +foreach my $vm (@{$conn->get("/api2/json/nodes/$hostname/netstat")}) { my $vmid = $vm->{'vmid'}; eval { - my $vmname = $conn->get("/api2/json/nodes/$hostname/qemu/$vmid/config")->{'data'}->{'name'}; - my $tmpl = $conn->get("/api2/json/nodes/$hostname/qemu/$vmid/config")->{'data'}->{'template'}; + my $vmname = $conn->get("/api2/json/nodes/$hostname/qemu/$vmid/config")->{'name'}; + my $tmpl = $conn->get("/api2/json/nodes/$hostname/qemu/$vmid/config")->{'template'}; if (defined($tmpl) && $tmpl == 1) { die; } print "$vmid/$vm->{'dev'}/$vm->{'in'}/$vm->{'out'}/$vmname\n"; }; eval { - my $vmname = $conn->get("/api2/json/nodes/$hostname/lxc/$vmid/config")->{'data'}->{'hostname'}; - my $tmpl = $conn->get("/api2/json/nodes/$hostname/lxc/$vmid/config")->{'data'}->{'template'}; + my $vmname = $conn->get("/api2/json/nodes/$hostname/lxc/$vmid/config")->{'hostname'}; + my $tmpl = 
$conn->get("/api2/json/nodes/$hostname/lxc/$vmid/config")->{'template'}; if (defined($tmpl) && $tmpl == 1) { die; } From 00f5b13a4772a4137b4b3f8bb22e7437849a4794 Mon Sep 17 00:00:00 2001 From: Chris Putnam Date: Fri, 7 Apr 2017 01:45:56 -0500 Subject: [PATCH 118/497] hddtemp: improve detection of drives (#115) Previously, this script was only able to find 26 drives (sda-sdz) due to the use of globbing. A better strategy for detecting drives would be to use lsblk on systems that support it, failing over to globbing. This patch adds support both for lsblk and a more comprehensive glob solution with find that will at least catch 26^2 drives. --- agent-local/hddtemp | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/agent-local/hddtemp b/agent-local/hddtemp index 18e3bdbad..4f533be99 100755 --- a/agent-local/hddtemp +++ b/agent-local/hddtemp @@ -10,10 +10,14 @@ # option) any later version. Please see LICENSE.txt at the top level of # the source code distribution for details. # -# requires which, awk and sed +# requires which, find, awk and sed -# If disks are missing, they can be added here: -disks="/dev/hd? /dev/sd?" +# Try to use lsblk if available. Otherwise, use find. +if type lsblk >/dev/null 2>&1; then + disks=`lsblk -dnp|cut -d' ' -f1 | tr '\n' ' '` +else + disks=`find /dev -name '[sh]d[a-z]' -or -name '[sh]d[a-z][a-z]' | tr '\n' ' '` +fi hddtemp=`which hddtemp 2>/dev/null` From ae8b5b8a0a42019ab3d8489553561ce3176b2e62 Mon Sep 17 00:00:00 2001 From: Chris Putnam Date: Tue, 18 Apr 2017 13:32:41 -0700 Subject: [PATCH 119/497] hddtemp: parallelize calls to hddtemp for performance (#117) This poll script runs hddtemp with a list of all drives as arguments and reads the output. hddtemp scans each drive's SMART status serially, which scales poorly with a large number of drives. In lieu of a patch to the actual hddtemp project, optionally use GNU parallel when available to parallelize the call to hddtemp. 
In testing a machine with 58 drives I went from a runtime of about 5 seconds per run to 0.5s, a performance improvement of 10x. --- agent-local/hddtemp | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/agent-local/hddtemp b/agent-local/hddtemp index 4f533be99..e2b99d759 100755 --- a/agent-local/hddtemp +++ b/agent-local/hddtemp @@ -11,6 +11,9 @@ # the source code distribution for details. # # requires which, find, awk and sed +# +# optionally, install gnu parallel for a significant performance boost +# on machines with large numbers of drives. # Try to use lsblk if available. Otherwise, use find. if type lsblk >/dev/null 2>&1; then @@ -23,7 +26,13 @@ hddtemp=`which hddtemp 2>/dev/null` if [ "${hddtemp}" != "" ]; then if [ -x "${hddtemp}" ]; then - content=`${hddtemp} -w -q ${disks} 2>/dev/null | awk -F": " 'BEGIN{ ORS="" }{ print "|"$1"|"$2"|"$3"|";} ' | sed 's/[° ]C|/|C|/g' | sed 's/[° ]F|/|F|/g'` + if type parallel > /dev/null 2>&1; then + # When available, use GNU parallel for a significant performance boost. hddtemp runs serially(!) 
+ output=`parallel ${hddtemp} -w -q ::: ${disks} 2>/dev/null` + else + output=`${hddtemp} -w -q ${disks} 2>/dev/null` + fi + content=`echo "$output" | awk -F": " 'BEGIN{ ORS="" }{ print "|"$1"|"$2"|"$3"|";} ' | sed 's/[° ]C|/|C|/g' | sed 's/[° ]F|/|F|/g'` if [ "${content}" != "" ]; then echo '<<>>' echo ${content} From 5f7fe1ff057b1bbd76272a3b49116310f5b258c7 Mon Sep 17 00:00:00 2001 From: Svennd Date: Tue, 18 Apr 2017 22:34:05 +0200 Subject: [PATCH 120/497] add support for SGE/rocks job tracker (#118) --- agent-local/rocks.sh | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) create mode 100755 agent-local/rocks.sh diff --git a/agent-local/rocks.sh b/agent-local/rocks.sh new file mode 100755 index 000000000..f3547adaa --- /dev/null +++ b/agent-local/rocks.sh @@ -0,0 +1,22 @@ +#!/bin/bash +# This program is free software: you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by the +# Free Software Foundation, either version 3 of the License, or (at your +# option) any later version. Please see LICENSE.txt at the top level of +# the source code distribution for details. 
+# @author SvennD + +# required +source /etc/profile.d/sge-binaries.sh; + +QSTAT="/opt/gridengine/bin/linux-x64/qstat" +RUNNING_JOBS=$($QSTAT -u "*" -s r | wc -l) +PENDING_JOBS=$($QSTAT -u "*" -s p | wc -l) +SUSPEND_JOBS=$($QSTAT -u "*" -s s | wc -l) +ZOMBIE_JOBS=$($QSTAT -u "*" -s z | wc -l) + +echo $RUNNING_JOBS; +echo $PENDING_JOBS; +echo $SUSPEND_JOBS; +echo $ZOMBIE_JOBS; + From 1dd46e3d546e9aa1116b053ce6d103b6596a847f Mon Sep 17 00:00:00 2001 From: RedChops Date: Sat, 22 Apr 2017 19:29:00 -0400 Subject: [PATCH 121/497] Include missing SMART ids in the output (#120) --- snmp/smart | 2 ++ 1 file changed, 2 insertions(+) diff --git a/snmp/smart b/snmp/smart index 3dd273ccf..35e059916 100755 --- a/snmp/smart +++ b/snmp/smart @@ -280,6 +280,8 @@ while ( defined($disks[$int]) ) { ( $id == 183 ) || ( $id == 184 ) || ( $id == 187 ) || + ( $id == 196 ) || + ( $id == 197 ) || ( $id == 198 ) || ( $id == 199 ) || ( $id == 231 ) || From 6f3d875f17b3e1c159231879ba80ac86b3336491 Mon Sep 17 00:00:00 2001 From: crcro Date: Thu, 27 Apr 2017 22:54:55 +0300 Subject: [PATCH 122/497] snmp-extend: sdfsinfo (#122) * sdfsinfo app snmp extend * rewrite script to bash * more vars --- snmp/sdfsinfo | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) create mode 100644 snmp/sdfsinfo diff --git a/snmp/sdfsinfo b/snmp/sdfsinfo new file mode 100644 index 000000000..56901ec97 --- /dev/null +++ b/snmp/sdfsinfo @@ -0,0 +1,26 @@ +#!/usr/bin/env bash + +################################################################## +# +# SDFS info SNMP extend script +# datapoint -> info (type, descr): +# 00 -> files (int) +# 01 -> volume capacity (float, GB) +# 02 -> volume logical size (float, GB) +# 03 -> volume max load (float, %) +# 04 -> volume duplicate data (float, GB) +# 05 -> unique blocks stored (float, GB) +# 06 -> unique blocks stored after compression (float, GB) +# 07 -> cluster block copies (int) +# 08 -> volume dedup rate (float, %) +# 09 -> volume savings (float, %) +# 10 -> 
compression rate (float, %) +# +################################################################## + +SDFSCLI_BIN=`which sdfscli` +SDFSCLI_CMD=' --volume-info' +GREP_BIN=`which grep` +GREP_CMD=' -o -E ' +DATAPOINTS=`$SDFSCLI_BIN $SDFSCLI_CMD | $GREP_BIN $GREP_CMD "(([0-9]+)\.?([0-9]+)?)"` +echo $DATAPOINTS From 90655f79be9ae0e63ed8e8b58ac390e4bcb8ac2c Mon Sep 17 00:00:00 2001 From: Aldemir Akpinar Date: Tue, 2 May 2017 12:22:19 +0300 Subject: [PATCH 123/497] Added Devuan GNU/Linux support (#124) --- snmp/distro | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/snmp/distro b/snmp/distro index 482adcdd3..5886e5cd0 100755 --- a/snmp/distro +++ b/snmp/distro @@ -40,6 +40,10 @@ elif [ "${OS}" = "Linux" ] ; then PSEUDONAME=`cat /etc/mandrake-release | sed s/.*\(// | sed s/\)//` REV=`cat /etc/mandrake-release | sed s/.*release\ // | sed s/\ .*//` + elif [ -f /etc/devuan_version ] ; then + DIST="Devuan `cat /etc/devuan_version`" + REV="" + elif [ -f /etc/debian_version ] ; then DIST="Debian `cat /etc/debian_version`" REV="" From 2a57af73469aa8195bf40043ea9827e77edbbc54 Mon Sep 17 00:00:00 2001 From: VVelox Date: Wed, 3 May 2017 09:23:40 -0500 Subject: [PATCH 124/497] BIND cleanup and expansion (#108) * add BIND named SNMP extend * nolonger piss the entire stats across the wire, but crunch them and return them * more work on bind * more misc. updates * add proper agent support as well as optional zeroing * add -m --- snmp/bind | 849 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 849 insertions(+) create mode 100644 snmp/bind diff --git a/snmp/bind b/snmp/bind new file mode 100644 index 000000000..d55b46624 --- /dev/null +++ b/snmp/bind @@ -0,0 +1,849 @@ +#!/usr/bin/env perl +#Copyright (c) 2017, Zane C. Bowers-Hadley +#All rights reserved. 
+# +#Redistribution and use in source and binary forms, with or without modification, +#are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +#ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +#WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +#IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, +#INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +#BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +#LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +#OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +#THE POSSIBILITY OF SUCH DAMAGE. + +=for comment + +Add this to snmpd.conf as below and restart it. + + extend bind /etc/snmp/bind + +You may also need to create the config file, which defaults to the same path as the script, +but with .config appended. So if the script is located at /etc/snmp/bind, the config file +will be /etc/snmp/bind.config. Alternatively you can also specific a config via -c. + +Anything starting with a # is comment. The format for variables is $variable=$value. Empty +lines are ignored. Spaces and tabes at either the start or end of a line are ignored. + +The variables are as below. + + rndc = The path to rndc. Default: /usr/bin/env rndc + call_rndc = A 0/1 boolean on weather to call rndc stats. 
Suggest to set to 0 if using netdata. Default: 1 + stats_file = The path to the named stats file. Default: /var/run/named/stats + agent = A 0/1 boolean for if this is being used as a LibreNMS agent or not. Default: 0 + zero_stats = A 0/1 boolean for if the stats file should be zeroed first. Default: 0 (1 if guessed) + +If you want to guess at the configuration, call it with -g and it will print out what it thinks +it should be. + +=cut + +## +## You should not need to touch anything below here. +## +my $call_rndc=1; +my $rndc='/usr/bin/env rndc'; +my $stats_file='/var/run/named/stats'; +my $zero_stats=0; +my $agent=0; +my $missing=0; + +use strict; +use warnings; +use File::ReadBackwards; +use Getopt::Std; + +$Getopt::Std::STANDARD_HELP_VERSION = 1; +sub main::VERSION_MESSAGE { + print "BIND named stats extend 0.0.0\n"; +}; + + +sub main::HELP_MESSAGE { + print "\n". + "-c The config file to use.\n". + "-m print any unknowns and exit\n". + "-g Guess at the config and print it to STDOUT.\n"; +} + +#gets the options +my %opts=(); +getopts('gmc:', \%opts); + +# guess if asked +if ( defined( $opts{g} ) ){ + #get what path to use for rndc + $rndc=`which rndc`; + chomp($rndc); + if ( $? != 0 ){ + warn("'which rndc' failed with a exit code of $?"); + exit 1; + }else{ + $rndc="# This is the path to rndc.\n". + 'rndc='.$rndc."\n"; + } + + #make a basic guess at the stats file + if ( -f $stats_file ){ + # a more sane location + $stats_file="# This is the the path to the named stats file.\n". + 'stats_file='.$stats_file."\n"; + }elsif( -f '/etc/bind/named.stats' ){ + # this is if the person using the old suggested config in the LibreNMS docs + $stats_file="# This is the the path to the named stats file.\n". + "stats_file=/etc/bind/named.stats\n"; + }else{ + #we find it + $stats_file="# This is the the path to the named stats file.\n". + "# Please make sure this has been set to the value of statistics-file in named.conf.\n". 
+ "stats_file=?\n"; + } + + if ( $0 =~ /agent/ ){ + $agent='agent=1'; + }else{ + $agent='agent=0'; + } + + print "# The default config file is... ".$0.".config\n". + $rndc. + $stats_file. + "# This is a 0/1 boolean for if rndc should be called.\n". + "# If you are using netdata, you most likely want to set this to 0.\n". + "call_rndc=1\n". + "# This is a 0/1 boolean for this is being used as a LibreNMS agent.\n". + $agent."\n". + "# This is a 0/1 boolean for if the stats file should be zeroed before calling rndc stats.\n". + "zero_stats=1\n"; + + exit 0; +} + +#get which config file to use +my $config=$0.'.config'; +if ( defined( $opts{c} ) ){ + $config=$opts{c}; +} + +#reads the config file +my $config_file=''; +if ( -f $config ){ + open(my $readfh, "<", $config) or die "Can't open '".$config."'"; + read($readfh , $config , 1000000); + close($readfh); + + #parse the config file and remove comments and empty lines + my @configA=split(/\n/, $config_file); + @configA=grep(!/^$/, @configA); + @configA=grep(!/^\#/, @configA); + @configA=grep(!/^[\s\t]*$/, @configA); + my $configA_int=0; + while ( defined( $configA[$configA_int] ) ){ + my $line=$configA[$configA_int]; + $line=~s/^[\t\s]+//; + $line=~s/[\t\s]+$//; + + my ( $var, $val )=split(/=/, $line, 2); + + if ( $var eq 'call_rndc' ){ + $call_rndc=$val; + } + + if ( $var eq 'rndc' ){ + $rndc=$val; + } + + if ( $var eq 'stats_file' ){ + $stats_file=$val; + } + + if ( $var eq 'agent' ){ + $agent=$val; + } + + if ( $var eq 'zero_stats' ){ + $zero_stats=$val; + } + + $configA_int++; + } +} + +#zero the stats if needed +if ( $zero_stats ){ + system('echo > '.$stats_file); + if ( $? != 0 ){ + die ("'echo > $stats_file' failed with a system return value of $?"); + } +} + +# call rndc if needed and die if it failes +if ( $call_rndc ){ + system($rndc.' stats'); + if ( $? 
!= 0 ){ + die ("'$rndc stats' failed with a system return value of $?"); + } +} + +my $bw=File::ReadBackwards->new( $stats_file ) or + die( "con't read '$stats_file': $!" ); + +#read backwards till we find the start of the last stats entry +my $read=1; +my @data; +until ( + ($bw->eof) || + ( ! $read ) + ){ + + my $new_line=$bw->readline; + $data[$#data++]=$new_line; + + if ($new_line =~ /^\+\+\+\ Statistics\ Dump\ \+\+\+/){ + $read=0; + } +} + +my %incoming=( + 'A'=>0, + 'AAAA'=>0, + 'AFSDB'=>0, + 'APL'=>0, + 'CAA'=>0, + 'CDNSKEY'=>0, + 'CDS'=>0, + 'CERT'=>0, + 'CNAME'=>0, + 'DHCID'=>0, + 'DLV'=>0, + 'DNSKEY'=>0, + 'DS'=>0, + 'IPSECKEY'=>0, + 'KEY'=>0, + 'KX'=>0, + 'LOC'=>0, + 'MX'=>0, + 'NAPTR'=>0, + 'NS'=>0, + 'NSEC'=>0, + 'NSEC3'=>0, + 'NSEC3PARAM'=>0, + 'PTR'=>0, + 'RRSIG'=>0, + 'RP'=>0, + 'SIG'=>0, + 'SOA'=>0, + 'SRV'=>0, + 'SSHFP'=>0, + 'TA'=>0, + 'TKEY'=>0, + 'TLSA'=>0, + 'TSIG'=>0, + 'TXT'=>0, + 'URI'=>0, + 'DNAME'=>0, + 'ANY'=>0, + 'AXFR'=>0, + 'IXFR'=>0, + 'OPT'=>0, + 'SPF'=>0, + ); + +my %outgoing=( + 'A'=>0, + 'AAAA'=>0, + 'AFSDB'=>0, + 'APL'=>0, + 'CAA'=>0, + 'CDNSKEY'=>0, + 'CDS'=>0, + 'CERT'=>0, + 'CNAME'=>0, + 'DHCID'=>0, + 'DLV'=>0, + 'DNSKEY'=>0, + 'DS'=>0, + 'IPSECKEY'=>0, + 'KEY'=>0, + 'KX'=>0, + 'LOC'=>0, + 'MX'=>0, + 'NAPTR'=>0, + 'NS'=>0, + 'NSEC'=>0, + 'NSEC3'=>0, + 'NSEC3PARAM'=>0, + 'PTR'=>0, + 'RRSIG'=>0, + 'RP'=>0, + 'SIG'=>0, + 'SOA'=>0, + 'SRV'=>0, + 'SSHFP'=>0, + 'TA'=>0, + 'TKEY'=>0, + 'TLSA'=>0, + 'TSIG'=>0, + 'TXT'=>0, + 'URI'=>0, + 'DNAME'=>0, + 'ANY'=>0, + 'AXFR'=>0, + 'IXFR'=>0, + 'OPT'=>0, + 'SPF'=>0, + ); + +my %server=( + 'IPv4 requests received'=>0, #i4rr + 'IPv6 requests received'=>0, #i6rr + 'requests with EDNS(0) received'=>0, #rwer + 'TCP requests received'=>0, #trr + 'auth queries rejected'=>0, #aqr + 'recursive queries rejected'=>0, #rqr + 'responses sent'=>0, #rs + 'truncated responses sent'=>0, #trs + 'responses with EDNS(0) sent'=>0, #rwes + 'queries resulted in successful answer'=>0, #qrisa + 'queries resulted in 
authoritative answer'=>0, #qriaa + 'queries resulted in non authoritative answer'=>0, #qrinaa + 'queries resulted in nxrrset'=>0, #qrin + 'queries resulted in SERVFAIL'=>0, #qris + 'queries resulted in NXDOMAIN'=>0, #qrind + 'queries caused recursion'=>0, #qcr + 'duplicate queries received'=>0, #dqr + 'other query failures'=>0, #oqf + 'UDP queries received'=>0, #uqr + 'TCP queries received'=>0, #tqr + 'Other EDNS option received'=>0, #oeor + 'queries dropped'=>0, #qd + ); + +my %resolver=( + 'IPv4 queries sent'=>0, #i4qs + 'IPv6 queries sent'=>0, #i6qs + 'IPv4 responses received'=>0, #i4rr + 'IPv6 responses received'=>0, #i6rr + 'NXDOMAIN received'=>0, #nr + 'SERVFAIL received'=>0, #sr + 'FORMERR received'=>0, #fr + 'EDNS(0) query failures'=>0, #eqf + 'truncated responses received'=>0, #trr + 'lame delegations received'=>0, #ldr + 'query retries'=>0, #qr + 'query timeouts'=>0, #qt + 'IPv4 NS address fetches'=>0, #i4naf + 'IPv6 NS address fetches'=>0, #i6naf + 'IPv4 NS address fetch failed'=>0, #i4naff + 'IPv6 NS address fetch failed'=>0, #i6naff + 'queries with RTT < 10ms'=>0, #rttl10 + 'queries with RTT 10-100ms'=>0, #rtt10t100 + 'queries with RTT 100-500ms'=>0, #rtt100t500 + 'queries with RTT 500-800ms'=>0, #rtt500t800 + 'queries with RTT 800-1600ms'=>0, #rtt800t1600 + 'queries with RTT > 1600ms'=>0, #rttg1600 + 'bucket size'=>0, #bs + 'REFUSED received'=>0 #rr + ); + +my %cache=( + 'cache hits'=>0, #ch + 'cache misses'=>0, #cm + 'cache hits (from query)'=>0, #chfq + 'cache misses (from query)'=>0, #cmfq + 'cache records deleted due to memory exhaustion'=>0, #crddtme + 'cache records deleted due to TTL expiration'=>0, #crddtte + 'cache database nodes'=>0, #cdn + 'cache database hash buckets'=>0, #cdhb + 'cache tree memory total'=>0, #ctmt + 'cache tree memory in use'=>0, #ctmiu + 'cache tree highest memory in use'=>0, #cthmiu + 'cache heap memory total'=>0, #chmt + 'cache heap memory in use'=>0, #chmiu + 'cache heap highest memory in use'=>0,#chhmiu + ); + +my 
%RRsets=( + 'A'=>0, + 'AAAA'=>0, + 'AFSDB'=>0, + 'APL'=>0, + 'CAA'=>0, + 'CDNSKEY'=>0, + 'CDS'=>0, + 'CERT'=>0, + 'CNAME'=>0, + 'DHCID'=>0, + 'DLV'=>0, + 'DNSKEY'=>0, + 'DS'=>0, + 'IPSECKEY'=>0, + 'KEY'=>0, + 'KX'=>0, + 'LOC'=>0, + 'MX'=>0, + 'NAPTR'=>0, + 'NS'=>0, + 'NSEC'=>0, + 'NSEC3'=>0, + 'NSEC3PARAM'=>0, + 'PTR'=>0, + 'RRSIG'=>0, + 'RP'=>0, + 'SIG'=>0, + 'SOA'=>0, + 'SRV'=>0, + 'SSHFP'=>0, + 'TA'=>0, + 'TKEY'=>0, + 'TLSA'=>0, + 'TSIG'=>0, + 'TXT'=>0, + 'URI'=>0, + 'DNAME'=>0, + 'NXDOMAIN'=>0, + 'ANY'=>0, + 'AXFR'=>0, + 'IXFR'=>0, + 'OPT'=>0, + 'SPF'=>0, + '!A'=>0, + '!AAAA'=>0, + '!AFSDB'=>0, + '!APL'=>0, + '!CAA'=>0, + '!CDNSKEY'=>0, + '!CDS'=>0, + '!CERT'=>0, + '!CNAME'=>0, + '!DHCID'=>0, + '!DLV'=>0, + '!DNSKEY'=>0, + '!DS'=>0, + '!IPSECKEY'=>0, + '!KEY'=>0, + '!KX'=>0, + '!LOC'=>0, + '!MX'=>0, + '!NAPTR'=>0, + '!NS'=>0, + '!NSEC'=>0, + '!NSEC3'=>0, + '!NSEC3PARAM'=>0, + '!PTR'=>0, + '!RRSIG'=>0, + '!RP'=>0, + '!SIG'=>0, + '!SOA'=>0, + '!SRV'=>0, + '!SSHFP'=>0, + '!TA'=>0, + '!TKEY'=>0, + '!TLSA'=>0, + '!TSIG'=>0, + '!TXT'=>0, + '!URI'=>0, + '!DNAME'=>0, + '!NXDOMAIN'=>0, + '!ANY'=>0, + '!AXFR'=>0, + '!IXFR'=>0, + '!OPT'=>0, + '!SPF'=>0, + ); + +my %ADB=( + 'Address hash table size'=>0, #ahts + 'Addresses in hash table'=>0, #aiht + 'Name hash table size'=>0, #nhts + 'Names in hash table'=>0, #niht + ); + +my %sockets=( + 'UDP/IPv4 sockets opened'=>0, #ui4so + 'UDP/IPv6 sockets opened'=>0, #ui6so + 'TCP/IPv4 sockets opened'=>0, #ti4so + 'TCP/IPv6 sockets opened'=>0, #ti6so + 'Raw sockets opened'=>0, #rso + 'UDP/IPv4 sockets closed'=>0, #ui4sc + 'UDP/IPv6 sockets closed'=>0, #ui6sc + 'TCP/IPv4 sockets closed'=>0, #ti4sc + 'TCP/IPv6 sockets closed'=>0, #ti6sc + 'UDP/IPv4 socket bind failures'=>0, #ui4sbf + 'TCP/IPv4 socket bind failures'=>0, #ti4sbf + 'UDP/IPv6 socket bind failures'=>0, #ui6sbf + 'TCP/IPv6 socket bind failures'=>0, #ti6sbf + 'UDP/IPv4 socket connect failures'=>0, #ui4scf + 'TCP/IPv4 socket connect failures'=>0, #ti4scf + 'UDP/IPv6 socket 
connect failures'=>0, #ui6scf + 'TCP/IPv6 socket connect failures'=>0, #ti6scf + 'UDP/IPv4 connections established'=>0, #ui4ce + 'TCP/IPv4 connections established'=>0, #ti4ce + 'UDP/IPv6 connections established'=>0, #ui6ce + 'TCP/IPv6 connections established'=>0, #ti6ce + 'TCP/IPv4 connections accepted'=>0, #ti4ca + 'TCP/IPv6 connections accepted'=>0, #ti6ca + 'UDP/IPv4 send errors'=>0, #ui4se + 'TCP/IPv4 send errors'=>0, #ti4se + 'UDP/IPv6 send errors'=>0, #ui6se + 'TCP/IPv6 send errors'=>0, #ti6se + 'UDP/IPv4 recv errors'=>0, #ui4re + 'TCP/IPv4 recv errors'=>0, #ti4re + 'UDP/IPv6 recv errors'=>0, #ui6re + 'TCP/IPv6 recv errors'=>0, #ti6re + 'UDP/IPv4 sockets active'=>0, #ui4sa + 'UDP/IPv6 sockets active'=>0, #ui6sa + 'TCP/IPv4 sockets active'=>0, #ti4sa + 'TCP/IPv6 sockets active'=>0, #ti6sa + 'Raw sockets active'=>0, #rsa + ); + +my $int=$#data-1; +my $section=''; +while ( defined( $data[$int] ) ){ + my $line=$data[$int]; + my $done=0; + + if ( $line =~ /^\+\+\ Incoming\ Queries\ \+\+/ ){ + $section='incoming'; + $done=1; + }elsif( $line =~ /\+\+\ Outgoing\ Queries\ \+\+/ ){ + $section='outgoing'; + $done=1; + }elsif( $line =~ /^\+\+\ Name\ Server\ Statistics\ \+\+/ ){ + $section='server'; + $done=1; + }elsif( $line =~ /^\+\+\ Resolver\ Statistics\ \+\+/ ){ + $section='resolver'; + $done=1; + }elsif( $line =~ /^\+\+\ Cache\ Statistics\ \+\+/ ){ + $section='cache'; + $done=1; + }elsif( $line =~ /^\+\+\ Cache\ DB\ RRsets\ \+\+/ ){ + $section='RRsets'; + $done=1; + }elsif( $line =~ /^\+\+\ ADB\ stats\ \+\+/ ){ + $section='ADB'; + $done=1; + }elsif( $line =~ /^\+\+\ Socket\ I\/O\ Statistics\ \+\+/ ){ + $section='sockets'; + $done=1; + }elsif( $line =~ /^\[/ ){ + $done=1; + }elsif( $line !~ /^[\s\t]/){ + $section=''; + } + + if ( + ( $section ne '' ) && + ( ! $done ) + ) { + $line=~s/^[\t\s]+//; + chomp($line); + my ( $count, $type )=split(/ /, $line, 2); + if ( defined( $opts{m} ) ){ + eval( 'if (! 
defined($'.$section.'{$type} ) ){ print $section.",".$type.",".$count."\n";}' ); + } + my $to_eval='if( defined($'.$section.'{$type}) ){$'.$section.'{$type}=$'.$section.'{$type}+$count;}'; + eval( $to_eval ); + } + + $int--; +} + +#exit now if we are just checking for missing items +if ( defined( $opts{m} ) ){ + exit 0; +} + +if ( $agent ){ + print "<<>>\n"; +} + +print $incoming{'A'}.','. + $incoming{'AAAA'}.','. + $incoming{'AFSDB'}.','. + $incoming{'APL'}.','. + $incoming{'CAA'}.','. + $incoming{'CDNSKEY'}.','. + $incoming{'CDS'}.','. + $incoming{'CERT'}.','. + $incoming{'CNAME'}.','. + $incoming{'DHCID'}.','. + $incoming{'DLV'}.','. + $incoming{'DNSKEY'}.','. + $incoming{'DS'}.','. + $incoming{'IPSECKEY'}.','. + $incoming{'KEY'}.','. + $incoming{'KX'}.','. + $incoming{'LOC'}.','. + $incoming{'MX'}.','. + $incoming{'NAPTR'}.','. + $incoming{'NS'}.','. + $incoming{'NSEC'}.','. + $incoming{'NSEC3'}.','. + $incoming{'NSEC3PARAM'}.','. + $incoming{'PTR'}.','. + $incoming{'RRSIG'}.','. + $incoming{'RP'}.','. + $incoming{'SIG'}.','. + $incoming{'SOA'}.','. + $incoming{'SRV'}.','. + $incoming{'SSHFP'}.','. + $incoming{'TA'}.','. + $incoming{'TKEY'}.','. + $incoming{'TLSA'}.','. + $incoming{'TSIG'}.','. + $incoming{'TXT'}.','. + $incoming{'URI'}.','. + $incoming{'DNAME'}.','. + $incoming{'ANY'}.','. + $incoming{'AXFR'}.','. + $incoming{'IXFR'}.','. + $incoming{'OPT'}.','. + $incoming{'SPF'}."\n"; + +print $outgoing{'A'}.','. + $outgoing{'AAAA'}.','. + $outgoing{'AFSDB'}.','. + $outgoing{'APL'}.','. + $outgoing{'CAA'}.','. + $outgoing{'CDNSKEY'}.','. + $outgoing{'CDS'}.','. + $outgoing{'CERT'}.','. + $outgoing{'CNAME'}.','. + $outgoing{'DHCID'}.','. + $outgoing{'DLV'}.','. + $outgoing{'DNSKEY'}.','. + $outgoing{'DS'}.','. + $outgoing{'IPSECKEY'}.','. + $outgoing{'KEY'}.','. + $outgoing{'KX'}.','. + $outgoing{'LOC'}.','. + $outgoing{'MX'}.','. + $outgoing{'NAPTR'}.','. + $outgoing{'NS'}.','. + $outgoing{'NSEC'}.','. + $outgoing{'NSEC3'}.','. + $outgoing{'NSEC3PARAM'}.','. 
+ $outgoing{'PTR'}.','. + $outgoing{'RRSIG'}.','. + $outgoing{'RP'}.','. + $outgoing{'SIG'}.','. + $outgoing{'SOA'}.','. + $outgoing{'SRV'}.','. + $outgoing{'SSHFP'}.','. + $outgoing{'TA'}.','. + $outgoing{'TKEY'}.','. + $outgoing{'TLSA'}.','. + $outgoing{'TSIG'}.','. + $outgoing{'TXT'}.','. + $outgoing{'URI'}.','. + $outgoing{'DNAME'}.','. + $outgoing{'ANY'}.','. + $outgoing{'AXFR'}.','. + $outgoing{'IXFR'}.','. + $outgoing{'OPT'}.','. + $outgoing{'SPF'}."\n"; + +print $server{'IPv4 requests received'}.','. + $server{'IPv6 requests received'}.','. + $server{'requests with EDNS(0) received'}.','. + $server{'TCP requests received'}.','. + $server{'auth queries rejected'}.','. + $server{'recursive queries rejected'}.','. + $server{'responses sent'}.','. + $server{'truncated responses sent'}.','. + $server{'responses with EDNS(0) sent'}.','. + $server{'queries resulted in successful answer'}.','. + $server{'queries resulted in authoritative answer'}.','. + $server{'queries resulted in non authoritative answer'}.','. + $server{'queries resulted in nxrrset'}.','. + $server{'queries resulted in SERVFAIL'}.','. + $server{'queries resulted in NXDOMAIN'}.','. + $server{'queries caused recursion'}.','. + $server{'duplicate queries received'}.','. + $server{'other query failures'}.','. + $server{'UDP queries received'}.','. + $server{'TCP queries received'}.','. + $server{'Other EDNS option received'}.','. + $server{'queries dropped'}."\n"; + +print $resolver{'IPv4 queries sent'}.','. + $resolver{'IPv6 queries sent'}.','. + $resolver{'IPv4 responses received'}.','. + $resolver{'IPv6 responses received'}.','. + $resolver{'NXDOMAIN received'}.','. + $resolver{'SERVFAIL received'}.','. + $resolver{'FORMERR received'}.','. + $resolver{'EDNS(0) query failures'}.','. + $resolver{'truncated responses received'}.','. + $resolver{'lame delegations received'}.','. + $resolver{'query retries'}.','. + $resolver{'query timeouts'}.','. + $resolver{'IPv4 NS address fetches'}.','. 
+ $resolver{'IPv6 NS address fetches'}.','. + $resolver{'IPv4 NS address fetch failed'}.','. + $resolver{'IPv6 NS address fetch failed'}.','. + $resolver{'queries with RTT < 10ms'}.','. + $resolver{'queries with RTT 10-100ms'}.','. + $resolver{'queries with RTT 100-500ms'}.','. + $resolver{'queries with RTT 500-800ms'}.','. + $resolver{'queries with RTT 800-1600ms'}.','. + $resolver{'queries with RTT > 1600ms'}.','. + $resolver{'bucket size'}.','. + $resolver{'REFUSED received'}."\n"; + +print $cache{'cache hits'}.','. + $cache{'cache misses'}.','. + $cache{'cache hits (from query)'}.','. + $cache{'cache misses (from query)'}.','. + $cache{'cache records deleted due to memory exhaustion'}.','. + $cache{'cache records deleted due to TTL expiration'}.','. + $cache{'cache database nodes'}.','. + $cache{'cache database hash buckets'}.','. + $cache{'cache tree memory total'}.','. + $cache{'cache tree memory in use'}.','. + $cache{'cache tree highest memory in use'}.','. + $cache{'cache heap memory total'}.','. + $cache{'cache heap memory in use'}.','. + $cache{'cache heap highest memory in use'}."\n"; + +print $RRsets{'A'}.','. + $RRsets{'AAAA'}.','. + $RRsets{'AFSDB'}.','. + $RRsets{'APL'}.','. + $RRsets{'CAA'}.','. + $RRsets{'CDNSKEY'}.','. + $RRsets{'CDS'}.','. + $RRsets{'CERT'}.','. + $RRsets{'CNAME'}.','. + $RRsets{'DHCID'}.','. + $RRsets{'DLV'}.','. + $RRsets{'DNSKEY'}.','. + $RRsets{'DS'}.','. + $RRsets{'IPSECKEY'}.','. + $RRsets{'KEY'}.','. + $RRsets{'KX'}.','. + $RRsets{'LOC'}.','. + $RRsets{'MX'}.','. + $RRsets{'NAPTR'}.','. + $RRsets{'NS'}.','. + $RRsets{'NSEC'}.','. + $RRsets{'NSEC3'}.','. + $RRsets{'NSEC3PARAM'}.','. + $RRsets{'PTR'}.','. + $RRsets{'RRSIG'}.','. + $RRsets{'RP'}.','. + $RRsets{'SIG'}.','. + $RRsets{'SOA'}.','. + $RRsets{'SRV'}.','. + $RRsets{'SSHFP'}.','. + $RRsets{'TA'}.','. + $RRsets{'TKEY'}.','. + $RRsets{'TLSA'}.','. + $RRsets{'TSIG'}.','. + $RRsets{'TXT'}.','. + $RRsets{'URI'}.','. + $RRsets{'DNAME'}.','. + $RRsets{'NXDOMAIN'}.','. 
+ $RRsets{'ANY'}.','. + $RRsets{'AXFR'}.','. + $RRsets{'IXFR'}.','. + $RRsets{'OPT'}.','. + $RRsets{'SPF'}.','. + $RRsets{'!A'}.','. + $RRsets{'!AAAA'}.','. + $RRsets{'!AFSDB'}.','. + $RRsets{'!APL'}.','. + $RRsets{'!CAA'}.','. + $RRsets{'!CDNSKEY'}.','. + $RRsets{'!CDS'}.','. + $RRsets{'!CERT'}.','. + $RRsets{'!CNAME'}.','. + $RRsets{'!DHCID'}.','. + $RRsets{'!DLV'}.','. + $RRsets{'!DNSKEY'}.','. + $RRsets{'!DS'}.','. + $RRsets{'!IPSECKEY'}.','. + $RRsets{'!KEY'}.','. + $RRsets{'!KX'}.','. + $RRsets{'!LOC'}.','. + $RRsets{'!MX'}.','. + $RRsets{'!NAPTR'}.','. + $RRsets{'!NS'}.','. + $RRsets{'!NSEC'}.','. + $RRsets{'!NSEC3'}.','. + $RRsets{'!NSEC3PARAM'}.','. + $RRsets{'!PTR'}.','. + $RRsets{'!RRSIG'}.','. + $RRsets{'!RP'}.','. + $RRsets{'!SIG'}.','. + $RRsets{'!SOA'}.','. + $RRsets{'!SRV'}.','. + $RRsets{'!SSHFP'}.','. + $RRsets{'!TA'}.','. + $RRsets{'!TKEY'}.','. + $RRsets{'!TLSA'}.','. + $RRsets{'!TSIG'}.','. + $RRsets{'!TXT'}.','. + $RRsets{'!URI'}.','. + $RRsets{'!DNAME'}.','. + $RRsets{'!NXDOMAIN'}.','. + $RRsets{'!ANY'}.','. + $RRsets{'!AXFR'}.','. + $RRsets{'!IXFR'}.','. + $RRsets{'!OPT'}.','. + $RRsets{'!SPF'}."\n"; + +print $ADB{'Address hash table size'}.','. + $ADB{'Addresses in hash table'}.','. + $ADB{'Name hash table size'}.','. + $ADB{'Names in hash table'}."\n"; + +print $sockets{'UDP/IPv4 sockets opened'}.','. + $sockets{'UDP/IPv6 sockets opened'}.','. + $sockets{'TCP/IPv4 sockets opened'}.','. + $sockets{'TCP/IPv6 sockets opened'}.','. + $sockets{'Raw sockets opened'}.','. + $sockets{'UDP/IPv4 sockets closed'}.','. + $sockets{'UDP/IPv6 sockets closed'}.','. + $sockets{'TCP/IPv4 sockets closed'}.','. + $sockets{'TCP/IPv6 sockets closed'}.','. + $sockets{'UDP/IPv4 socket bind failures'}.','. + $sockets{'TCP/IPv4 socket bind failures'}.','. + $sockets{'UDP/IPv6 socket bind failures'}.','. + $sockets{'TCP/IPv6 socket bind failures'}.','. + $sockets{'UDP/IPv4 socket connect failures'}.','. + $sockets{'TCP/IPv4 socket connect failures'}.','. 
+ $sockets{'UDP/IPv6 socket connect failures'}.','. + $sockets{'TCP/IPv6 socket connect failures'}.','. + $sockets{'UDP/IPv4 connections established'}.','. + $sockets{'TCP/IPv4 connections established'}.','. + $sockets{'UDP/IPv6 connections established'}.','. + $sockets{'TCP/IPv6 connections established'}.','. + $sockets{'TCP/IPv4 connections accepted'}.','. + $sockets{'TCP/IPv6 connections accepted'}.','. + $sockets{'UDP/IPv4 send errors'}.','. + $sockets{'TCP/IPv4 send errors'}.','. + $sockets{'UDP/IPv6 send errors'}.','. + $sockets{'TCP/IPv6 send errors'}.','. + $sockets{'UDP/IPv4 recv errors'}.','. + $sockets{'TCP/IPv4 recv errors'}.','. + $sockets{'UDP/IPv6 recv errors'}.','. + $sockets{'TCP/IPv6 recv errors'}.','. + $sockets{'UDP/IPv4 sockets active'}.','. + $sockets{'UDP/IPv6 sockets active'}.','. + $sockets{'TCP/IPv4 sockets active'}.','. + $sockets{'TCP/IPv6 sockets active'}.','. + $sockets{'Raw sockets active'}."\n"; From a98761cc203a2bec11a7e800a015d6fdedcda524 Mon Sep 17 00:00:00 2001 From: Karl Shea Date: Thu, 4 May 2017 02:06:32 -0500 Subject: [PATCH 125/497] Fix bind config file read (#125) --- snmp/bind | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/bind b/snmp/bind index d55b46624..1d66d2d18 100644 --- a/snmp/bind +++ b/snmp/bind @@ -139,7 +139,7 @@ if ( defined( $opts{c} ) ){ my $config_file=''; if ( -f $config ){ open(my $readfh, "<", $config) or die "Can't open '".$config."'"; - read($readfh , $config , 1000000); + read($readfh , $config_file , 1000000); close($readfh); #parse the config file and remove comments and empty lines From 166784e8946f282c65b573fc0acc56fd27475d43 Mon Sep 17 00:00:00 2001 From: BlackDex Date: Tue, 23 May 2017 14:44:05 +0200 Subject: [PATCH 126/497] Added try-except checks for global values. (#107) Fixed an error which prevented output. It seems some ceph version probably use different values or something. This is a quick fix to have the script output the correct values. 
--- agent-local/ceph | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/agent-local/ceph b/agent-local/ceph index c9bd1a0c1..29298510a 100755 --- a/agent-local/ceph +++ b/agent-local/ceph @@ -21,7 +21,20 @@ def cephdf(): cephdf = check_output(["/usr/bin/ceph", "-f", "json", "df"]).replace('-inf', '0') s = json.loads(cephdf) - print("c:%i:%i:%i" % (s['stats']['total_bytes'], s['stats']['total_used_bytes'], s['stats']['total_avail_bytes'])) + try: + ts = s['stats']['total_bytes'] + except: + ts = s['stats']['total_space'] + try: + tu = s['stats']['total_used_bytes'] + except: + tu = s['stats']['total_used'] + try: + ta = s['stats']['total_avail_bytes'] + except: + ta = s['stats']['total_avail'] + + print("c:%i:%i:%i" % (ts, tu, ta)) for p in s['pools']: b = p['stats']['bytes_used'] From 1c8b1bb7fb69f65581b090c8d672ae1c364d803f Mon Sep 17 00:00:00 2001 From: Aldemir Akpinar Date: Thu, 1 Jun 2017 00:23:07 +0300 Subject: [PATCH 127/497] Added devuan support for os-updates.sh and removed code repitition (#131) --- snmp/os-updates.sh | 9 +-------- 1 file changed, 1 insertion(+), 8 deletions(-) diff --git a/snmp/os-updates.sh b/snmp/os-updates.sh index bd31874a5..c4d296767 100755 --- a/snmp/os-updates.sh +++ b/snmp/os-updates.sh @@ -51,14 +51,7 @@ if [ -f /etc/os-release ]; then else echo "0"; fi - elif [ $OS == "debian" ]; then - UPDATES=`$BIN_APT $CMD_APT | $BIN_GREP 'Inst' | $BIN_WC $CMD_WC` - if [ $UPDATES -gt 1 ]; then - echo $UPDATES; - else - echo "0"; - fi - elif [ $OS == "ubuntu" ]; then + elif [ $OS == "debian" ] || [ $OS == "devuan" ] || [ $OS == "ubuntu" ]; then UPDATES=`$BIN_APT $CMD_APT | $BIN_GREP 'Inst' | $BIN_WC $CMD_WC` if [ $UPDATES -gt 1 ]; then echo $UPDATES; From 32567dd311623418aed6e70ebf73de5957ce04c2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?F=C3=A9lim=20Whiteley?= Date: Wed, 31 May 2017 22:23:38 +0100 Subject: [PATCH 128/497] Fix for first line as localhost (#130) An example output like below where the first line of 
output is just "localhost" so it causes the splitting to cause an out of index error. Example: cat /tmp/apache-snmp localhost ServerVersion: Apache/2.4.25 (Ubuntu) PHP/5.6.30-5+deb.sury.org~trusty+2 ServerMPM: prefork Server Built: 2016-12-21T00:00:00 CurrentTime: Thursday, 18-May-2017 19:26:43 EDT RestartTime: Thursday, 18-May-2017 11:35:48 EDT ParentServerConfigGeneration: 2 ParentServerMPMGeneration: 1 ServerUptimeSeconds: 28255 ServerUptime: 7 hours 50 minutes 55 seconds Load1: 0.04 Load5: 0.05 Load15: 0.10 Total Accesses: 5367 Total kBytes: 61432 CPUUser: 19.69 CPUSystem: 1.05 CPUChildrenUser: 0 CPUChildrenSystem: 0 CPULoad: .0734029 Uptime: 28255 ReqPerSec: .189949 BytesPerSec: 2226.38 BytesPerReq: 11721 BusyWorkers: 1 IdleWorkers: 6 Scoreboard: ___....._.__.W........................................................................................................................................ --- snmp/apache-stats.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/snmp/apache-stats.py b/snmp/apache-stats.py index 891bd196e..9fb62e644 100755 --- a/snmp/apache-stats.py +++ b/snmp/apache-stats.py @@ -48,7 +48,9 @@ params = {} for line in data.splitlines(): fields = line.split( ': ' ) - if fields[0] == 'Scoreboard': + if len(fields) <= 1: + continue # "localhost" as first line cause out of index error + elif fields[0] == 'Scoreboard': # count up the scoreboard into states states = {} for state in scoreboardkey: From e51611a1aa842ef8295ad518afe27c5dabb103a8 Mon Sep 17 00:00:00 2001 From: crcro Date: Tue, 6 Jun 2017 01:00:29 +0300 Subject: [PATCH 129/497] bash script for pi-hole app (#135) --- snmp/pi-hole | 79 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 79 insertions(+) create mode 100644 snmp/pi-hole diff --git a/snmp/pi-hole b/snmp/pi-hole new file mode 100644 index 000000000..99309a198 --- /dev/null +++ b/snmp/pi-hole @@ -0,0 +1,79 @@ +#!/usr/bin/env bash +set -euo pipefail +IFS=$'\n\t' + +API_AUTH_KEY="" 
+API_URL="localhost/admin/api.php" +URL_READ_ONLY="?summaryRaw" +URL_QUERY_TYPE="?getQueryTypes&auth=" + + +#/ Description: BASH script to get Pi-hole stats +#/ Examples: ./pi-hole-stats.sh +#/ Options: +#/ --help: Display this help message +#/ --debug: Brief check of system env and script vars +usage() { + grep '^#/' "$0" | cut -c4- ; + exit 0 ; +} + +debug() { + if ! [ -x "$(command -v tr)" ]; then + echo '[error] tr binary not available, please install it' + else + echo '[ok] tr bin'; + fi + + if ! [ -x "$(command -v jq)" ]; then + echo '[error] jq binary not available, please install it' + else + echo '[ok] jq bin'; + fi + + if ! [ -x "$(command -v curl)" ]; then + echo '[error] curl binary not available, please install it' + else + echo '[ok] curl bin' + fi + + if [ -z "$API_URL" ]; then + echo '[error] API_URL is not set' + else + echo '[ok] API_URL is set' + fi + + if [ -z $API_AUTH_KEY ]; then + echo '[warning] API_AUTH_KEY is not set, some values will not be available' + else + echo '[ok] API_AUTH_KEY is set' + fi + + if [ -z ${URL_READ_ONLY} ]; then + echo '[error] URL_READ_ONLY is not set' + else + echo '[ok] URL_READ_ONLY is set' + fi + + if [ -z ${URL_QUERY_TYPE} ]; then + echo '[error] URL_QUERY_TYPE is not set' + else + echo '[ok] URL_QUERY_TYPE not set' + fi +} + +exportdata() { + # domains_being_blocked / dns_query_total / ads_blocked_today / ads_percentage_today + # unique_domains / queries_forwarded / queries_cached + GET_STATS=$(curl -s $API_URL$URL_READ_ONLY | jq '.[]') + echo $GET_STATS | tr " " "\n" + # A / AAAA / PTR / SRV + GET_QUERY_TYPE=$(curl -s $API_URL$URL_QUERY_TYPE$API_AUTH_KEY | jq '.[][]') + echo $GET_QUERY_TYPE | tr " " "\n" +} + +if [ -z $* ]; then + exportdata +fi +expr "$*" : ".*--help" > /dev/null && usage +expr "$*" : ".*--debug" > /dev/null && debug From ff2eaa8d14cfc699d168f195c55c1f35d41f8a44 Mon Sep 17 00:00:00 2001 From: einarjh Date: Sat, 10 Jun 2017 11:20:48 +0200 Subject: [PATCH 130/497] Strip all non-ASCII characters 
from hddtemp output (#136) --- agent-local/hddtemp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/agent-local/hddtemp b/agent-local/hddtemp index e2b99d759..9098ec53a 100755 --- a/agent-local/hddtemp +++ b/agent-local/hddtemp @@ -32,7 +32,7 @@ if [ "${hddtemp}" != "" ]; then else output=`${hddtemp} -w -q ${disks} 2>/dev/null` fi - content=`echo "$output" | awk -F": " 'BEGIN{ ORS="" }{ print "|"$1"|"$2"|"$3"|";} ' | sed 's/[° ]C|/|C|/g' | sed 's/[° ]F|/|F|/g'` + content=`echo "$output" | awk -F": " 'BEGIN{ ORS="" }{ print "|"$1"|"$2"|"$3"|";} ' | sed 's/[° ]C|/|C|/g' | sed 's/[° ]F|/|F|/g' | tr -cd '\12\14\40-\176'` if [ "${content}" != "" ]; then echo '<<>>' echo ${content} From cae5227c42b71ab07e24ef5507a79e1a7eabf591 Mon Sep 17 00:00:00 2001 From: Stefan Funke Date: Wed, 28 Jun 2017 22:36:26 +0200 Subject: [PATCH 131/497] unnecessary use of wc while already calling grep (#137) * useless call of wc while already calling grep * move grep count call to CMD_GREP to stay in project style --- snmp/os-updates.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/snmp/os-updates.sh b/snmp/os-updates.sh index c4d296767..b015abb19 100755 --- a/snmp/os-updates.sh +++ b/snmp/os-updates.sh @@ -13,6 +13,7 @@ BIN_AWK='/usr/bin/awk' BIN_WC='/usr/bin/wc' BIN_GREP='/bin/grep' +CMD_GREP='-c' CMD_WC='-l' BIN_ZYPPER='/usr/bin/zypper' CMD_ZYPPER='lu' @@ -52,7 +53,7 @@ if [ -f /etc/os-release ]; then echo "0"; fi elif [ $OS == "debian" ] || [ $OS == "devuan" ] || [ $OS == "ubuntu" ]; then - UPDATES=`$BIN_APT $CMD_APT | $BIN_GREP 'Inst' | $BIN_WC $CMD_WC` + UPDATES=`$BIN_APT $CMD_APT | $BIN_GREP $CMD_GREP 'Inst'` if [ $UPDATES -gt 1 ]; then echo $UPDATES; else From e4e6282a9ef1164f9ffb285839ab6d83a3294d49 Mon Sep 17 00:00:00 2001 From: RedChops Date: Thu, 29 Jun 2017 16:11:26 -0400 Subject: [PATCH 132/497] Fix for bug https://github.com/librenms/librenms/issues/6821 (#138) --- snmp/postgres | 1 + 1 file changed, 1 insertion(+) diff --git 
a/snmp/postgres b/snmp/postgres index 5897d0618..c03b2ccb4 100644 --- a/snmp/postgres +++ b/snmp/postgres @@ -108,6 +108,7 @@ BEGIN{ toAdd=1; } END{ + OFMT = "%.0f" print backends; print commits; print rollbacks; From 5f05fbd54d6912d271af3e1e71ef021d8583eb60 Mon Sep 17 00:00:00 2001 From: drid Date: Wed, 12 Jul 2017 22:55:02 +0300 Subject: [PATCH 133/497] C.H.I.P. power values (#134) * C.H.I.P. power values * Added attribution * Fix ACIN current calculation * Battery current fix --- snmp/chip.sh | 87 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 87 insertions(+) create mode 100644 snmp/chip.sh diff --git a/snmp/chip.sh b/snmp/chip.sh new file mode 100644 index 000000000..07012d906 --- /dev/null +++ b/snmp/chip.sh @@ -0,0 +1,87 @@ +#!/bin/bash +# Based on https://github.com/Photonicsguy/CHIP +# Enable ADC registers +i2cset -y -f 0 0x34 0x82 0xff + +## REGISTER 00 ## +REG=$(i2cget -y -f 0 0x34 0x00) +STATUS_ACIN=$(($(($REG&0x80))/128)) +STATUS_VBUS=$(($(($REG&0x20))/32)) +STATUS_CHG_DIR=$(($(($REG&0x04))/4)) + +REG=$(i2cget -y -f 0 0x34 0x01) +STATUS_CHARGING=$(($(($REG&0x40))/64)) +STATUS_BATCON=$(($(($REG&0x20))/32)) + +BAT_C=0 +BAT_D=0 + +if [ $STATUS_ACIN == 1 ]; then + # ACIN voltage + REG=`i2cget -y -f 0 0x34 0x56 w|awk '{print "0x"substr($0,5,2)substr($0,4,1)}'` + REG=`printf "%d" "$REG"` + ACIN=`echo "$REG*0.0017"|bc` + # ACIN Current + REG=`i2cget -y -f 0 0x34 0x58 w|awk '{print "0x"substr($0,5,2)substr($0,4,1)}'` + REG=`printf "%d" "$REG"` + ACIN_C=`echo "$REG*0.000625"|bc` +else + ACIN=0 + ACIN_C=0 +fi + +if [ $STATUS_VBUS == 1 ]; then + # VBUS voltage + REG=`i2cget -y -f 0 0x34 0x5A w|awk '{print "0x"substr($0,5,2)substr($0,4,1)}'` + REG=`printf "%d" "$REG"` + VBUS=`echo "$REG*0.0017"|bc` + + # VBUS Current + REG=`i2cget -y -f 0 0x34 0x5C w|awk '{print "0x"substr($0,5,2)substr($0,4,1)}'` + REG=`printf "%d" "$REG"` + VBUS_C=`echo "$REG*0.000375"|bc` +else + VBUS=0 + VBUS_C=0 +fi + +if [ $STATUS_BATCON == 1 ]; then + # Battery Voltage 
+ REG=`i2cget -y -f 0 0x34 0x78 w|awk '{print "0x"substr($0,5,2)substr($0,4,1)}'` + REG=`printf "%d" "$REG"` + VBAT=`echo "$REG*0.0011"|bc` + + if [ $STATUS_CHG_DIR == 1 ]; then + # Battery Charging Current + REG=`i2cget -y -f 0 0x34 0x7A w|awk '{print "0x"substr($0,5,2)substr($0,4,1)}'` + REG_C=`printf "%d" "$REG"` + BAT_C=`echo "scale=2;$REG_C*0.001"|bc` + else + # Battery Discharge Current + REG=`i2cget -y -f 0 0x34 0x7C w|awk '{print "0x"substr($0,5,2)substr($0,4,1)}'` + REG_D=`printf "%d" "$REG"` + BAT_D=`echo "scale=2;$REG_D*0.001"|bc` + fi + # Battery % + REG=`i2cget -y -f 0 0x34 0xB9` + BAT_PERCENT=`printf "%d" "$REG"` +else + VBAT=0 + BATT_CUR=0 + BAT_PERCENT=0 +fi + +# Temperature +REG=`i2cget -y -f 0 0x34 0x5E w|awk '{print "0x"substr($0,5,2)substr($0,4,1)}'` +REG=`printf "%d" "$REG"` +THERM=`echo "($REG*0.1)-144.7"|bc` + +echo $THERM +echo $ACIN +echo $ACIN_C +echo $VBUS +echo $VBUS_C +echo $VBAT +echo $(echo "$BAT_C-$BAT_D"|bc) +echo $BAT_PERCENT +echo $STATUS_CHARGING From c3bd030d0f26d7e72c95a18664a344b7cab79994 Mon Sep 17 00:00:00 2001 From: Neil Lathwood Date: Thu, 3 Aug 2017 17:11:26 +0100 Subject: [PATCH 134/497] Update raspberry.sh (#140) --- snmp/raspberry.sh | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/snmp/raspberry.sh b/snmp/raspberry.sh index 39840949f..575a6fb16 100755 --- a/snmp/raspberry.sh +++ b/snmp/raspberry.sh @@ -31,4 +31,17 @@ sudo $picmd $getStatusMPG2 | $pised 's/MPG2=//g' sudo $picmd $getStatusWVC1 | $pised 's/WVC1=//g' sudo $picmd $getStatusMPG4 | $pised 's/MPG4=//g' sudo $picmd $getStatusMJPG | $pised 's/MJPG=//g' -sudo $picmd $getStatusWMV9 | $pised 's/WMV9=//g' +sudo $picmd $getStatusWMV9 | $pised 's/enabled/2/g' +sudo $picmd $getStatusH264 | $pised 's/enabled/2/g' +sudo $picmd $getStatusMPG2 | $pised 's/enabled/2/g' +sudo $picmd $getStatusWVC1 | $pised 's/enabled/2/g' +sudo $picmd $getStatusMPG4 | $pised 's/enabled/2/g' +sudo $picmd $getStatusMJPG | $pised 's/enabled/2/g' +sudo $picmd 
$getStatusWMV9 | $pised 's/enabled/2/g' +sudo $picmd $getStatusWMV9 | $pised 's/enabled/2/g' +sudo $picmd $getStatusH264 | $pised 's/disabled/1/g' +sudo $picmd $getStatusMPG2 | $pised 's/disabled/1/g' +sudo $picmd $getStatusWVC1 | $pised 's/disabled/1/g' +sudo $picmd $getStatusMPG4 | $pised 's/disabled/1/g' +sudo $picmd $getStatusMJPG | $pised 's/disabled/1/g' +sudo $picmd $getStatusWMV9 | $pised 's/disabled/1/g' From ecd6448d17fe2ece02672e1d48503ae4ac451283 Mon Sep 17 00:00:00 2001 From: Zucht Date: Sat, 12 Aug 2017 17:30:02 +0200 Subject: [PATCH 135/497] Update raspberry.sh (#143) Fix state WMV9 --- snmp/raspberry.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/raspberry.sh b/snmp/raspberry.sh index 575a6fb16..f5c57f827 100755 --- a/snmp/raspberry.sh +++ b/snmp/raspberry.sh @@ -31,7 +31,7 @@ sudo $picmd $getStatusMPG2 | $pised 's/MPG2=//g' sudo $picmd $getStatusWVC1 | $pised 's/WVC1=//g' sudo $picmd $getStatusMPG4 | $pised 's/MPG4=//g' sudo $picmd $getStatusMJPG | $pised 's/MJPG=//g' -sudo $picmd $getStatusWMV9 | $pised 's/enabled/2/g' +sudo $picmd $getStatusWMV9 | $pised 's/WMV9=//g' sudo $picmd $getStatusH264 | $pised 's/enabled/2/g' sudo $picmd $getStatusMPG2 | $pised 's/enabled/2/g' sudo $picmd $getStatusWVC1 | $pised 's/enabled/2/g' From 46256ed9d00099e80342d847209986083e97afe8 Mon Sep 17 00:00:00 2001 From: arrmo Date: Sun, 27 Aug 2017 14:59:15 -0500 Subject: [PATCH 136/497] Update Distro, for Raspbian Support (#144) --- snmp/distro | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/snmp/distro b/snmp/distro index 5886e5cd0..639ac225b 100755 --- a/snmp/distro +++ b/snmp/distro @@ -47,6 +47,10 @@ elif [ "${OS}" = "Linux" ] ; then elif [ -f /etc/debian_version ] ; then DIST="Debian `cat /etc/debian_version`" REV="" + ID=`lsb_release -i | awk -F ':' '{print $2}' | sed 's/ //g'` + if [ "${ID}" = "Raspbian" ] ; then + DIST="Raspbian `cat /etc/debian_version`" + fi elif [ -f /etc/gentoo-release ] ; then DIST="Gentoo" From 
70f424396d24ee43a2fd2a40ac7878297318223c Mon Sep 17 00:00:00 2001 From: Uwe Arzt Date: Wed, 6 Sep 2017 20:42:58 +0200 Subject: [PATCH 137/497] Add Oracle Linux Distribution to distro script (#146) * Add Oracle Linux to distro script * Revert local change --- snmp/distro | 2 ++ 1 file changed, 2 insertions(+) diff --git a/snmp/distro b/snmp/distro index 639ac225b..61ad2488c 100755 --- a/snmp/distro +++ b/snmp/distro @@ -28,6 +28,8 @@ elif [ "${OS}" = "Linux" ] ; then DIST="Mandriva" PSEUDONAME=`cat /etc/mandriva-release | sed s/.*\(// | sed s/\)//` REV=`cat /etc/mandriva-release | sed s/.*release\ // | sed s/\ .*//` + elif [ -f /etc/oracle-release ]; then + DIST="Oracle" else DIST="RedHat" fi From 73fae440f6885ebff03a26cf5dd9e5eb18f9fc7f Mon Sep 17 00:00:00 2001 From: tomarch Date: Wed, 20 Sep 2017 21:47:11 +0200 Subject: [PATCH 138/497] fix munin agent (#148) Without the full munin-scripts path, this script won't find munin file and return nothing. --- agent-local/munin | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/agent-local/munin b/agent-local/munin index 47e513fa5..a95f3f145 100755 --- a/agent-local/munin +++ b/agent-local/munin @@ -1,6 +1,6 @@ # Lokale Einzelchecks export MUNIN_LIBDIR=/usr/share/munin -if cd munin-scripts +if cd $MUNIN_LIBDIR/munin-scripts then for skript in $(ls) do From 195dcfff547872df9d1b229435f726ed8fdb4be9 Mon Sep 17 00:00:00 2001 From: Slashdoom <5092581+slashdoom@users.noreply.github.com> Date: Tue, 10 Oct 2017 08:02:05 +1300 Subject: [PATCH 139/497] os-update.sh: back to package management based and count fixes (#149) * Update os-updates.sh * Update os-updates.sh * Update os-updates.sh --- snmp/os-updates.sh | 84 +++++++++++++++++++++++----------------------- 1 file changed, 42 insertions(+), 42 deletions(-) diff --git a/snmp/os-updates.sh b/snmp/os-updates.sh index b015abb19..6986c1d8c 100755 --- a/snmp/os-updates.sh +++ b/snmp/os-updates.sh @@ -10,13 +10,12 @@ 
#--------------------------------------------------------------# # please make sure you have the path/binaries below # ################################################################ -BIN_AWK='/usr/bin/awk' BIN_WC='/usr/bin/wc' BIN_GREP='/bin/grep' CMD_GREP='-c' CMD_WC='-l' BIN_ZYPPER='/usr/bin/zypper' -CMD_ZYPPER='lu' +CMD_ZYPPER='-q lu' BIN_YUM='/usr/bin/yum' CMD_YUM='-q check-update' BIN_DNF='/usr/bin/dnf' @@ -29,45 +28,46 @@ CMD_PACMAN='-Sup' ################################################################ # Don't change anything unless you know what are you doing # ################################################################ -if [ -f /etc/os-release ]; then - OS=`$BIN_AWK -F= '/^ID=/{print $2}' /etc/os-release` - if [ $OS == "opensuse" ]; then - UPDATES=`$BIN_ZYPPER $CMD_ZYPPER | $BIN_WC $CMD_WC` - if [ $UPDATES -gt 3 ]; then - echo $(($UPDATES-3)); - else - echo "0"; - fi - elif [ $OS == "\"centos\"" ]; then - UPDATES=`$BIN_YUM $CMD_YUM | $BIN_WC $CMD_WC` - if [ $UPDATES -gt 6 ]; then - echo $(($UPDATES-6)); - else - echo "0"; - fi - elif [ $OS == "fedora" ]; then - UPDATES=`$BIN_DNF $CMD_DNF | $BIN_WC $CMD_WC` - if [ $UPDATES -gt 6 ]; then - echo $(($UPDATES-6)); - else - echo "0"; - fi - elif [ $OS == "debian" ] || [ $OS == "devuan" ] || [ $OS == "ubuntu" ]; then - UPDATES=`$BIN_APT $CMD_APT | $BIN_GREP $CMD_GREP 'Inst'` - if [ $UPDATES -gt 1 ]; then - echo $UPDATES; - else - echo "0"; - fi - elif [ $OS == "arch" ]; then - UPDATES=`$BIN_PACMAN $CMD_PACMAN | $BIN_WC $CMD_WC` - if [ $UPDATES -gt 1 ]; then - echo $(($UPDATES-1)); - else - echo "0"; - fi - fi +if [ -f $BIN_ZYPPER ]; then + # OpenSUSE + UPDATES=`$BIN_ZYPPER $CMD_ZYPPER | $BIN_WC $CMD_WC` + if [ $UPDATES -gt 2 ]; then + echo $(($UPDATES-2)); + else + echo "0"; + fi +elif [ -f $BIN_DNF ]; then + # Fedora + UPDATES=`$BIN_DNF $CMD_DNF | $BIN_WC $CMD_WC` + if [ $UPDATES -gt 1 ]; then + echo $(($UPDATES-1)); + else + echo "0"; + fi +elif [ -f $BIN_PACMAN ]; then + # Arch + UPDATES=`$BIN_PACMAN 
$CMD_PACMAN | $BIN_WC $CMD_WC` + if [ $UPDATES -gt 1 ]; then + echo $(($UPDATES-1)); + else + echo "0"; + fi +elif [ -f $BIN_YUM ]; then + # CentOS / Redhat + UPDATES=`$BIN_YUM $CMD_YUM | $BIN_WC $CMD_WC` + if [ $UPDATES -gt 1 ]; then + echo $(($UPDATES-1)); + else + echo "0"; + fi +elif [ -f $BIN_APT ]; then + # Debian / Devuan / Ubuntu + UPDATES=`$BIN_APT $CMD_APT | $BIN_GREP $CMD_GREP 'Inst'` + if [ $UPDATES -gt 1 ]; then + echo $UPDATES; + else + echo "0"; + fi else - echo "0"; + echo "0"; fi - From 5ab8aa05fd7856aa00446f851e9792e830c59d89 Mon Sep 17 00:00:00 2001 From: dragans Date: Fri, 27 Oct 2017 07:39:09 +0200 Subject: [PATCH 140/497] fix: Update mysql (#127) Update mysql agent script based on updated changes in newest version of Percona Monitoring Plugins (Cacti template). Changes enable correct parsing of status data for newer versions of MySQL/MariaDB database servers and should be backward compatible with older versions. --- agent-local/mysql | 492 ++++++++++++++++++++++++++++++++-------------- 1 file changed, 342 insertions(+), 150 deletions(-) diff --git a/agent-local/mysql b/agent-local/mysql index 4db91f689..3b8b30427 100755 --- a/agent-local/mysql +++ b/agent-local/mysql @@ -7,24 +7,11 @@ ### This script requires php-cli and php-mysql packages # ============================================================================ -# This is a script to retrieve information from a MySQL server for input to a -# Cacti graphing process. It is hosted at -# http://code.google.com/p/mysql-cacti-templates/. -# -# This program is copyright (c) 2007 Baron Schwartz. Feedback and improvements -# are welcome. -# -# THIS PROGRAM IS PROVIDED "AS IS" AND WITHOUT ANY EXPRESS OR IMPLIED -# WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF -# MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. 
-# -# This program is free software; you can redistribute it and/or modify it under -# the terms of the GNU General Public License as published by the Free Software -# Foundation, version 2. -# -# You should have received a copy of the GNU General Public License along with -# this program; if not, write to the Free Software Foundation, Inc., 59 Temple -# Place, Suite 330, Boston, MA 02111-1307 USA. +# This program is part of Percona Monitoring Plugins +# License: GPL License (see COPYING) +# Copyright 2008-2016 Baron Schwartz, 2012-2016 Percona +# Authors: +# Baron Schwartz, Roman Vynar # ============================================================================ # ============================================================================ @@ -50,9 +37,19 @@ $mysql_pass = ''; $mysql_host = 'localhost'; $mysql_port = 3306; $mysql_ssl = FALSE; # Whether to use SSL to connect to MySQL. +$mysql_ssl_key = '/etc/pki/tls/certs/mysql/client-key.pem'; +$mysql_ssl_cert = '/etc/pki/tls/certs/mysql/client-cert.pem'; +$mysql_ssl_ca = '/etc/pki/tls/certs/mysql/ca-cert.pem'; +$mysql_connection_timeout = 5; + +$heartbeat = FALSE; # Whether to use pt-heartbeat table for repl. delay calculation. +$heartbeat_utc = FALSE; # Whether pt-heartbeat is run with --utc option. +$heartbeat_server_id = 0; # Server id to associate with a heartbeat. Leave 0 if no preference. +$heartbeat_table = 'percona.heartbeat'; # db.tbl. + -$heartbeat = ''; # db.tbl in case you use mk-heartbeat from Maatkit. $cache_dir = '/tmp'; # If set, this uses caching to avoid multiple calls. +$timezone = null; # If not set, uses the system default. Example: "UTC" $cache_time = 30; # How long to cache data. $chk_options = array ( @@ -60,6 +57,7 @@ $chk_options = array ( 'master' => true, # Do you want to check binary logging? 'slave' => true, # Do you want to check slave status? 'procs' => true, # Do you want to check SHOW PROCESSLIST? + 'get_qrt' => true, # Get query response times from Percona Server or MariaDB? 
); $use_ss = FALSE; # Whether to use the script server or not @@ -78,6 +76,7 @@ echo("<<>>\n"); if (file_exists(__FILE__ . '.cnf' ) ) { require(__FILE__ . '.cnf'); + debug('Found configuration file ' . __FILE__ . '.cnf'); } else { echo("No ".__FILE__ . ".cnf found!\n"); exit(); @@ -115,6 +114,19 @@ function error_handler($errno, $errstr, $errfile, $errline) { # } #} +# ============================================================================ +# Set the default timezone either to the configured, system timezone, or the +# default set above in the script. +# ============================================================================ +if ( function_exists("date_default_timezone_set") + && function_exists("date_default_timezone_get") ) { + $tz = ($timezone ? $timezone : @date_default_timezone_get()); + if ( $tz ) { + @date_default_timezone_set($tz); + } +} + + # ============================================================================ # Make sure we can also be called as a script. # ============================================================================ @@ -172,7 +184,7 @@ if (!function_exists('array_change_key_case') ) { # ============================================================================ function validate_options($options) { debug($options); - $opts = array('items', 'user', 'pass', 'heartbeat', 'nocache', 'port'); + $opts = array('items', 'user', 'pass', 'heartbeat', 'nocache', 'port', 'server-id'); # Required command-line options foreach ( array() as $option ) { if (!isset($options[$option]) || !$options[$option] ) { @@ -190,21 +202,23 @@ function validate_options($options) { # Print out a brief usage summary # ============================================================================ function usage($message) { - global $mysql_host, $mysql_user, $mysql_pass, $mysql_port, $heartbeat; + global $mysql_host, $mysql_user, $mysql_pass, $mysql_port; $usage = << --items [OPTION] - - --host Hostname to connect to; use host:port syntax to specify a 
port - Use :/path/to/socket if you want to connect via a UNIX socket - --items Comma-separated list of the items whose data you want - --user MySQL username; defaults to $mysql_user if not given - --pass MySQL password; defaults to $mysql_pass if not given - --heartbeat MySQL heartbeat table; defaults to '$heartbeat' (see mk-heartbeat) - --nocache Do not cache results in a file - --port MySQL port; defaults to $mysql_port if not given - --mysql_ssl Add the MYSQL_CLIENT_SSL flag to mysql_connect() call +Usage: php ss_get_mysql_stats.php --host --items [OPTION] + + --host MySQL host + --items Comma-separated list of the items whose data you want + --user MySQL username + --pass MySQL password + --port MySQL port + --socket MySQL socket + --flags MySQL flags + --connection-timeout MySQL connection timeout + --server-id Server id to associate with a heartbeat if heartbeat usage is enabled + --nocache Do not cache results in a file + --help Show usage EOF; die($usage); @@ -256,8 +270,11 @@ function parse_cmdline( $args ) { # ============================================================================ function ss_get_mysql_stats( $options ) { # Process connection options and connect to MySQL. - global $debug, $mysql_user, $mysql_pass, $heartbeat, $cache_dir, $cache_time, - $chk_options, $mysql_host, $mysql_port, $mysql_ssl; + global $debug, $mysql_host, $mysql_user, $mysql_pass, $cache_dir, $poll_time, $chk_options, + $mysql_port, $mysql_socket, $mysql_flags, + $mysql_ssl, $mysql_ssl_key, $mysql_ssl_cert, $mysql_ssl_ca, + $mysql_connection_timeout, + $heartbeat, $heartbeat_table, $heartbeat_server_id, $heartbeat_utc; # Connect to MySQL. $user = isset($options['user']) ? $options['user'] : $mysql_user; @@ -265,26 +282,15 @@ function ss_get_mysql_stats( $options ) { $port = isset($options['port']) ? $options['port'] : $mysql_port; $host = isset($options['host']) ? $options['host'] : $mysql_host; - $heartbeat = isset($options['heartbeat']) ? 
$options['heartbeat'] : $heartbeat; + $socket = isset($options['socket']) ? $options['socket'] : $mysql_socket; + $flags = isset($options['flags']) ? $options['flags'] : $mysql_flags; + $connection_timeout = isset($options['connection-timeout']) ? $options['connection-timeout'] : $mysql_connection_timeout; + $heartbeat_server_id = isset($options['server-id']) ? $options['server-id'] : $heartbeat_server_id; + # If there is a port, or if it's a non-standard port, we add ":$port" to the # hostname. $host_str = $host.($port != 3306 ? ":$port" : ''); - debug(array('connecting to', $host_str, $user, $pass)); - if (!extension_loaded('mysqli') ) { - debug("The MySQL extension is not loaded"); - die("The MySQL extension is not loaded"); - } - if ($mysql_ssl || (isset($options['mysql_ssl']) && $options['mysql_ssl']) ) { - $conn = ((($GLOBALS["___mysqli_ston"] = mysqli_init()) && (mysqli_real_connect($GLOBALS["___mysqli_ston"], $host_str, - $user, $pass, NULL, 3306, NULL, MYSQLI_CLIENT_SSL))) ? $GLOBALS["___mysqli_ston"] : FALSE); - } - else { - $conn = ($GLOBALS["___mysqli_ston"] = mysqli_connect($host_str, $user, $pass)); - } - if (!$conn ) { - die("MySQL: " . ((is_object($GLOBALS["___mysqli_ston"])) ? mysqli_error($GLOBALS["___mysqli_ston"]) : - (($___mysqli_res = mysqli_connect_error()) ? $___mysqli_res : false))); - } + $sanitized_host = str_replace(array(":", "/"), array("", "_"), $host); $cache_file = "$cache_dir/agent-local-mysql"; @@ -292,12 +298,12 @@ function ss_get_mysql_stats( $options ) { # First, check the cache. 
$fp = null; - if (!isset($options['nocache']) ) { - if ($fp = fopen($cache_file, 'a+') ) { + if ( $cache_dir && !array_key_exists('nocache', $options) ) { + if ( $fp = fopen($cache_file, 'a+') ) { $locked = flock($fp, 1); # LOCK_SH - if ($locked ) { - if (filesize($cache_file) > 0 - && filectime($cache_file) + ($cache_time) > time() + if ( $locked ) { + if ( filesize($cache_file) > 0 + && filectime($cache_file) + ($poll_time/2) > time() && ($arr = file($cache_file)) ) {# The cache file is good to use. debug("Using the cache file"); @@ -307,12 +313,12 @@ function ss_get_mysql_stats( $options ) { else { debug("The cache file seems too small or stale"); # Escalate the lock to exclusive, so we can write to it. - if (flock($fp, 2) ) { # LOCK_EX + if ( flock($fp, 2) ) { # LOCK_EX # We might have blocked while waiting for that LOCK_EX, and # another process ran and updated it. Let's see if we can just # return the data now: - if (filesize($cache_file) > 0 - && filectime($cache_file) + ($cache_time) > time() + if ( filesize($cache_file) > 0 + && filectime($cache_file) + ($poll_time/2) > time() && ($arr = file($cache_file)) ) {# The cache file is good to use. debug("Using the cache file"); @@ -324,48 +330,79 @@ function ss_get_mysql_stats( $options ) { } } else { - debug("Couldn't lock the cache file, ignoring it."); $fp = null; + debug("Couldn't lock the cache file, ignoring it"); } } + else { + $fp = null; + debug("Couldn't open the cache file"); + } } else { - $fp = null; - debug("Couldn't open the cache file"); + debug("Caching is disabled."); } + # Connect to MySQL. 
+ debug(array('Connecting to', $host, $port, $user, $pass)); + if ( !extension_loaded('mysqli') ) { + debug("PHP MySQLi extension is not loaded"); + die("PHP MySQLi extension is not loaded"); + } + if ( $mysql_ssl ) { + $conn = mysqli_init(); + $conn->options(MYSQLI_OPT_CONNECT_TIMEOUT, $connection_timeout); + mysqli_ssl_set($conn, $mysql_ssl_key, $mysql_ssl_cert, $mysql_ssl_ca, NULL, NULL); + mysqli_real_connect($conn, $host, $user, $pass, NULL, $port, $socket, $flags); + } + else { + $conn = mysqli_init(); + $conn->options(MYSQLI_OPT_CONNECT_TIMEOUT, $connection_timeout); + mysqli_real_connect($conn, $host, $user, $pass, NULL, $port, $socket, $flags); + } + if ( mysqli_connect_errno() ) { + debug("MySQL connection failed: " . mysqli_connect_error()); + die("ERROR: " . mysqli_connect_error()); + } + + # MySQL server version. + # The form of this version number is main_version * 10000 + minor_version * 100 + sub_version + # i.e. version 5.5.44 is 50544. + $mysql_version = mysqli_get_server_version($conn); + debug("MySQL server version is " . $mysql_version); + # Set up variables. $status = array( # Holds the result of SHOW STATUS, SHOW INNODB STATUS, etc # Define some indexes so they don't cause errors with += operations. 
'relay_log_space' => null, 'binary_log_space' => null, - 'current_transactions' => null, - 'locked_transactions' => null, - 'active_transactions' => null, - 'innodb_locked_tables' => null, - 'innodb_tables_in_use' => null, - 'innodb_lock_structs' => null, - 'innodb_lock_wait_secs' => null, - 'innodb_sem_waits' => null, - 'innodb_sem_wait_time_ms'=> null, + 'current_transactions' => 0, + 'locked_transactions' => 0, + 'active_transactions' => 0, + 'innodb_locked_tables' => 0, + 'innodb_tables_in_use' => 0, + 'innodb_lock_structs' => 0, + 'innodb_lock_wait_secs' => 0, + 'innodb_sem_waits' => 0, + 'innodb_sem_wait_time_ms'=> 0, # Values for the 'state' column from SHOW PROCESSLIST (converted to # lowercase, with spaces replaced by underscores) - 'State_closing_tables' => null, - 'State_copying_to_tmp_table' => null, - 'State_end' => null, - 'State_freeing_items' => null, - 'State_init' => null, - 'State_locked' => null, - 'State_login' => null, - 'State_preparing' => null, - 'State_reading_from_net' => null, - 'State_sending_data' => null, - 'State_sorting_result' => null, - 'State_statistics' => null, - 'State_updating' => null, - 'State_writing_to_net' => null, - 'State_none' => null, - 'State_other' => null, # Everything not listed above + 'State_closing_tables' => 0, + 'State_copying_to_tmp_table' => 0, + 'State_end' => 0, + 'State_freeing_items' => 0, + 'State_init' => 0, + 'State_locked' => 0, + 'State_login' => 0, + 'State_preparing' => 0, + 'State_reading_from_net' => 0, + 'State_sending_data' => 0, + 'State_sorting_result' => 0, + 'State_statistics' => 0, + 'State_updating' => 0, + 'State_writing_to_net' => 0, + 'State_none' => 0, + 'State_other' => 0, # Everything not listed above ); # Get SHOW STATUS and convert the name-value array into a simple @@ -382,8 +419,15 @@ function ss_get_mysql_stats( $options ) { } # Get SHOW SLAVE STATUS, and add it to the $status array. 
- if ($chk_options['slave'] ) { - $result = run_query("SHOW SLAVE STATUS", $conn); + if ( $chk_options['slave'] ) { + # Leverage lock-free SHOW SLAVE STATUS if available + $result = run_query("SHOW SLAVE STATUS NONBLOCKING", $conn); + if ( !$result ) { + $result = run_query("SHOW SLAVE STATUS NOLOCK", $conn); + if ( !$result ) { + $result = run_query("SHOW SLAVE STATUS", $conn); + } + } $slave_status_rows_gotten = 0; foreach ( $result as $row ) { $slave_status_rows_gotten++; @@ -394,23 +438,30 @@ function ss_get_mysql_stats( $options ) { $status['slave_lag'] = $row['seconds_behind_master']; # Check replication heartbeat, if present. - if ($heartbeat ) { + if ( $heartbeat ) { + if ( $heartbeat_utc ) { + $now_func = 'UNIX_TIMESTAMP(UTC_TIMESTAMP)'; + } + else { + $now_func = 'UNIX_TIMESTAMP()'; + } $result2 = run_query( - "SELECT GREATEST(0, UNIX_TIMESTAMP() - UNIX_TIMESTAMP(ts) - 1)" - . " AS delay FROM $heartbeat WHERE id = 1", $conn); + "SELECT MAX($now_func - ROUND(UNIX_TIMESTAMP(ts)))" + . " AS delay FROM $heartbeat_table" + . " WHERE $heartbeat_server_id = 0 OR server_id = $heartbeat_server_id", $conn); $slave_delay_rows_gotten = 0; foreach ( $result2 as $row2 ) { $slave_delay_rows_gotten++; - if ($row2 && is_array($row2) + if ( $row2 && is_array($row2) && array_key_exists('delay', $row2) ) { $status['slave_lag'] = $row2['delay']; } else { - debug("Couldn't get slave lag from $heartbeat"); + debug("Couldn't get slave lag from $heartbeat_table"); } } - if ($slave_delay_rows_gotten == 0 ) { + if ( $slave_delay_rows_gotten == 0 ) { debug("Got nothing from heartbeat query"); } } @@ -421,11 +472,11 @@ function ss_get_mysql_stats( $options ) { $status['slave_stopped'] = ($row['slave_sql_running'] == 'Yes') ? 0 : $status['slave_lag']; } - if ($slave_status_rows_gotten == 0 ) { + if ( $slave_status_rows_gotten == 0 ) { debug("Got nothing from SHOW SLAVE STATUS"); } } - + # Get SHOW MASTER STATUS, and add it to the $status array. 
if ($chk_options['master'] && array_key_exists('log_bin', $status) @@ -449,18 +500,22 @@ function ss_get_mysql_stats( $options ) { # Get SHOW PROCESSLIST and aggregate it by state, then add it to the array # too. - if ($chk_options['procs'] ) { + if ( $chk_options['procs'] ) { $result = run_query('SHOW PROCESSLIST', $conn); foreach ( $result as $row ) { $state = $row['State']; - if (is_null($state) ) { + if ( is_null($state) ) { $state = 'NULL'; } - if ($state == '' ) { + if ( $state == '' ) { $state = 'none'; } + # MySQL 5.5 replaces the 'Locked' state with a variety of "Waiting for + # X lock" types of statuses. Wrap these all back into "Locked" because + # we don't really care about the type of locking it is. + $state = preg_replace('/^(Table lock|Waiting for .*lock)$/', 'Locked', $state); $state = str_replace(' ', '_', strtolower($state)); - if (array_key_exists("State_$state", $status) ) { + if ( array_key_exists("State_$state", $status) ) { increment($status, "State_$state", 1); } else { @@ -469,15 +524,63 @@ function ss_get_mysql_stats( $options ) { } } + # Get SHOW ENGINES to be able to determine whether InnoDB is present. + $engines = array(); + $result = run_query("SHOW ENGINES", $conn); + foreach ( $result as $row ) { + $engines[$row[0]] = $row[1]; + } + # Get SHOW INNODB STATUS and extract the desired metrics from it, then add # those to the array too. if ($chk_options['innodb'] - && array_key_exists('have_innodb', $status) - && $status['have_innodb'] == 'YES' + && array_key_exists('InnoDB', $engines) + && $engines['InnoDB'] == 'YES' + || $engines['InnoDB'] == 'DEFAULT' ) { $result = run_query("SHOW /*!50000 ENGINE*/ INNODB STATUS", $conn); $istatus_text = $result[0]['Status']; - $istatus_vals = get_innodb_array($istatus_text); + $istatus_vals = get_innodb_array($istatus_text, $mysql_version); + + # Get response time histogram from Percona Server or MariaDB if enabled. 
+ if ( $chk_options['get_qrt'] + && (( isset($status['have_response_time_distribution']) + && $status['have_response_time_distribution'] == 'YES') + || (isset($status['query_response_time_stats']) + && $status['query_response_time_stats'] == 'ON')) ) + { + debug('Getting query time histogram'); + $i = 0; + $result = run_query( + "SELECT `count`, ROUND(total * 1000000) AS total " + . "FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME " + . "WHERE `time` <> 'TOO LONG'", + $conn); + foreach ( $result as $row ) { + if ( $i > 13 ) { + # It's possible that the number of rows returned isn't 14. + # Don't add extra status counters. + break; + } + $count_key = sprintf("Query_time_count_%02d", $i); + $total_key = sprintf("Query_time_total_%02d", $i); + $status[$count_key] = $row['count']; + $status[$total_key] = $row['total']; + $i++; + } + # It's also possible that the number of rows returned is too few. + # Don't leave any status counters unassigned; it will break graphs. + while ( $i <= 13 ) { + $count_key = sprintf("Query_time_count_%02d", $i); + $total_key = sprintf("Query_time_total_%02d", $i); + $status[$count_key] = 0; + $status[$total_key] = 0; + $i++; + } + } + else { + debug('Not getting time histogram because it is not enabled'); + } # Override values from InnoDB parsing with values from SHOW STATUS, # because InnoDB status might not have everything and the SHOW STATUS is @@ -498,6 +601,8 @@ function ss_get_mysql_stats( $options ) { 'Innodb_rows_inserted' => 'rows_inserted', 'Innodb_rows_read' => 'rows_read', 'Innodb_rows_updated' => 'rows_updated', + 'Innodb_buffer_pool_reads' => 'pool_reads', + 'Innodb_buffer_pool_read_requests' => 'pool_read_requests', ); # If the SHOW STATUS value exists, override... @@ -540,9 +645,9 @@ function ss_get_mysql_stats( $options ) { } # Define the variables to output. I use shortened variable names so maybe - # it'll all fit in 1024 bytes for Cactid and Spine's benefit. 
This list must - # come right after the word MAGIC_VARS_DEFINITIONS. The Perl script parses - # it and uses it as a Perl variable. + # it'll all fit in 1024 bytes for Cactid and Spine's benefit. + # This list must come right after the word MAGIC_VARS_DEFINITIONS. The Perl script + # parses it and uses it as a Perl variable. $keys = array( 'Key_read_requests' => 'a0', 'Key_reads' => 'a1', @@ -654,7 +759,6 @@ function ss_get_mysql_stats( $options ) { 'binary_log_space' => 'cz', 'innodb_locked_tables' => 'd0', 'innodb_lock_structs' => 'd1', - 'State_closing_tables' => 'd2', 'State_copying_to_tmp_table' => 'd3', 'State_end' => 'd4', @@ -671,7 +775,6 @@ function ss_get_mysql_stats( $options ) { 'State_writing_to_net' => 'df', 'State_none' => 'dg', 'State_other' => 'dh', - 'Handler_commit' => 'di', 'Handler_delete' => 'dj', 'Handler_discover' => 'dk', @@ -713,6 +816,53 @@ function ss_get_mysql_stats( $options ) { 'key_buffer_size' => 'ei', 'Innodb_row_lock_time' => 'ej', 'Innodb_row_lock_waits' => 'ek', + + # Values not parsed by LibreNMS + 'Query_time_count_00' => 'ol', + 'Query_time_count_01' => 'om', + 'Query_time_count_02' => 'on', + 'Query_time_count_03' => 'oo', + 'Query_time_count_04' => 'op', + 'Query_time_count_05' => 'oq', + 'Query_time_count_06' => 'or', + 'Query_time_count_07' => 'os', + 'Query_time_count_08' => 'ot', + 'Query_time_count_09' => 'ou', + 'Query_time_count_10' => 'ov', + 'Query_time_count_11' => 'ow', + 'Query_time_count_12' => 'ox', + 'Query_time_count_13' => 'oy', + 'Query_time_total_00' => 'oz', + 'Query_time_total_01' => 'pg', + 'Query_time_total_02' => 'ph', + 'Query_time_total_03' => 'pi', + 'Query_time_total_04' => 'pj', + 'Query_time_total_05' => 'pk', + 'Query_time_total_06' => 'pl', + 'Query_time_total_07' => 'pm', + 'Query_time_total_08' => 'pn', + 'Query_time_total_09' => 'po', + 'Query_time_total_10' => 'pp', + 'Query_time_total_11' => 'pq', + 'Query_time_total_12' => 'pr', + 'Query_time_total_13' => 'ps', + 'wsrep_replicated_bytes' 
=> 'pt', + 'wsrep_received_bytes' => 'pu', + 'wsrep_replicated' => 'pv', + 'wsrep_received' => 'pw', + 'wsrep_local_cert_failures' => 'px', + 'wsrep_local_bf_aborts' => 'py', + 'wsrep_local_send_queue' => 'pz', + 'wsrep_local_recv_queue' => 'qg', + 'wsrep_cluster_size' => 'qh', + 'wsrep_cert_deps_distance' => 'qi', + 'wsrep_apply_window' => 'qj', + 'wsrep_commit_window' => 'qk', + 'wsrep_flow_control_paused' => 'ql', + 'wsrep_flow_control_sent' => 'qm', + 'wsrep_flow_control_recv' => 'qn', + 'pool_reads' => 'qo', + 'pool_read_requests' => 'qp', ); # Return the output. @@ -741,7 +891,7 @@ function ss_get_mysql_stats( $options ) { # MySQL 5.0, and XtraDB or enhanced InnoDB from Percona if applicable. Note # that extra leading spaces are ignored due to trim(). # ============================================================================ -function get_innodb_array($text) { +function get_innodb_array($text, $mysql_version) { $results = array( 'spin_waits' => array(), 'spin_rounds' => array(), @@ -815,13 +965,26 @@ function get_innodb_array($text) { $results['spin_rounds'][] = to_int($row[5]); $results['os_waits'][] = to_int($row[8]); } - elseif (strpos($line, 'RW-shared spins') === 0 ) { + elseif (strpos($line, 'RW-shared spins') === 0 + && strpos($line, ';') > 0 ) { # RW-shared spins 3859028, OS waits 2100750; RW-excl spins 4641946, OS waits 1530310 $results['spin_waits'][] = to_int($row[2]); $results['spin_waits'][] = to_int($row[8]); $results['os_waits'][] = to_int($row[5]); $results['os_waits'][] = to_int($row[11]); } + elseif (strpos($line, 'RW-shared spins') === 0 && strpos($line, '; RW-excl spins') === FALSE) { + # Post 5.5.17 SHOW ENGINE INNODB STATUS syntax + # RW-shared spins 604733, rounds 8107431, OS waits 241268 + $results['spin_waits'][] = to_int($row[2]); + $results['os_waits'][] = to_int($row[7]); + } + elseif (strpos($line, 'RW-excl spins') === 0) { + # Post 5.5.17 SHOW ENGINE INNODB STATUS syntax + # RW-excl spins 604733, rounds 8107431, OS waits 
241268 + $results['spin_waits'][] = to_int($row[2]); + $results['os_waits'][] = to_int($row[7]); + } elseif (strpos($line, 'seconds the semaphore:') > 0) { # --Thread 907205 has waited at handler/ha_innodb.cc line 7156 for 1.00 seconds the semaphore: increment($results, 'innodb_sem_waits', 1); @@ -830,18 +993,35 @@ function get_innodb_array($text) { } # TRANSACTIONS - elseif (strpos($line, 'Trx id counter') === 0 ) { + elseif ( strpos($line, 'Trx id counter') === 0 ) { # The beginning of the TRANSACTIONS section: start counting # transactions - # Trx id counter 0 1170664159 - # Trx id counter 861B144C - $results['innodb_transactions'] = make_bigint($row[3], $row[4]); + if ( $mysql_version < 50600 ) { + # For versions prior 5.6: two decimals or one hex + # Trx id counter 0 1170664159 + # Trx id counter 861B144C + $results['innodb_transactions'] = isset($row[4]) ? make_bigint( + $row[3], $row[4]) : base_convert($row[3], 16, 10); + } + else { + # For versions 5.6+ and MariaDB 10.x: one decimal + # Trx id counter 2903813 + $results['innodb_transactions'] = $row[3]; + } $txn_seen = TRUE; } - elseif (strpos($line, 'Purge done for trx') === 0 ) { - # Purge done for trx's n:o < 0 1170663853 undo n:o < 0 0 - # Purge done for trx's n:o < 861B135D undo n:o < 0 - $purged_to = make_bigint($row[6], $row[7] == 'undo' ? null : $row[7]); + elseif ( strpos($line, 'Purge done for trx') === 0 ) { + if ( $mysql_version < 50600 ) { + # For versions prior 5.6: two decimals or one hex + # Purge done for trx's n:o < 0 1170663853 undo n:o < 0 0 + # Purge done for trx's n:o < 861B135D undo n:o < 0 + $purged_to = $row[7] == 'undo' ? 
base_convert($row[6], 16, 10) : make_bigint($row[6], $row[7]); + } + else { + # For versions 5.6+ and MariaDB 10.x: one decimal + # Purge done for trx's n:o < 2903354 undo n:o < 0 state: running but idle + $purged_to = $row[6]; + } $results['unpurged_txns'] = big_sub($results['innodb_transactions'], $purged_to); } @@ -849,31 +1029,31 @@ function get_innodb_array($text) { # History list length 132 $results['history_list'] = to_int($row[3]); } - elseif ($txn_seen && strpos($line, '---TRANSACTION') === 0 ) { + elseif ( $txn_seen && strpos($line, '---TRANSACTION') === 0 ) { # ---TRANSACTION 0, not started, process no 13510, OS thread id 1170446656 increment($results, 'current_transactions', 1); - if (strpos($line, 'ACTIVE') > 0 ) { + if ( strpos($line, 'ACTIVE') > 0 ) { increment($results, 'active_transactions', 1); } } - elseif ($txn_seen && strpos($line, '------- TRX HAS BEEN') === 0 ) { + elseif ( $txn_seen && strpos($line, '------- TRX HAS BEEN') === 0 ) { # ------- TRX HAS BEEN WAITING 32 SEC FOR THIS LOCK TO BE GRANTED: increment($results, 'innodb_lock_wait_secs', to_int($row[5])); } - elseif (strpos($line, 'read views open inside InnoDB') > 0 ) { + elseif ( strpos($line, 'read views open inside InnoDB') > 0 ) { # 1 read views open inside InnoDB $results['read_views'] = to_int($row[0]); } - elseif (strpos($line, 'mysql tables in use') === 0 ) { + elseif ( strpos($line, 'mysql tables in use') === 0 ) { # mysql tables in use 2, locked 2 increment($results, 'innodb_tables_in_use', to_int($row[4])); increment($results, 'innodb_locked_tables', to_int($row[6])); } - elseif ($txn_seen && strpos($line, 'lock struct(s)') > 0 ) { + elseif ( $txn_seen && strpos($line, 'lock struct(s)') > 0 ) { # 23 lock struct(s), heap size 3024, undo log entries 27 # LOCK WAIT 12 lock struct(s), heap size 3024, undo log entries 5 # LOCK WAIT 2 lock struct(s), heap size 368 - if (strpos($line, 'LOCK WAIT') === 0 ) { + if ( strpos($line, 'LOCK WAIT') === 0 ) { increment($results, 
'innodb_lock_structs', to_int($row[2])); increment($results, 'locked_transactions', 1); } @@ -900,7 +1080,7 @@ function get_innodb_array($text) { $results['pending_aio_log_ios'] = to_int($row[6]); $results['pending_aio_sync_ios'] = to_int($row[9]); } - elseif (strpos($line, 'Pending flushes (fsync)') === 0 ) { + elseif ( strpos($line, 'Pending flushes (fsync)') === 0 ) { # Pending flushes (fsync) log: 0; buffer pool: 0 $results['pending_log_flushes'] = to_int($row[4]); $results['pending_buf_pool_flushes'] = to_int($row[7]); @@ -921,6 +1101,16 @@ function get_innodb_array($text) { $results['ibuf_used_cells'] = to_int($row[2]); $results['ibuf_free_cells'] = to_int($row[6]); $results['ibuf_cell_count'] = to_int($row[9]); + if (strpos($line, 'merges')) { + $results['ibuf_merges'] = to_int($row[10]); + } + } + elseif (strpos($line, ', delete mark ') > 0 && strpos($prev_line, 'merged operations:') === 0 ) { + # Output of show engine innodb status has changed in 5.5 + # merged operations: + # insert 593983, delete mark 387006, delete 73092 + $results['ibuf_inserts'] = to_int($row[1]); + $results['ibuf_merged'] = to_int($row[1]) + to_int($row[4]) + to_int($row[6]); } elseif (strpos($line, ' merged recs, ') > 0 ) { # 19817685 inserts, 19817684 merged recs, 3552620 merges @@ -976,40 +1166,41 @@ function get_innodb_array($text) { } # BUFFER POOL AND MEMORY - elseif (strpos($line, "Total memory allocated") === 0 ) { + elseif (strpos($line, "Total memory allocated") === 0 && strpos($line, "in additional pool allocated") > 0 ) { # Total memory allocated 29642194944; in additional pool allocated 0 + # Total memory allocated by read views 96 $results['total_mem_alloc'] = to_int($row[3]); $results['additional_pool_alloc'] = to_int($row[8]); } - elseif (strpos($line, 'Adaptive hash index ') === 0 ) { + elseif(strpos($line, 'Adaptive hash index ') === 0 ) { # Adaptive hash index 1538240664 (186998824 + 1351241840) $results['adaptive_hash_memory'] = to_int($row[3]); } - elseif 
(strpos($line, 'Page hash ') === 0 ) { + elseif(strpos($line, 'Page hash ') === 0 ) { # Page hash 11688584 $results['page_hash_memory'] = to_int($row[2]); } - elseif (strpos($line, 'Dictionary cache ') === 0 ) { + elseif(strpos($line, 'Dictionary cache ') === 0 ) { # Dictionary cache 145525560 (140250984 + 5274576) $results['dictionary_cache_memory'] = to_int($row[2]); } - elseif (strpos($line, 'File system ') === 0 ) { + elseif(strpos($line, 'File system ') === 0 ) { # File system 313848 (82672 + 231176) $results['file_system_memory'] = to_int($row[2]); } - elseif (strpos($line, 'Lock system ') === 0 ) { + elseif(strpos($line, 'Lock system ') === 0 ) { # Lock system 29232616 (29219368 + 13248) $results['lock_system_memory'] = to_int($row[2]); } - elseif (strpos($line, 'Recovery system ') === 0 ) { + elseif(strpos($line, 'Recovery system ') === 0 ) { # Recovery system 0 (0 + 0) $results['recovery_system_memory'] = to_int($row[2]); } - elseif (strpos($line, 'Threads ') === 0 ) { + elseif(strpos($line, 'Threads ') === 0 ) { # Threads 409336 (406936 + 2400) $results['thread_hash_memory'] = to_int($row[1]); } - elseif (strpos($line, 'innodb_io_pattern ') === 0 ) { + elseif(strpos($line, 'innodb_io_pattern ') === 0 ) { # innodb_io_pattern 0 (0 + 0) $results['innodb_io_pattern_memory'] = to_int($row[1]); } @@ -1057,6 +1248,7 @@ function get_innodb_array($text) { $results['queries_inside'] = to_int($row[0]); $results['queries_queued'] = to_int($row[4]); } + $prev_line = $line; } foreach ( array('spin_waits', 'spin_rounds', 'os_waits') as $key ) { @@ -1067,16 +1259,9 @@ function get_innodb_array($text) { $results['uncheckpointed_bytes'] = big_sub($results['log_bytes_written'], $results['last_checkpoint']); - -# foreach ($results as $key => $value) { -# echo(strtolower($key).":".strtolower($value)."\n"); -# } - - return $results; } - # ============================================================================ # Returns a bigint from two ulint or a single hex number. 
This is tested in # t/mysql_stats.php and copied, without tests, to ss_get_by_ssh.php. @@ -1121,27 +1306,34 @@ function to_int ( $str ) { # ============================================================================ # Wrap mysql_query in error-handling, and instead of returning the result, # return an array of arrays in the result. +# ============================================================================ + # ============================================================================ function run_query($sql, $conn) { global $debug; debug($sql); - $result = @mysqli_query( $conn, $sql); - if ($debug ) { - $error = @((is_object($conn)) ? mysqli_error($conn) : (($___mysqli_res = mysqli_connect_error()) ? $___mysqli_res : false)); - if ($error ) { + $result = @mysqli_query($conn, $sql); + if ( $debug && strpos($sql, 'SHOW SLAVE STATUS ') === false ) { + $error = @mysqli_error($conn); + if ( $error ) { debug(array($sql, $error)); die("SQLERR $error in $sql"); } } $array = array(); - while ( $row = @mysqli_fetch_array($result) ) { - $array[] = $row; + $count = @mysqli_num_rows($result); + if ( $count > 10000 ) { + debug('Abnormal number of rows returned: ' . $count); + } + else { + while ( $row = @mysqli_fetch_array($result) ) { + $array[] = $row; + } } debug(array($sql, $array)); return $array; } -# ============================================================================ # Safely increments a value that might be null. 
# ============================================================================ function increment(&$arr, $key, $howmuch) { From fca5da5882bb40588a70587e5fd875f2d722ba04 Mon Sep 17 00:00:00 2001 From: Daniel Bull Date: Fri, 27 Oct 2017 06:41:05 +0100 Subject: [PATCH 141/497] Fix: Apache SNMP extend IndexError (#116) See issue for more information: https://github.com/librenms/librenms-agent/issues/95 --- snmp/apache-stats.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/apache-stats.py b/snmp/apache-stats.py index 9fb62e644..378d858e8 100755 --- a/snmp/apache-stats.py +++ b/snmp/apache-stats.py @@ -60,7 +60,7 @@ elif fields[0] == 'Total kBytes': # turn into base (byte) value params[fields[0]] = int(fields[1])*1024 - else: + elif len(fields) > 1: # just store everything else params[fields[0]] = fields[1] From 9c8b94ae485880949e4eb105eef9e13173ab91b2 Mon Sep 17 00:00:00 2001 From: arrmo Date: Mon, 4 Dec 2017 14:11:17 -0600 Subject: [PATCH 142/497] hddtemp, ignore devices not supporting SMART (#153) --- agent-local/hddtemp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/agent-local/hddtemp b/agent-local/hddtemp index 9098ec53a..9f776829c 100755 --- a/agent-local/hddtemp +++ b/agent-local/hddtemp @@ -32,7 +32,7 @@ if [ "${hddtemp}" != "" ]; then else output=`${hddtemp} -w -q ${disks} 2>/dev/null` fi - content=`echo "$output" | awk -F": " 'BEGIN{ ORS="" }{ print "|"$1"|"$2"|"$3"|";} ' | sed 's/[° ]C|/|C|/g' | sed 's/[° ]F|/|F|/g' | tr -cd '\12\14\40-\176'` + content=`echo "$output" | awk '{ if ($0 !~ /not available/) { print $0 } }' | awk -F": " 'BEGIN{ ORS="" }{ print "|"$1"|"$2"|"$3"|";} ' | sed 's/[° ]C|/|C|/g' | sed 's/[° ]F|/|F|/g' | tr -cd '\12\14\40-\176'` if [ "${content}" != "" ]; then echo '<<>>' echo ${content} From 83780ea4eecaa0664ec3b553dcacae0356fba2d4 Mon Sep 17 00:00:00 2001 From: Slashdoom <5092581+slashdoom@users.noreply.github.com> Date: Wed, 13 Dec 2017 16:13:10 +1300 Subject: [PATCH 143/497] freeradius.sh: 
new agent for incoming main PR (#151) * Update os-updates.sh * Update os-updates.sh * Update os-updates.sh * Create freeradius.sh * Update freeradius.sh * Update freeradius.sh --- snmp/freeradius.sh | 64 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 64 insertions(+) create mode 100644 snmp/freeradius.sh diff --git a/snmp/freeradius.sh b/snmp/freeradius.sh new file mode 100644 index 000000000..088acf3c1 --- /dev/null +++ b/snmp/freeradius.sh @@ -0,0 +1,64 @@ +#!/usr/bin/env bash + +# Set 0 for SNMP extend; set to 1 for Check_MK agent +AGENT=0 + +# Set FreeRADIUS status_server details +RADIUS_SERVER='localhost' +RADIUS_PORT='18121' +RADIUS_KEY='adminsecret' + +# Default radclient access request, shouldn't need to be changed +RADIUS_STATUS_CMD='Message-Authenticator = 0x00, FreeRADIUS-Statistics-Type = 31, Response-Packet-Type = Access-Accept' + +# Pathes for grep and radclient executables, should work if within PATH +BIN_GREP="$(command -v grep)" +BIN_RADCLIENT="$(command -v radclient)" + +if [ $AGENT == 1 ]; then + echo "<<>>" +fi + +RESULT=`echo "$RADIUS_STATUS_CMD" | $BIN_RADCLIENT -x $RADIUS_SERVER:$RADIUS_PORT status $RADIUS_KEY` + +echo $RESULT | grep -o 'FreeRADIUS-Total-Access-Requests = [[:digit:]]*' +echo $RESULT | grep -o 'FreeRADIUS-Total-Access-Accepts = [[:digit:]]*' +echo $RESULT | grep -o 'FreeRADIUS-Total-Access-Rejects = [[:digit:]]*' +echo $RESULT | grep -o 'FreeRADIUS-Total-Access-Challenges = [[:digit:]]*' +echo $RESULT | grep -o 'FreeRADIUS-Total-Auth-Responses = [[:digit:]]*' +echo $RESULT | grep -o 'FreeRADIUS-Total-Auth-Duplicate-Requests = [[:digit:]]*' +echo $RESULT | grep -o 'FreeRADIUS-Total-Auth-Malformed-Requests = [[:digit:]]*' +echo $RESULT | grep -o 'FreeRADIUS-Total-Auth-Invalid-Requests = [[:digit:]]*' +echo $RESULT | grep -o 'FreeRADIUS-Total-Auth-Dropped-Requests = [[:digit:]]*' +echo $RESULT | grep -o 'FreeRADIUS-Total-Auth-Unknown-Types = [[:digit:]]*' +echo $RESULT | grep -o 
'FreeRADIUS-Total-Accounting-Requests = [[:digit:]]*' +echo $RESULT | grep -o 'FreeRADIUS-Total-Accounting-Responses = [[:digit:]]*' +echo $RESULT | grep -o 'FreeRADIUS-Total-Acct-Duplicate-Requests = [[:digit:]]*' +echo $RESULT | grep -o 'FreeRADIUS-Total-Acct-Malformed-Requests = [[:digit:]]*' +echo $RESULT | grep -o 'FreeRADIUS-Total-Acct-Invalid-Requests = [[:digit:]]*' +echo $RESULT | grep -o 'FreeRADIUS-Total-Acct-Dropped-Requests = [[:digit:]]*' +echo $RESULT | grep -o 'FreeRADIUS-Total-Acct-Unknown-Types = [[:digit:]]*' +echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Access-Requests = [[:digit:]]*' +echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Access-Accepts = [[:digit:]]*' +echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Access-Rejects = [[:digit:]]*' +echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Access-Challenges = [[:digit:]]*' +echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Auth-Responses = [[:digit:]]*' +echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Auth-Duplicate-Requests = [[:digit:]]*' +echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Auth-Malformed-Requests = [[:digit:]]*' +echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Auth-Invalid-Requests = [[:digit:]]*' +echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Auth-Dropped-Requests = [[:digit:]]*' +echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Auth-Unknown-Types = [[:digit:]]*' +echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Accounting-Requests = [[:digit:]]*' +echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Accounting-Responses = [[:digit:]]*' +echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Acct-Duplicate-Requests = [[:digit:]]*' +echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Acct-Malformed-Requests = [[:digit:]]*' +echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Acct-Invalid-Requests = [[:digit:]]*' +echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Acct-Dropped-Requests = [[:digit:]]*' +echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Acct-Unknown-Types = [[:digit:]]*' +echo $RESULT | grep -o 
'FreeRADIUS-Queue-Len-Internal = [[:digit:]]*' +echo $RESULT | grep -o 'FreeRADIUS-Queue-Len-Proxy = [[:digit:]]*' +echo $RESULT | grep -o 'FreeRADIUS-Queue-Len-Auth = [[:digit:]]*' +echo $RESULT | grep -o 'FreeRADIUS-Queue-Len-Acct = [[:digit:]]*' +echo $RESULT | grep -o 'FreeRADIUS-Queue-Len-Detail = [[:digit:]]*' +echo $RESULT | grep -o 'FreeRADIUS-Queue-PPS-In = [[:digit:]]*' +echo $RESULT | grep -o 'FreeRADIUS-Queue-PPS-Out = [[:digit:]]*' From d5e9cdd9e72d7b1c8580c7a9900869cd05ed3e74 Mon Sep 17 00:00:00 2001 From: VVelox Date: Sat, 30 Dec 2017 05:39:36 -0600 Subject: [PATCH 144/497] update the fail2ban stuff (#155) Dropping firewall checking as the new fail2ban uses pf and anchors on FreeBSD, which while esoteric as fuck works nicely and is reliable. --- snmp/fail2ban | 250 +++++++++++++++++++++++++++++--------------------- 1 file changed, 146 insertions(+), 104 deletions(-) diff --git a/snmp/fail2ban b/snmp/fail2ban index 95cf9e31c..117d2c162 100644 --- a/snmp/fail2ban +++ b/snmp/fail2ban @@ -1,68 +1,103 @@ #!/usr/bin/env perl +# Author: Zane C. Bowers-Hadley -# Add this to your snmpd.conf as below. -# extend fail2ban /etc/snmp/fail2ban -# -# Then add to your cron tab, if you wish to use caching. -# */3 * * * * /etc/snmp/fail2ban.pl -u +# https://docs.librenms.org/#Extensions/Applications/#fail2ban +# See the above for additional information not documented in the POD below. -#make sure this path is correct -my $f2bc="/usr/bin/env fail2ban-client"; +=head1 DESCRIPTION -#make sure this path is correct -my $iptablesPath="/usr/bin/env iptables"; +A basic SNMP extend for polling fail2ban for LibreNMS. -# The cache file to use, if using caching. -my $cache='/var/cache/fail2ban'; +=head1 SWITCHES + +=head2 -c + +Prints the cache file. + +=head2 -C + +Uses the specified file as the cache file. + +If not specified, /var/cache/fail2ban is used. + +=head2 -f + +This is the path to the fail2ban-client if needed. 
+ +If not specified, "/usr/bin/env fail2ban-client" is used. + +=head2 -u + +Updates the cache. + +=head2 -U + +When used with -c, allows attempted cache updating if the file is older +than 360 seconds or does not exist. + +=head1 CRON EXAMPLE + + */3 * * * * /etc/snmp/fail2ban -u + +or + + */3 * * * * /etc/snmp/fail2ban -u -C /foo/bar/cache + +3 minutes is used as LibreNMS runs every 5 minutes, this helps ensure it +is most likely up to date in between runs. + + +=head1 SNMPD SETUP EXAMPLES + + extend fail2ban /etc/snmp/fail2ban -# Please verify that the tables below are correct for your installation -my @linuxChains=('failban','f2b'); -my $freebsdPFtable='fail2ban'; +The above will set it up for basic uncached usage. + +This is likely fine for most configurations. + + extend fail2ban /etc/snmp/fail2ban -c + +Will use the cache. + + extend fail2ban /etc/snmp/fail2ban -c -U + +Will use the cache and update if needed. + + extend fail2ban /etc/snmp/fail2ban -f /foo/bin/fail2ban-client + +Run it with fail2ban being installed under /foo the the path to +fail2ban-cleint being /foo/bin/fail2ban-client. + +=cut -## -## you should not have to touch anything below this -## use strict; use warnings; use Getopt::Std; +#fail2ban-client path +my $f2bc="/usr/bin/env fail2ban-client"; + +#the path to the cache +my $cache='/var/cache/fail2ban'; + $Getopt::Std::STANDARD_HELP_VERSION = 1; sub main::VERSION_MESSAGE { - print "fail2ban-client SNMP extend 0.0.0\n"; + print "fail2ban-client SNMP extend 1.0.0\n"; }; - sub main::HELP_MESSAGE { print "\n". - "-u Update '".$cache."'\n"; + "-c Print from the cache.\n". + "-C Use this as the cache file.\n". + "-f The fail2ban-client path if needed.". + "-u Update the cache, '".$cache."'\n". + "-U When used with -c, allow update of the cache file if it does not exist or is older than 360 seconds.". + "\n". 
+ "Unless -c or -u is given, it just talks to fail2ban-client and prints the results.\n"; } -#gets the options -my %opts=(); -getopts('u', \%opts); - -#if set to 1, no cache will be written and it will be printed instead -my $noWrite=0; - -if ( ! defined( $opts{u} ) ){ - my ($dev,$ino,$mode,$nlink,$uid,$gid,$rdev,$size, - $atime,$mtime,$ctime,$blksize,$blocks) = stat($cache); - - if (( -f $cache ) && defined( $mtime ) && ( (time-$mtime) < 360 )){ - my $old=''; - open(my $readfh, "<", $cache) or die "Can't open '".$cache."'"; - # if this is over 2048, something is most likely wrong - read($readfh , $old , 10240); - close($readfh); - print $old; - }else{ - $opts{u}=1; - $noWrite=1; - } -} - -if (defined( $opts{u} )){ - +#generats stats +sub stats{ #gets a list of jails my $jailsOutput=`$f2bc status`; my @jailsOutputA=split(/\n/, $jailsOutput); @@ -91,69 +126,76 @@ if (defined( $opts{u} )){ $int++; } - ## - ## process the firewall - ## - - my $os=`uname`; + return $total."\n".$toReturn; +} - my $firewalled=0; - - if ( $os =~ '^FreeBSD' ){ - $firewalled=`/sbin/pfctl -t $freebsdPFtable -T show | /usr/bin/grep -c .`; - chomp($firewalled); - }; +#updates $cache +sub cacheUpdate{ + my $stats=stats; + + open(my $writefh, ">", $cache) or die "Can't open '".$cache."'"; + print $writefh $stats; + close($writefh); +} + +#prints $cache +sub cachePrint{ + my $old=''; + open(my $readfh, "<", $cache) or die "Can't open '".$cache."'"; + # if this is over 2048, something is most likely wrong + read($readfh , $old , 10240); + close($readfh); + print $old; +} + +#gets the options +my %opts=(); +getopts('uUcC:f:', \%opts); + +#use custom cache file if needed +if ( defined( $opts{C} ) ){ + $cache=$opts{C}; +} + +#use custom fail2ban location if needed +if ( defined( $opts{f} ) ){ + $f2bc=$opts{f}; +} + +#use the cache +if ( defined( $opts{c} ) ){ + my ($dev,$ino,$mode,$nlink,$uid,$gid,$rdev,$size, + $atime,$mtime,$ctime,$blksize,$blocks) = stat($cache); - if ( $os =~ '^Linux' ){ - my 
$iptables=`$iptablesPath -L -n`; - my @iptablesA=split( /\n/, $iptables ); - - #check each line - my $int=0; - my $count=0; - while( defined( $iptablesA[$int] ) ){ - my $line=$iptablesA[$int]; - - #stop counting if we have a blank line - if ( $line =~ /^$/ ){ - $count=0; - } - - #count /^REJECT/ lines, if we are counting - if ( ( $line =~ /^REJECT/ ) && ( $count ) ){ - $firewalled++; - } - - #check if this is a chain we should count - if ( $line =~ /^Chain/ ){ - my $linuxChainsInt=0; - # check if any of the specified names hit and if so start counting - while( defined( $linuxChains[$linuxChainsInt] ) ){ - my $chain=$linuxChains[$linuxChainsInt]; - if ( $line =~ /^Chain $chain/ ){ - $count=1; - } - - $linuxChainsInt++; - } - } - - $int++; + if (( -f $cache ) && defined( $mtime ) && ( (time-$mtime) < 360 )){ + #cache exists and time is fine + cachePrint; + exit 0; + }else{ + #cache does not exist or is old + if ( $opts{U} ){ + #allowed to update it via -U + cacheUpdate; + cachePrint; + exit 0; + }else{ + #-U not given + warn("'".$cache."' does not exist or is to old and -U was not given"); + exit 1; } - } + warn('we should never get here...'); + exit 2; +} - ## - ## render the output - ## - if ( ! $noWrite ){ - open(my $writefh, ">", $cache) or die "Can't open '".$cache."'"; - print $writefh $total."\n".$firewalled."\n".$toReturn; - close($writefh); - }else{ - print $total."\n".$firewalled."\n".$toReturn; - } - +#update the cache +if (defined( $opts{u} )){ + cacheUpdate; exit 0; } + +#no cache opions given, just print it +print &stats; + +exit 0; From 75e4d7e51dcc32da63c33a536c0560c052582f63 Mon Sep 17 00:00:00 2001 From: VVelox Date: Sat, 30 Dec 2017 05:42:37 -0600 Subject: [PATCH 145/497] ZFS SNMP agent :3 <3 (#156) * Add it as it currently is. Needs to be moved over to JSON * rename it to zfs-freebsd as it is FreeBSD specific now uses JSON * misc. 
updates and document it all * minor spelling correction --- snmp/zfs-freebsd | 266 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 266 insertions(+) create mode 100755 snmp/zfs-freebsd diff --git a/snmp/zfs-freebsd b/snmp/zfs-freebsd new file mode 100755 index 000000000..db6f33b2d --- /dev/null +++ b/snmp/zfs-freebsd @@ -0,0 +1,266 @@ +#!/usr/bin/env perl + +=head1 DESCRIPTION + +This is a SNMP extend for ZFS and FreeBSD for use with LibreNMS. + +For more information, see L. + +=head1 SWITCHES + +=head2 -p + +Pretty print the JSON. + +=head1 SNMPD SETUP EXAMPLES + + extend zfs /etc/snmp/zfs-freebsd + +=cut + +#Copyright (c) 2017, Zane C. Bowers-Hadley +#All rights reserved. +# +#Redistribution and use in source and binary forms, with or without modification, +#are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +#ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +#WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +#IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, +#INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +#BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +#LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +#OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +#THE POSSIBILITY OF SUCH DAMAGE. 
+ +# Many thanks to Ben Rockwood, Jason J. Hellenthal, and Martin Matuska +# for zfs-stats and figuring out the math for all the stats + +use strict; +use warnings; +use JSON; +use Getopt::Std; + +$Getopt::Std::STANDARD_HELP_VERSION = 1; +sub main::VERSION_MESSAGE { + print "FreeBSD ZFS stats extend 0.0.0\n"; +} + +sub main::HELP_MESSAGE { + +} + +#this will be dumped to json at the end +my %tojson; + +#gets the options +my %opts=(); +getopts('p', \%opts); + +my $sysctls; +my @to_pull=( + 'kstat.zfs', + 'vfs.zfs', + ); +my @sysctls_pull = `/sbin/sysctl -q @to_pull`; +foreach my $stat (@sysctls_pull) { + chomp( $stat ); + my ( $var, $val ) = split(/:/, $stat, 2); + + $val =~ s/^ //; + $sysctls->{$var}=$val; +} + +# does not seem to exist for me, but some of these don't seem to be created till needed +if ( ! defined( $sysctls->{"kstat.zfs.misc.arcstats.recycle_miss"} ) ) { + $sysctls->{"kstat.zfs.misc.arcstats.recycle_miss"}=0; +} + +## +## ARC misc +## +$tojson{deleted}=$sysctls->{"kstat.zfs.misc.arcstats.deleted"}; +$tojson{evict_skip}=$sysctls->{"kstat.zfs.misc.arcstats.evict_skip"}; +$tojson{mutex_skip}=$sysctls->{'kstat.zfs.misc.arcstats.mutex_miss'}; +$tojson{recycle_miss}=$sysctls->{"kstat.zfs.misc.arcstats.recycle_miss"}; + +## +## ARC size +## +my $target_size_percent = $sysctls->{"kstat.zfs.misc.arcstats.c"} / $sysctls->{"kstat.zfs.misc.arcstats.c_max"} * 100; +my $arc_size_percent = $sysctls->{"kstat.zfs.misc.arcstats.size"} / $sysctls->{"kstat.zfs.misc.arcstats.c_max"} * 100; +my $target_size_adaptive_ratio = $sysctls->{"kstat.zfs.misc.arcstats.c"} / $sysctls->{"kstat.zfs.misc.arcstats.c_max"}; +my $min_size_percent = $sysctls->{"kstat.zfs.misc.arcstats.c_min"} / $sysctls->{"kstat.zfs.misc.arcstats.c_max"} * 100; + +$tojson{arc_size}=$sysctls->{"kstat.zfs.misc.arcstats.size"}; +$tojson{target_size_max}=$sysctls->{"kstat.zfs.misc.arcstats.c_max"}; +$tojson{target_size_min}=$sysctls->{"kstat.zfs.misc.arcstats.c_min"}; 
+$tojson{target_size}=$sysctls->{"kstat.zfs.misc.arcstats.c"}; +$tojson{target_size_per}=$target_size_percent; +$tojson{arc_size_per}=$arc_size_percent; +$tojson{target_size_arat}=$target_size_adaptive_ratio; +$tojson{min_size_per}=$min_size_percent; + +## +## ARC size breakdown +## +my $mfu_size; +my $recently_used_percent; +my $frequently_used_percent; +if ( $sysctls->{"kstat.zfs.misc.arcstats.size"} >= $sysctls->{"kstat.zfs.misc.arcstats.size"} ){ + $mfu_size = $sysctls->{"kstat.zfs.misc.arcstats.size"} - $sysctls->{"kstat.zfs.misc.arcstats.p"}; + $recently_used_percent = $sysctls->{"kstat.zfs.misc.arcstats.p"} / $sysctls->{"kstat.zfs.misc.arcstats.size"} * 100; + $frequently_used_percent = $mfu_size / $sysctls->{"kstat.zfs.misc.arcstats.size"} * 100; +}else{ + $mfu_size = $sysctls->{"kstat.zfs.misc.arcstats.c"} - $sysctls->{"kstat.zfs.misc.arcstats.p"}; + $recently_used_percent = $sysctls->{"kstat.zfs.misc.arcstats.p"} / $sysctls->{"kstat.zfs.misc.arcstats.c"} * 100; + $frequently_used_percent = $mfu_size / $sysctls->{"kstat.zfs.misc.arcstats.c"} * 100; +} + +$tojson{mfu_size}=$mfu_size; +$tojson{p}=$sysctls->{"kstat.zfs.misc.arcstats.p"}; +$tojson{rec_used_per}=$recently_used_percent; +$tojson{freq_used_per}=$frequently_used_percent; + +## +## ARC efficiency +## +my $arc_hits = $sysctls->{"kstat.zfs.misc.arcstats.hits"}; +my $arc_misses = $sysctls->{"kstat.zfs.misc.arcstats.misses"}; +my $demand_data_hits = $sysctls->{"kstat.zfs.misc.arcstats.demand_data_hits"}; +my $demand_data_misses = $sysctls->{"kstat.zfs.misc.arcstats.demand_data_misses"}; +my $demand_metadata_hits = $sysctls->{"kstat.zfs.misc.arcstats.demand_metadata_hits"}; +my $demand_metadata_misses = $sysctls->{"kstat.zfs.misc.arcstats.demand_metadata_misses"}; +my $mfu_ghost_hits = $sysctls->{"kstat.zfs.misc.arcstats.mfu_ghost_hits"}; +my $mfu_hits = $sysctls->{"kstat.zfs.misc.arcstats.mfu_hits"}; +my $mru_ghost_hits = $sysctls->{"kstat.zfs.misc.arcstats.mru_ghost_hits"}; +my $mru_hits = 
$sysctls->{"kstat.zfs.misc.arcstats.mru_hits"}; +my $prefetch_data_hits = $sysctls->{"kstat.zfs.misc.arcstats.prefetch_data_hits"}; +my $prefetch_data_misses = $sysctls->{"kstat.zfs.misc.arcstats.prefetch_data_misses"}; +my $prefetch_metadata_hits = $sysctls->{"kstat.zfs.misc.arcstats.prefetch_metadata_hits"}; +my $prefetch_metadata_misses = $sysctls->{"kstat.zfs.misc.arcstats.prefetch_metadata_misses"}; + +my $anon_hits = $arc_hits - ($mfu_hits + $mru_hits + $mfu_ghost_hits + $mru_ghost_hits); +my $arc_accesses_total = $arc_hits + $arc_misses; +my $demand_data_total = $demand_data_hits + $demand_data_misses; +my $prefetch_data_total = $prefetch_data_hits + $prefetch_data_misses; +my $real_hits = $mfu_hits + $mru_hits; + +my $cache_hit_percent = $arc_hits / $arc_accesses_total * 100; +my $cache_miss_percent = $arc_misses / $arc_accesses_total * 100; +my $actual_hit_percent = $real_hits / $arc_accesses_total * 100; +my $data_demand_percent = $demand_data_hits / $demand_data_total * 100; + +my $data_prefetch_percent; +if ( $prefetch_data_total != 0 ) { + $data_prefetch_percent = $prefetch_data_hits / $prefetch_data_total * 100; +}else{ + $data_prefetch_percent = 0; +} + +my $anon_hits_percent; +if ( $anon_hits != 0 ) { + $anon_hits_percent = $anon_hits / $arc_hits * 100; +}else{ + $anon_hits_percent=0; +} + +my $mru_percent = $mru_hits / $arc_hits * 100; +my $mfu_percent = $mfu_hits / $arc_hits * 100; +my $mru_ghost_percent = $mru_ghost_hits / $arc_hits * 100; +my $mfu_ghost_percent = $mfu_ghost_hits / $arc_hits * 100; + +my $demand_hits_percent = $demand_data_hits / $arc_hits * 100; +my $prefetch_hits_percent = $prefetch_data_hits / $arc_hits * 100; +my $metadata_hits_percent = $demand_metadata_hits / $arc_hits * 100; +my $prefetch_metadata_hits_percent = $prefetch_metadata_hits / $arc_hits * 100; + +my $demand_misses_percent = $demand_data_misses / $arc_misses * 100; +my $prefetch_misses_percent = $prefetch_data_misses / $arc_misses * 100; +my 
$metadata_misses_percent = $demand_metadata_misses / $arc_misses * 100; +my $prefetch_metadata_misses_percent = $prefetch_metadata_misses / $arc_misses * 100; + +# ARC misc. efficient stats +$tojson{arc_hits}=$arc_hits; +$tojson{arc_misses}=$arc_misses; +$tojson{demand_data_hits}=$demand_data_hits; +$tojson{demand_data_misses}=$demand_data_misses; +$tojson{demand_meta_hits}=$demand_metadata_hits; +$tojson{demand_meta_misses}=$demand_metadata_misses; +$tojson{mfu_ghost_hits}=$mfu_ghost_hits; +$tojson{mfu_hits}=$mfu_hits; +$tojson{mru_ghost_hits}=$mru_ghost_hits; +$tojson{mru_hits}=$mru_hits; +$tojson{pre_data_hits}=$prefetch_data_hits; +$tojson{pre_data_misses}=$prefetch_data_misses; +$tojson{pre_meta_hits}=$prefetch_metadata_hits; +$tojson{pre_meta_misses}=$prefetch_metadata_misses; +$tojson{anon_hits}=$anon_hits; +$tojson{arc_accesses_total}=$arc_accesses_total; +$tojson{demand_data_total}=$demand_data_total; +$tojson{pre_data_total}=$prefetch_data_total; +$tojson{real_hits}=$real_hits; + +# ARC efficient percents +$tojson{cache_hits_per}=$cache_hit_percent; +$tojson{cache_miss_per}=$cache_miss_percent; +$tojson{actual_hit_per}=$actual_hit_percent; +$tojson{data_demand_per}=$data_demand_percent; +$tojson{data_pre_per}=$data_prefetch_percent; +$tojson{anon_hits_per}=$anon_hits_percent; +$tojson{mru_per}=$mru_percent; +$tojson{mfu_per}=$mfu_percent; +$tojson{mru_ghost_per}=$mru_ghost_percent; +$tojson{mfu_ghost_per}=$mfu_ghost_percent; +$tojson{demand_hits_per}=$demand_hits_percent; +$tojson{pre_hits_per}=$prefetch_hits_percent; +$tojson{meta_hits_per}=$metadata_hits_percent; +$tojson{pre_meta_hits_per}=$prefetch_metadata_hits_percent; +$tojson{demand_misses_per}=$demand_misses_percent; +$tojson{pre_misses_per}=$prefetch_misses_percent; +$tojson{meta_misses_per}=$metadata_misses_percent; +$tojson{pre_meta_misses_per}=$prefetch_metadata_misses_percent; + +#process each pool and shove them into JSON +my $zpool_output=`/sbin/zpool list -pH`; +my @pools=split( /\n/, 
$zpool_output ); +my $pools_int=0; +my @toShoveIntoJSON; +while ( defined( $pools[$pools_int] ) ) { + my %newPool; + + my $pool=$pools[$pools_int]; + $pool =~ s/\t/,/g; + $pool =~ s/\,\-\,/\,0\,/g; + $pool =~ s/\%//g; + $pool =~ s/\,([0-1\.]*)x\,/,$1,/; + + ( $newPool{name}, $newPool{size}, $newPool{alloc}, $newPool{free}, $newPool{expandsz}, $newPool{frag}, $newPool{cap}, $newPool{dedup} )=split(/\,/, $pool); + + push(@toShoveIntoJSON, \%newPool); + + $pools_int++; +} +$tojson{pools}=\@toShoveIntoJSON; + +my $j=JSON->new; + +if ( $opts{p} ){ + $j->pretty(1); +} + +print $j->encode( \%tojson ); + +if (! $opts{p} ){ + print "\n"; +} + +exit 0; From 6e3dcc2120068d058ee4ba571442cfa3161aafe2 Mon Sep 17 00:00:00 2001 From: crcro Date: Sat, 6 Jan 2018 22:06:45 +0200 Subject: [PATCH 146/497] extend: powerdns-dnsdist (#158) * powerdns-dnsdist app * fix script in help * removed local data manipulation * again name of file in script help * removed personal api info --- snmp/powerdns-dnsdist | 165 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 165 insertions(+) create mode 100644 snmp/powerdns-dnsdist diff --git a/snmp/powerdns-dnsdist b/snmp/powerdns-dnsdist new file mode 100644 index 000000000..87eda58bd --- /dev/null +++ b/snmp/powerdns-dnsdist @@ -0,0 +1,165 @@ +#!/usr/bin/env bash +set -euo pipefail +IFS=$'\n\t' + +API_AUTH_USER="admin" +API_AUTH_PASS="" +API_URL="" +API_STATS="jsonstat?command=stats" +TMP_FILE="/tmp/dnsdist_current.stats" + +#/ Description: BASH script to get PowerDNS dnsdist stats +#/ Examples: ./powerdns-dnsdist +#/ Options: +#/ --help: Display this help message +#/ --debug: Brief check of system env and script vars + +usage() { + grep '^#/' "$0" | cut -c4- ; + exit 0 ; +} + +debug() { + if [ -z "$API_AUTH_USER" ]; then + echo '[error] var API_AUTH_USER is not set' + else + echo '[ok] var API_AUTH_USER is set' + fi + + if [ -z "$API_AUTH_PASS" ]; then + echo '[error] var API_AUTH_PASS is not set' + else + echo '[ok] var API_AUTH_PASS is 
set' + fi + + if [ -z "$API_URL" ]; then + echo '[error] var API_URL is not set' + else + echo '[ok] var API_URL is set' + fi + + if [ -z "$API_STATS" ]; then + echo '[error] var API_STATS is not set' + else + echo '[ok] var API_STATS is set' + fi + + if ! [ -x "$(command -v curl)" ]; then + echo '[error] bin curl not available, please install it' + else + echo '[ok] bin curl' + fi + + if ! [ -x "$(command -v jq)" ]; then + echo '[error] bin jq not available, please install it' + else + echo '[ok] bin jq' + fi + + if ! [ -x "$(command -v cat)" ]; then + echo '[error] bin cat not available, please install it' + else + echo '[ok] bin cat' + fi +} + +exportdata() { + # get current data + curl -s -u$API_AUTH_USER:$API_AUTH_PASS $API_URL$API_STATS | jq '.' > $TMP_FILE + + # generate export values + JSON_VALUES=$(cat $TMP_FILE) + + STAT_CACHE_HIT=$(echo $JSON_VALUES | jq '."cache-hits"') + echo $STAT_CACHE_HIT + + STAT_CACHE_MISS=$(echo $JSON_VALUES | jq '."cache-misses"') + echo $STAT_CACHE_MISS + + STAT_DOWNSTREAM_ERR=$(echo $JSON_VALUES | jq '."downstream-send-errors"') + echo $STAT_DOWNSTREAM_ERR + + STAT_DOWNSTREAM_TIMEOUT=$(echo $JSON_VALUES | jq '."downstream-timeouts"') + echo $STAT_DOWNSTREAM_TIMEOUT + + STAT_DYNAMIC_BLOCK_SIZE=$(echo $JSON_VALUES | jq '."dyn-block-nmg-size"') + echo $STAT_DYNAMIC_BLOCK_SIZE + + STAT_DYNAMIC_BLOCK=$(echo $JSON_VALUES | jq '."dyn-blocked"') + echo $STAT_DYNAMIC_BLOCK + + STAT_QUERIES_COUNT=$(echo $JSON_VALUES | jq '.queries') + echo $STAT_QUERIES_COUNT + + STAT_QUERIES_RECURSIVE=$(echo $JSON_VALUES | jq '.rdqueries') + echo $STAT_QUERIES_RECURSIVE + + STAT_QUERIES_EMPTY=$(echo $JSON_VALUES | jq '."empty-queries"') + echo $STAT_QUERIES_EMPTY + + STAT_QUERIES_DROP_NO_POLICY=$(echo $JSON_VALUES | jq '."no-policy"') + echo $STAT_QUERIES_DROP_NO_POLICY + + STAT_QUERIES_DROP_NC=$(echo $JSON_VALUES | jq '."noncompliant-queries"') + echo $STAT_QUERIES_DROP_NC + + STAT_QUERIES_DROP_NC_ANSWER=$(echo $JSON_VALUES | jq 
'."noncompliant-responses"') + echo $STAT_QUERIES_DROP_NC_ANSWER + + STAT_QUERIES_SELF_ANSWER=$(echo $JSON_VALUES | jq '."self-answered"') + echo $STAT_QUERIES_SELF_ANSWER + + STAT_QUERIES_SERVFAIL=$(echo $JSON_VALUES | jq '."servfail-responses"') + echo $STAT_QUERIES_SERVFAIL + + STAT_QUERIES_FAILURE=$(echo $JSON_VALUES | jq '."trunc-failures"') + echo $STAT_QUERIES_FAILURE + + STAT_QUERIES_ACL_DROPS=$(echo $JSON_VALUES | jq '."acl-drops"') + echo $STAT_QUERIES_ACL_DROPS + + STAT_RULE_DROP=$(echo $JSON_VALUES | jq '."rule-drop"') + echo $STAT_RULE_DROP + + STAT_RULE_NXDOMAIN=$(echo $JSON_VALUES | jq '."rule-nxdomain"') + echo $STAT_RULE_NXDOMAIN + + STAT_RULE_REFUSED=$(echo $JSON_VALUES | jq '."rule-refused"') + echo $STAT_RULE_REFUSED + + STAT_LATENCY_AVG_100=$(echo $JSON_VALUES | jq '."latency-avg100"') + echo $STAT_LATENCY_AVG_100 + + STAT_LATENCY_AVG_1000=$(echo $JSON_VALUES | jq '."latency-avg1000"') + echo $STAT_LATENCY_AVG_1000 + + STAT_LATENCY_AVG_10000=$(echo $JSON_VALUES | jq '."latency-avg10000"') + echo $STAT_LATENCY_AVG_10000 + + STAT_LATENCY_AVG_1000000=$(echo $JSON_VALUES | jq '."latency-avg1000000"') + echo $STAT_LATENCY_AVG_1000000 + + STAT_LATENCY_SLOW=$(echo $JSON_VALUES | jq '."latency-slow"') + echo $STAT_LATENCY_SLOW + + STAT_LATENCY_0_1=$(echo $JSON_VALUES | jq '."latency0-1"') + echo $STAT_LATENCY_0_1 + + STAT_LATENCY_1_10=$(echo $JSON_VALUES | jq '."latency1-10"') + echo $STAT_LATENCY_1_10 + + STAT_LATENCY_10_50=$(echo $JSON_VALUES | jq '."latency10-50"') + echo $STAT_LATENCY_10_50 + + STAT_LATENCY_50_100=$(echo $JSON_VALUES | jq '."latency50-100"') + echo $STAT_LATENCY_50_100 + + STAT_LATENCY_100_1000=$(echo $JSON_VALUES | jq '."latency100-1000"') + echo $STAT_LATENCY_100_1000 +} + +if [ -z $* ]; then + exportdata +fi +expr "$*" : ".*--help" > /dev/null && usage +expr "$*" : ".*--debug" > /dev/null && debug From 80cee4e1eb0cf9f3af8f8321cf00441d55f0f378 Mon Sep 17 00:00:00 2001 From: endofline Date: Sun, 18 Feb 2018 22:33:42 +0200 Subject: 
[PATCH 147/497] Fix Command_Timeout missing from SMART output (#163) --- snmp/smart | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/smart b/snmp/smart index 35e059916..ab07690c3 100755 --- a/snmp/smart +++ b/snmp/smart @@ -299,7 +299,7 @@ while ( defined($disks[$int]) ) { $total=$total+$rawA[$rawAint]; $rawAint++; } - + $IDs{$id}=$total; } # 190, airflow temp From 9c0f20e3fb82dfafbe90b9232825e0f8f3603da2 Mon Sep 17 00:00:00 2001 From: endofline Date: Tue, 27 Feb 2018 23:10:35 +0200 Subject: [PATCH 148/497] Replace disk identifier with disk serial in S.M.A.R.T snmp script (#164) --- snmp/smart | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/snmp/smart b/snmp/smart index ab07690c3..567e509ba 100755 --- a/snmp/smart +++ b/snmp/smart @@ -239,6 +239,7 @@ my $toReturn=''; my $int=0; while ( defined($disks[$int]) ) { my $disk=$disks[$int]; + my $disk_sn=$disk; my $output=`$smartctl -A /dev/$disk`; my %IDs=( '5'=>'null', @@ -329,8 +330,13 @@ while ( defined($disks[$int]) ) { my $conveyance=scalar grep(/Conveyance/, @outputA); my $selective=scalar grep(/Selective/, @outputA); + # get the drive serial number + while (`$smartctl -i /dev/$disk` =~ /Serial Number:(.*)/g) { + $disk_sn = $1; + $disk_sn =~ s/^\s+|\s+$//g; + } - $toReturn=$toReturn.$disk.','.$IDs{'5'}.','.$IDs{'10'}.','.$IDs{'173'}.','.$IDs{'177'}.','.$IDs{'183'}.','.$IDs{'184'}.','.$IDs{'187'}.','.$IDs{'188'} + $toReturn=$toReturn.$disk_sn.','.$IDs{'5'}.','.$IDs{'10'}.','.$IDs{'173'}.','.$IDs{'177'}.','.$IDs{'183'}.','.$IDs{'184'}.','.$IDs{'187'}.','.$IDs{'188'} .','.$IDs{'190'} .','.$IDs{'194'}.','.$IDs{'196'}.','.$IDs{'197'}.','.$IDs{'198'}.','.$IDs{'199'}.','.$IDs{'231'}.','.$IDs{'233'}.','. 
$completed.','.$interrupted.','.$read_failure.','.$unknown_failure.','.$extended.','.$short.','.$conveyance.','.$selective."\n"; From 03a9a29ec186dfb550e64fdc8e9ab6bd4a04e5d2 Mon Sep 17 00:00:00 2001 From: Dylan Underwood Date: Fri, 23 Mar 2018 11:24:02 -0500 Subject: [PATCH 149/497] Should be greater than or equal to (#167) --- snmp/os-updates.sh | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/snmp/os-updates.sh b/snmp/os-updates.sh index 6986c1d8c..33e1f9c62 100755 --- a/snmp/os-updates.sh +++ b/snmp/os-updates.sh @@ -31,7 +31,7 @@ CMD_PACMAN='-Sup' if [ -f $BIN_ZYPPER ]; then # OpenSUSE UPDATES=`$BIN_ZYPPER $CMD_ZYPPER | $BIN_WC $CMD_WC` - if [ $UPDATES -gt 2 ]; then + if [ $UPDATES -ge 2 ]; then echo $(($UPDATES-2)); else echo "0"; @@ -39,7 +39,7 @@ if [ -f $BIN_ZYPPER ]; then elif [ -f $BIN_DNF ]; then # Fedora UPDATES=`$BIN_DNF $CMD_DNF | $BIN_WC $CMD_WC` - if [ $UPDATES -gt 1 ]; then + if [ $UPDATES -ge 1 ]; then echo $(($UPDATES-1)); else echo "0"; @@ -47,7 +47,7 @@ elif [ -f $BIN_DNF ]; then elif [ -f $BIN_PACMAN ]; then # Arch UPDATES=`$BIN_PACMAN $CMD_PACMAN | $BIN_WC $CMD_WC` - if [ $UPDATES -gt 1 ]; then + if [ $UPDATES -ge 1 ]; then echo $(($UPDATES-1)); else echo "0"; @@ -55,7 +55,7 @@ elif [ -f $BIN_PACMAN ]; then elif [ -f $BIN_YUM ]; then # CentOS / Redhat UPDATES=`$BIN_YUM $CMD_YUM | $BIN_WC $CMD_WC` - if [ $UPDATES -gt 1 ]; then + if [ $UPDATES -ge 1 ]; then echo $(($UPDATES-1)); else echo "0"; @@ -63,7 +63,7 @@ elif [ -f $BIN_YUM ]; then elif [ -f $BIN_APT ]; then # Debian / Devuan / Ubuntu UPDATES=`$BIN_APT $CMD_APT | $BIN_GREP $CMD_GREP 'Inst'` - if [ $UPDATES -gt 1 ]; then + if [ $UPDATES -ge 1 ]; then echo $UPDATES; else echo "0"; From c830dee1f1704f9af89d2a31d6b9b9f73a777517 Mon Sep 17 00:00:00 2001 From: VVelox Date: Tue, 10 Apr 2018 22:04:07 -0500 Subject: [PATCH 150/497] correct arc size breakdown --- snmp/zfs-freebsd | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/zfs-freebsd 
b/snmp/zfs-freebsd index db6f33b2d..cea6e1e95 100755 --- a/snmp/zfs-freebsd +++ b/snmp/zfs-freebsd @@ -115,7 +115,7 @@ $tojson{min_size_per}=$min_size_percent; my $mfu_size; my $recently_used_percent; my $frequently_used_percent; -if ( $sysctls->{"kstat.zfs.misc.arcstats.size"} >= $sysctls->{"kstat.zfs.misc.arcstats.size"} ){ +if ( $sysctls->{"kstat.zfs.misc.arcstats.size"} >= $sysctls->{"kstat.zfs.misc.arcstats.c"} ){ $mfu_size = $sysctls->{"kstat.zfs.misc.arcstats.size"} - $sysctls->{"kstat.zfs.misc.arcstats.p"}; $recently_used_percent = $sysctls->{"kstat.zfs.misc.arcstats.p"} / $sysctls->{"kstat.zfs.misc.arcstats.size"} * 100; $frequently_used_percent = $mfu_size / $sysctls->{"kstat.zfs.misc.arcstats.size"} * 100; From d7f661e2ebce57fdb7a7176959f9762116d5335f Mon Sep 17 00:00:00 2001 From: VVelox Date: Wed, 11 Apr 2018 02:34:39 -0500 Subject: [PATCH 151/497] correct arc size breakdown (#171) --- snmp/zfs-freebsd | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/zfs-freebsd b/snmp/zfs-freebsd index db6f33b2d..cea6e1e95 100755 --- a/snmp/zfs-freebsd +++ b/snmp/zfs-freebsd @@ -115,7 +115,7 @@ $tojson{min_size_per}=$min_size_percent; my $mfu_size; my $recently_used_percent; my $frequently_used_percent; -if ( $sysctls->{"kstat.zfs.misc.arcstats.size"} >= $sysctls->{"kstat.zfs.misc.arcstats.size"} ){ +if ( $sysctls->{"kstat.zfs.misc.arcstats.size"} >= $sysctls->{"kstat.zfs.misc.arcstats.c"} ){ $mfu_size = $sysctls->{"kstat.zfs.misc.arcstats.size"} - $sysctls->{"kstat.zfs.misc.arcstats.p"}; $recently_used_percent = $sysctls->{"kstat.zfs.misc.arcstats.p"} / $sysctls->{"kstat.zfs.misc.arcstats.size"} * 100; $frequently_used_percent = $mfu_size / $sysctls->{"kstat.zfs.misc.arcstats.size"} * 100; From 8bf753beadf617cbcacc94f38079f8706c7eac37 Mon Sep 17 00:00:00 2001 From: Serphentas Date: Wed, 11 Apr 2018 10:39:32 +0200 Subject: [PATCH 152/497] add zfs support for linux (#170) * add zfs support for linux * fix pools and anon_hits_per * strip percent 
sign for pool cap * fix anon_hits json key typo * fix demand_data_hits json key typo * fix comparison as in #169 * fix min_size_percent --- snmp/zfs-linux | 178 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 178 insertions(+) create mode 100644 snmp/zfs-linux diff --git a/snmp/zfs-linux b/snmp/zfs-linux new file mode 100644 index 000000000..f8bc5a3e1 --- /dev/null +++ b/snmp/zfs-linux @@ -0,0 +1,178 @@ +#!/usr/bin/python3 +import json +import subprocess + +def main(args): + res = {} + + ARCSTATS = open('/proc/spl/kstat/zfs/arcstats', 'r') + LINES = ARCSTATS.readlines() + LINES = [x.strip() for x in LINES] + + STATS = {} + for line in LINES[2:]: + splitline = line.split() + STATS[splitline[0]] = int(splitline[2]) + + # ARC misc + DELETED = STATS['deleted'] + EVICT_SKIP = STATS['evict_skip'] + MUTEX_SKIP = STATS['mutex_miss'] + RECYCLE_MISS = STATS['recycle_miss'] if 'recycle_miss' in STATS else 0 + + # ARC size + ARC_SIZE = STATS['size'] + TARGET_SIZE_MAX = STATS['c_max'] + TARGET_SIZE_MIN = STATS['c_min'] + TARGET_SIZE = STATS['c'] + + TARGET_SIZE_PERCENT = TARGET_SIZE / TARGET_SIZE_MAX * 100 + ARC_SIZE_PERCENT = ARC_SIZE / TARGET_SIZE_MAX * 100 + TARGET_SIZE_ADAPTIVE_RATIO = TARGET_SIZE / TARGET_SIZE_MAX + MIN_SIZE_PERCENT = TARGET_SIZE_MIN / TARGET_SIZE_MAX * 100 + + # ARC size breakdown + MFU_SIZE = 0 + RECENTLY_USED_PERCENT = 0 + FREQUENTLY_USED_PERCENT = 0 + P = STATS['p'] + + if ARC_SIZE >= TARGET_SIZE: + MFU_SIZE = ARC_SIZE - P + RECENTLY_USED_PERCENT = P / ARC_SIZE * 100 + FREQUENTLY_USED_PERCENT = MFU_SIZE / ARC_SIZE * 100 + else: + MFU_SIZE = TARGET_SIZE - P + RECENTLY_USED_PERCENT = P / TARGET_SIZE * 100 + FREQUENTLY_USED_PERCENT = MFU_SIZE / TARGET_SIZE * 100 + + + # ARC misc. 
efficient stats + ARC_HITS = STATS['hits'] + ARC_MISSES = STATS['misses'] + DEMAND_DATA_HITS = STATS['demand_data_hits'] + DEMAND_DATA_MISSES = STATS['demand_data_misses'] + DEMAND_METADATA_HITS = STATS['demand_metadata_hits'] + DEMAND_METADATA_MISSES = STATS['demand_metadata_misses'] + MFU_GHOST_HITS = STATS['mfu_ghost_hits'] + MFU_HITS = STATS['mfu_hits'] + MRU_GHOST_HITS = STATS['mru_ghost_hits'] + MRU_HITS = STATS['mru_hits'] + PREFETCH_DATA_HITS = STATS['prefetch_data_hits'] + PREFETCH_DATA_MISSES = STATS['prefetch_data_misses'] + PREFETCH_METADATA_HITS = STATS['prefetch_metadata_hits'] + PREFETCH_METADATA_MISSES = STATS['prefetch_metadata_misses'] + + ANON_HITS = ARC_HITS - (MFU_HITS + MRU_HITS + MFU_GHOST_HITS + MRU_GHOST_HITS) + ARC_ACCESSES_TOTAL = ARC_HITS + ARC_MISSES + DEMAND_DATA_TOTAL = DEMAND_DATA_HITS + DEMAND_DATA_MISSES + PREFETCH_DATA_TOTAL = PREFETCH_DATA_HITS + PREFETCH_DATA_MISSES + REAL_HITS = MFU_HITS + MRU_HITS + + # ARC efficiency percentages + CACHE_HIT_PERCENT = ARC_HITS / ARC_ACCESSES_TOTAL * 100 + CACHE_MISS_PERCENT = ARC_MISSES / ARC_ACCESSES_TOTAL * 100 + ACTUAL_HIT_PERCENT = REAL_HITS / ARC_ACCESSES_TOTAL * 100 + DATA_DEMAND_PERCENT = DEMAND_DATA_HITS / DEMAND_DATA_TOTAL * 100 + + DATA_PREFETCH_PERCENT = PREFETCH_DATA_HITS / PREFETCH_DATA_TOTAL * 100 if PREFETCH_DATA_TOTAL != 0 else 0 + + ANON_HITS_PERCENT = ANON_HITS / ARC_HITS * 100 if ANON_HITS != 0 else 0 + + MRU_PERCENT = MRU_HITS / ARC_HITS * 100 + MFU_PERCENT = MFU_HITS / ARC_HITS * 100 + MRU_GHOST_PERCENT = MRU_GHOST_HITS / ARC_HITS * 100 + MFU_GHOST_PERCENT = MFU_GHOST_HITS / ARC_HITS * 100 + + DEMAND_HITS_PERCENT = DEMAND_DATA_HITS / ARC_HITS * 100 + PREFETCH_HITS_PERCENT = PREFETCH_DATA_HITS / ARC_HITS * 100 + METADATA_HITS_PERCENT = DEMAND_METADATA_HITS / ARC_HITS * 100 + PREFETCH_METADATA_HITS_PERCENT = DEMAND_METADATA_HITS / ARC_HITS * 100 + + DEMAND_MISSES_PERCENT = DEMAND_DATA_MISSES / ARC_MISSES * 100 + PREFETCH_MISSES_PERCENT = PREFETCH_DATA_MISSES / ARC_MISSES * 
100 + METADATA_MISSES_PERCENT = DEMAND_METADATA_MISSES / ARC_MISSES * 100 + PREFETCH_METADATA_MISSES_PERCENT = DEMAND_METADATA_MISSES / ARC_MISSES * 100 + + # pools + proc = subprocess.run(['/sbin/zpool', 'list', '-pH'], stdout=subprocess.PIPE, universal_newlines=True) + if proc.returncode != 0: + return proc.returncode + + pools = [] + FIELDS = ['name', 'size', 'alloc', 'free', 'expandsz', 'frag', 'cap', 'dedup'] + for line in proc.stdout.splitlines(): + info = dict(zip(FIELDS, line.split('\t'))) + + info['expandsz'] = 0 if info['expandsz'] == '-' else info['expandsz'] + info['frag'] = info['frag'].rstrip('%') + info['frag'] = 0 if info['frag'] == '-' else info['frag'] + info['dedup'] = info['dedup'].rstrip('x') + info['cap'] = info['cap'].rstrip('%') + + pools.append(info) + + res = { + 'deleted': DELETED, # ARC misc + 'evict_skip': EVICT_SKIP, + 'mutex_skip': MUTEX_SKIP, + 'recycle_miss': RECYCLE_MISS, + 'arc_size': ARC_SIZE, # ARC size + 'target_size_max': TARGET_SIZE_MAX, + 'target_size_min': TARGET_SIZE_MIN, + 'target_size': TARGET_SIZE, + 'target_size_per': TARGET_SIZE_PERCENT, + 'arc_size_per': ARC_SIZE_PERCENT, + 'target_size_arat': TARGET_SIZE_ADAPTIVE_RATIO, + 'min_size_per': MIN_SIZE_PERCENT, + 'mfu_size': MFU_SIZE, # ARC size breakdown + 'p': P, + 'rec_used_per': RECENTLY_USED_PERCENT, + 'freq_used_per': FREQUENTLY_USED_PERCENT, + 'arc_hits': ARC_HITS, # ARC efficiency + 'arc_misses': ARC_MISSES, + 'demand_data_hits': DEMAND_DATA_HITS, + 'demand_data_misses': DEMAND_DATA_MISSES, + 'demand_meta_hits': DEMAND_METADATA_HITS, + 'demand_meta_misses': DEMAND_METADATA_MISSES, + 'mfu_ghost_hits': MFU_GHOST_HITS, + 'mfu_hits': MFU_HITS, + 'mru_ghost_hits': MRU_GHOST_HITS, + 'mru_hits': MRU_HITS, + 'pre_data_hits': PREFETCH_DATA_HITS, + 'pre_data_misses': PREFETCH_DATA_MISSES, + 'pre_meta_hits': PREFETCH_METADATA_HITS, + 'pre_meta_misses': PREFETCH_METADATA_HITS, + 'anon_hits': ANON_HITS, + 'arc_accesses_total': ARC_ACCESSES_TOTAL, + 'demand_data_total': 
DEMAND_DATA_TOTAL, + 'pre_data_total': PREFETCH_DATA_TOTAL, + 'real_hits': REAL_HITS, + 'cache_hits_per': CACHE_HIT_PERCENT, # ARC efficiency percentages + 'cache_miss_per': CACHE_MISS_PERCENT, + 'actual_hit_per': ACTUAL_HIT_PERCENT, + 'data_demand_per': DATA_DEMAND_PERCENT, + 'data_pre_per': DATA_PREFETCH_PERCENT, + 'anon_hits_per': ANON_HITS_PERCENT, + 'mru_per': MRU_PERCENT, + 'mfu_per': MFU_PERCENT, + 'mru_ghost_per': MRU_GHOST_PERCENT, + 'mfu_ghost_per': MFU_GHOST_PERCENT, + 'demand_hits_per': DEMAND_HITS_PERCENT, + 'pre_hits_per': PREFETCH_HITS_PERCENT, + 'meta_hits_per': METADATA_HITS_PERCENT, + 'pre_meta_hits_per': PREFETCH_METADATA_HITS_PERCENT, + 'demand_misses_per': DEMAND_MISSES_PERCENT, + 'pre_misses_per': PREFETCH_MISSES_PERCENT, + 'meta_misses_per': METADATA_MISSES_PERCENT, + 'pre_meta_misses_per': PREFETCH_METADATA_MISSES_PERCENT, + 'pools': pools + } + + print(json.dumps(res)) + + return 0 + +if __name__ == '__main__': + import sys + sys.exit(main(sys.argv[1:])) From ee9a6f82f9ecaec7980d923c0c6b86929c088746 Mon Sep 17 00:00:00 2001 From: Sander Steffann Date: Fri, 13 Apr 2018 17:42:27 +0100 Subject: [PATCH 153/497] Add random entropy monitoring (#173) --- snmp/entropy.sh | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 snmp/entropy.sh diff --git a/snmp/entropy.sh b/snmp/entropy.sh new file mode 100644 index 000000000..08bd801ca --- /dev/null +++ b/snmp/entropy.sh @@ -0,0 +1,2 @@ +#!/usr/bin/env bash +cat /proc/sys/kernel/random/entropy_avail From 29b5f1de6fb2bd44ac4f2df23d8cb4076d21e7b9 Mon Sep 17 00:00:00 2001 From: VVelox Date: Fri, 27 Apr 2018 16:46:57 -0500 Subject: [PATCH 154/497] make using SN or device name selectable for SMART reporting (#168) * make using SN or device name selectable * change the default to SN --- snmp/smart | 33 ++++++++++++++++++++++----------- 1 file changed, 22 insertions(+), 11 deletions(-) diff --git a/snmp/smart b/snmp/smart index 567e509ba..44b7a31e7 100755 --- a/snmp/smart +++ b/snmp/smart @@ -42,7 +42,8 
@@ line with out a = or # are treated as a disk. #This is a comment cache=/var/cache/smart - smartctl=/usr/bin/env smartctl + smartctl=/usr/local/sbin/smartctl + useSN=0 ada0 ada1 @@ -50,6 +51,8 @@ The variables are as below. cache = The path to the cache file to use. Default: /var/cache/smart smartctl = The path to use for smartctl. Default: /usr/bin/env smartctl + useSN = If set to 1, it will use the disks SN for reporting instead of the device name. + 1 is the default. 0 will use the device name. If you want to guess at the configuration, call it with -g and it will print out what it thinks it should be. @@ -59,14 +62,15 @@ it should be. ## ## You should not need to touch anything below here. ## -my $cache='/var/cache/smart'; -my $smartctl='/usr/bin/env smartctl'; -my @disks; - use warnings; use strict; use Getopt::Std; +my $cache='/var/cache/smart'; +my $smartctl='/usr/bin/env smartctl'; +my @disks; +my $useSN=1; + $Getopt::Std::STANDARD_HELP_VERSION = 1; sub main::VERSION_MESSAGE { print "SMART SNMP extend 0.0.0\n"; @@ -169,7 +173,7 @@ if ( defined( $opts{g} ) ){ $matches_int++; } - print 'smartctl='.$smartctl."\n". + print "useSN=0\n".'smartctl='.$smartctl."\n". $cache. 
join( "\n", keys(%found_disks) )."\n"; @@ -209,6 +213,10 @@ while ( defined( $configA[$configA_int] ) ){ $smartctl=$val; } + if ( $var eq 'useSN' ){ + $useSN=$val; + } + if ( !defined( $val ) ){ push(@disks, $var); } @@ -330,13 +338,16 @@ while ( defined($disks[$int]) ) { my $conveyance=scalar grep(/Conveyance/, @outputA); my $selective=scalar grep(/Selective/, @outputA); - # get the drive serial number - while (`$smartctl -i /dev/$disk` =~ /Serial Number:(.*)/g) { - $disk_sn = $1; - $disk_sn =~ s/^\s+|\s+$//g; + # get the drive serial number, if needed + my $disk_id=$disk; + if ( $useSN ){ + while (`$smartctl -i /dev/$disk` =~ /Serial Number:(.*)/g) { + $disk_id = $1; + $disk_id =~ s/^\s+|\s+$//g; + } } - $toReturn=$toReturn.$disk_sn.','.$IDs{'5'}.','.$IDs{'10'}.','.$IDs{'173'}.','.$IDs{'177'}.','.$IDs{'183'}.','.$IDs{'184'}.','.$IDs{'187'}.','.$IDs{'188'} + $toReturn=$toReturn.$disk_id.','.$IDs{'5'}.','.$IDs{'10'}.','.$IDs{'173'}.','.$IDs{'177'}.','.$IDs{'183'}.','.$IDs{'184'}.','.$IDs{'187'}.','.$IDs{'188'} .','.$IDs{'190'} .','.$IDs{'194'}.','.$IDs{'196'}.','.$IDs{'197'}.','.$IDs{'198'}.','.$IDs{'199'}.','.$IDs{'231'}.','.$IDs{'233'}.','. $completed.','.$interrupted.','.$read_failure.','.$unknown_failure.','.$extended.','.$short.','.$conveyance.','.$selective."\n"; From 59d3e4fae7ad81579f9847f789df59a8b79a1f93 Mon Sep 17 00:00:00 2001 From: VVelox Date: Mon, 28 May 2018 07:22:09 -0500 Subject: [PATCH 155/497] convert fail2ban-client to JSON (#172) * convert to JSON * add version return * change the version number of the returned data to 1 --- snmp/fail2ban | 88 ++++++++++++++++++++++++++++++++++++--------------- 1 file changed, 62 insertions(+), 26 deletions(-) diff --git a/snmp/fail2ban b/snmp/fail2ban index 117d2c162..f965c558d 100644 --- a/snmp/fail2ban +++ b/snmp/fail2ban @@ -26,6 +26,10 @@ This is the path to the fail2ban-client if needed. If not specified, "/usr/bin/env fail2ban-client" is used. +=head2 -p + +Pretty prints the JSON. 
+ =head2 -u Updates the cache. @@ -73,6 +77,7 @@ fail2ban-cleint being /foo/bin/fail2ban-client. use strict; use warnings; use Getopt::Std; +use JSON; #fail2ban-client path my $f2bc="/usr/bin/env fail2ban-client"; @@ -90,6 +95,7 @@ sub main::HELP_MESSAGE { "-c Print from the cache.\n". "-C Use this as the cache file.\n". "-f The fail2ban-client path if needed.". + "-p Pretty prints the JSON.\n". "-u Update the cache, '".$cache."'\n". "-U When used with -c, allow update of the cache file if it does not exist or is older than 360 seconds.". "\n". @@ -98,40 +104,70 @@ sub main::HELP_MESSAGE { #generats stats sub stats{ + my %toReturn; + $toReturn{total}=0; # total number in jails + $toReturn{jails}={}; # each jail + $toReturn{error}=0; # error code, 0 if good + $toReturn{errorString}=''; # detailed description of any errors + $toReturn{version}='1'; # format version of the returned data + #gets a list of jails my $jailsOutput=`$f2bc status`; - my @jailsOutputA=split(/\n/, $jailsOutput); - my ( $jailsS )=grep( /Jail\ list/, @jailsOutputA ); - $jailsS=~s/.*\://; - $jailsS=~s/\s//g; - my @jails=split(/\,/, $jailsS); + $toReturn{error}=$?; - #process jail - my $int=0; - my $total=0; - my $toReturn=''; - while(defined($jails[$int])){ + if ( $? == -1){ + $toReturn{errorString}='failed to run fail2ban-client'; + } + elsif ($? & 127) { + $toReturn{errorString}= sprintf "fail2ban-client died with signal %d, %s coredump\n", + ($? & 127), ($? & 128) ? 'with' : 'without'; + } + else { + $toReturn{error}=$? 
>> 8; + $toReturn{errorString}="fail2ban-client exited with ".$toReturn{error}; + } + + if ( $toReturn{error} == 0 ){ - #get the total for this jail - my $jailStatusOutput=`$f2bc status $jails[$int]`; - my @jailStatusOutputA=split(/\n/, $jailStatusOutput); - my ( $jailTotal )=grep(/Currently\ banned\:/, @jailStatusOutputA); - $jailTotal=~s/.*\://; - $jailTotal=~s/\s//g; + my @jailsOutputA=split(/\n/, $jailsOutput); + my ( $jailsS )=grep( /Jail\ list/, @jailsOutputA ); + $jailsS=~s/.*\://; + $jailsS=~s/\s//g; + my @jails=split(/\,/, $jailsS); - #tally the total and add this jail to the list - $total=$total+$jailTotal; - $toReturn=$toReturn.$jails[$int].' '.$jailTotal."\n"; + #process jails + my $int=0; + while(defined($jails[$int])){ + + #get the total for this jail + my $jailStatusOutput=`$f2bc status $jails[$int]`; + my @jailStatusOutputA=split(/\n/, $jailStatusOutput); + my ( $jailTotal )=grep(/Currently\ banned\:/, @jailStatusOutputA); + $jailTotal=~s/.*\://; + $jailTotal=~s/\s//g; + + #tally the total and add this jail to the list + $toReturn{total} = $toReturn{total} + $jailTotal; + $toReturn{jails}{ $jails[$int] } = $jailTotal; + + $int++; + } - $int++; + } + + my $j=JSON->new; + + if ( $_[0] ){ + $j->pretty(1); + return $j->encode( \%toReturn ); } - return $total."\n".$toReturn; + return $j->encode( \%toReturn )."\n"; } #updates $cache sub cacheUpdate{ - my $stats=stats; + my $stats=stats($_[0]); open(my $writefh, ">", $cache) or die "Can't open '".$cache."'"; print $writefh $stats; @@ -150,7 +186,7 @@ sub cachePrint{ #gets the options my %opts=(); -getopts('uUcC:f:', \%opts); +getopts('puUcC:f:', \%opts); #use custom cache file if needed if ( defined( $opts{C} ) ){ @@ -175,7 +211,7 @@ if ( defined( $opts{c} ) ){ #cache does not exist or is old if ( $opts{U} ){ #allowed to update it via -U - cacheUpdate; + cacheUpdate( $opts{p} ); cachePrint; exit 0; }else{ @@ -190,12 +226,12 @@ if ( defined( $opts{c} ) ){ #update the cache if (defined( $opts{u} )){ - 
cacheUpdate; + cacheUpdate( $opts{p} ); exit 0; } #no cache opions given, just print it -print &stats; +print &stats( $opts{p} ); exit 0; From bc8a55e00d54f95c439557cbe663b702d34b6983 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Istv=C3=A1n=20S=C3=A1r=C3=A1ndi?= Date: Mon, 25 Jun 2018 16:10:00 +0200 Subject: [PATCH 156/497] Update fail2ban extend script to new JSON format (#181) As seen at [this location](https://github.com/librenms/librenms/blob/7fab99cfc13b80a543fb779d68c659b52fc074b1/includes/polling/functions.inc.php#L768) the JSON output needs to contain a `data` field. The poller php script actually also extracts this `data` field as one of the first steps, see at [this line](https://github.com/librenms/librenms/blob/c3007b483a12758042e5d0c6009a8ef48e3e1a39/includes/polling/applications/fail2ban.inc.php#L36). Before I changed these parts the graph didn't show up because the RRD files simply weren't generated as an exception occurred in the poller. This fixes this problem. --- snmp/fail2ban | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/snmp/fail2ban b/snmp/fail2ban index f965c558d..85640021b 100644 --- a/snmp/fail2ban +++ b/snmp/fail2ban @@ -105,8 +105,9 @@ sub main::HELP_MESSAGE { #generats stats sub stats{ my %toReturn; - $toReturn{total}=0; # total number in jails - $toReturn{jails}={}; # each jail + $toReturn{data}={}; + $toReturn{data}{total}=0; # total number in jails + $toReturn{data}{jails}={}; # each jail $toReturn{error}=0; # error code, 0 if good $toReturn{errorString}=''; # detailed description of any errors $toReturn{version}='1'; # format version of the returned data @@ -147,8 +148,8 @@ sub stats{ $jailTotal=~s/\s//g; #tally the total and add this jail to the list - $toReturn{total} = $toReturn{total} + $jailTotal; - $toReturn{jails}{ $jails[$int] } = $jailTotal; + $toReturn{data}{total} = $toReturn{data}{total} + $jailTotal; + $toReturn{data}{jails}{ $jails[$int] } = $jailTotal; $int++; } From 
5de85f15b0ae2e99e565b15734089d03ba54341d Mon Sep 17 00:00:00 2001 From: TheGreatDoc <32565115+TheGreatDoc@users.noreply.github.com> Date: Thu, 19 Jul 2018 22:39:08 +0200 Subject: [PATCH 157/497] Asterisk Script (#183) Asterisk App support. - Channels - Calls - Total SIP Peers - Monitored Online - Monitored Offline - Unmonitored Online - Unmonitored Offline --- snmp/asterisk | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) create mode 100644 snmp/asterisk diff --git a/snmp/asterisk b/snmp/asterisk new file mode 100644 index 000000000..109aec4a4 --- /dev/null +++ b/snmp/asterisk @@ -0,0 +1,19 @@ +#!/bin/bash + +ASCLI=/usr/sbin/asterisk + +if [ -f $ASCLI ]; +then + $ASCLI -rx "core show uptime" > /dev/null + if [ $? -ne 0 ]; then + # Asterisk not running, silently exit. + exit 0 + fi + + echo "<<>>" + $ASCLI -rx "core show channels" | awk '/active calls/ { print "Calls=" $1 } /active channels/ { print "Channels=" $1}' + $ASCLI -rx 'sip show peers' | awk '/sip peers/ { print "SipPeers=" $1 "\nSipMonOnline=" $5 "\nSipMonOffline=" $7 "\nSipUnMonOnline=" $10 "\nSipUnMonOffline=" $12}' + +else + exit 0 +fi From 60b6a096f416eb58c266345b8e6db124389213bc Mon Sep 17 00:00:00 2001 From: crcro Date: Fri, 10 Aug 2018 00:44:02 +0300 Subject: [PATCH 158/497] added rockstor nas distro detection (#187) --- snmp/distro | 2 ++ 1 file changed, 2 insertions(+) diff --git a/snmp/distro b/snmp/distro index 61ad2488c..d13af0629 100755 --- a/snmp/distro +++ b/snmp/distro @@ -30,6 +30,8 @@ elif [ "${OS}" = "Linux" ] ; then REV=`cat /etc/mandriva-release | sed s/.*release\ // | sed s/\ .*//` elif [ -f /etc/oracle-release ]; then DIST="Oracle" + elif [ -f /etc/rockstor-release ]; then + DIST="Rockstor" else DIST="RedHat" fi From 4342eab181a23829b7da0caff3f2066690269755 Mon Sep 17 00:00:00 2001 From: VVelox Date: Sun, 19 Aug 2018 17:47:07 -0500 Subject: [PATCH 159/497] update for the new json_app_get stuff (#179) --- snmp/zfs-freebsd | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 
deletions(-) diff --git a/snmp/zfs-freebsd b/snmp/zfs-freebsd index cea6e1e95..d78658c2d 100755 --- a/snmp/zfs-freebsd +++ b/snmp/zfs-freebsd @@ -51,7 +51,7 @@ use Getopt::Std; $Getopt::Std::STANDARD_HELP_VERSION = 1; sub main::VERSION_MESSAGE { - print "FreeBSD ZFS stats extend 0.0.0\n"; + print "FreeBSD ZFS stats extend 0.1.0\n"; } sub main::HELP_MESSAGE { @@ -251,13 +251,19 @@ while ( defined( $pools[$pools_int] ) ) { } $tojson{pools}=\@toShoveIntoJSON; +my %head_hash; +$head_hash{'data'}=\%tojson; +$head_hash{'version'}=1; +$head_hash{'error'}=0; +$head_hash{'errorString'}=''; + my $j=JSON->new; if ( $opts{p} ){ $j->pretty(1); } -print $j->encode( \%tojson ); +print $j->encode( \%head_hash ); if (! $opts{p} ){ print "\n"; From c5a9cb51d640256bc633abf9b4da3c7bb7824f32 Mon Sep 17 00:00:00 2001 From: VVelox Date: Mon, 27 Aug 2018 04:03:01 -0500 Subject: [PATCH 160/497] convert all the NTP stuff to JSON (#174) This requires https://github.com/librenms/librenms/pull/8571 and is for https://github.com/librenms/librenms/pull/8608 . Also converted this to regular sh instead of bash, so it will work on more systems with less dependencies. Has been tested as working on DD-WRT and FreeBSD. 
--- snmp/ntp-client.sh | 65 +++++++++++++++++--------- snmp/ntp-server.sh | 112 +++++++++++++++++++++++++++++++-------------- 2 files changed, 121 insertions(+), 56 deletions(-) diff --git a/snmp/ntp-client.sh b/snmp/ntp-client.sh index aa56f810d..c04e8b680 100755 --- a/snmp/ntp-client.sh +++ b/snmp/ntp-client.sh @@ -1,25 +1,48 @@ -#!/usr/bin/env bash -################################################################ -# copy this script to somewhere like /opt and make chmod +x it # -# edit your snmpd.conf and include # -# extend ntp-client /opt/ntp-client.sh # -# restart snmpd and activate the app for desired host # -# please make sure you have the path/binaries below # -################################################################ -# Binaries and paths required # -################################################################ -BIN_NTPQ="$(command -v ntpq)" -BIN_GREP="$(command -v grep)" -BIN_TR="$(command -v tr)" -BIN_CUT="$(command -v cut)" +#!/bin/sh +# Please make sure the paths below are correct. +# Alternatively you can put them in $0.conf, meaning if you've named +# this script ntp-client.sh then it must go in ntp-client.sh.conf . +# +# NTPQV output version of "ntpq -c rv" +# p1 DD-WRT and some other outdated linux distros +# p11 FreeBSD 11 and any linux distro that is up to date +# +# If you are unsure, which to set, run this script and make sure that +# the JSON output variables match that in "ntpq -c rv". 
+# +BIN_NTPQ='/usr/bin/env ntpq' +BIN_GREP='/usr/bin/env grep' +BIN_SED="/usr/bin/env sed" +BIN_AWK='/usr/bin/env awk' +NTPQV="p11" ################################################################ # Don't change anything unless you know what are you doing # ################################################################ -CMD1=`$BIN_NTPQ -c rv | $BIN_GREP 'jitter' | $BIN_TR '\n' ' '` -IFS=', ' read -r -a array <<< "$CMD1" - -for value in 2 3 4 5 6 -do - echo ${array["$value"]} | $BIN_CUT -d "=" -f 2 -done +CONFIG=$0".conf" +if [ -f $CONFIG ]; then + . $CONFIG +fi +VERSION=1 +#error and errorString are hardcoded as if the above fails bad json will be generated +RAW=`$BIN_NTPQ -c rv | $BIN_GREP jitter | $BIN_SED 's/[[:alpha:]=,_]/ /g'` +if [ $NTPQV = "p11" ]; then + echo $RAW | $BIN_AWK -F ' ' '{print "{\"data\":{\"offset\":\""$3\ + "\",\"frequency\":\""$4\ + "\",\"sys_jitter\":\""$5\ + "\",\"clk_jitter\":\""$6\ + "\",\"clk_wander\":\""$7\ + "\"},\"version\":\""'$VERSION'"\",\"error\":\"0\",\"errorString\":\"\"}" + }' + exit 0 +fi +if [ $NTPQV = "p1" ]; then + echo $RAW | $BIN_AWK -F ' ' '{print "{\"data\":{\"offset\":\""$2\ + "\",\"frequency\":\""$3\ + "\",\"sys_jitter\":\""$4\ + "\",\"clk_jitter\":\""$5\ + "\",\"clk_wander\":\""$6\ + "\"},\"version\":\""'$VERSION'"\",\"error\":\"0\",\"errorString\":\"\"}" + }' + exit 0 +fi diff --git a/snmp/ntp-server.sh b/snmp/ntp-server.sh index 506b2f8d3..bbf5c737a 100755 --- a/snmp/ntp-server.sh +++ b/snmp/ntp-server.sh @@ -1,47 +1,89 @@ -#!/usr/bin/env bash -################################################################ -# copy this script to somewhere like /opt and make chmod +x it # -# edit your snmpd.conf and include # -# extend ntp-server /opt/ntp-server.sh # -# restart snmpd and activate the app for desired host # -# please make sure you have the path/binaries below # -################################################################ -# Binaries and paths required # 
-################################################################ -BIN_NTPD='/usr/sbin/ntpd' -BIN_NTPQ='/usr/sbin/ntpq' -BIN_NTPDC='/usr/sbin/ntpdc' -BIN_GREP='/usr/bin/grep' -BIN_TR='/usr/bin/tr' -BIN_CUT='/usr/bin/cut' -BIN_SED='/usr/bin/sed' +#!/bin/sh +# Please make sure the paths below are correct. +# Alternatively you can put them in $0.conf, meaning if you've named +# this script ntp-client.sh then it must go in ntp-client.sh.conf . +# +# NTPQV output version of "ntpq -c rv" +# p1 DD-WRT and some other outdated linux distros +# p11 FreeBSD 11 and any linux distro that is up to date +# +# If you are unsure, which to set, run this script and make sure that +# the JSON output variables match that in "ntpq -c rv". +# +BIN_NTPD='/usr/bin/env ntpd' +BIN_NTPQ='/usr/bin/env ntpq' +BIN_NTPDC='/usr/bin/env ntpdc' +BIN_GREP='/usr/bin/env grep' +BIN_TR='/usr/bin/env tr' +BIN_CUT='/usr/bin/env cut' +BIN_SED="/usr/bin/env sed" +BIN_AWK='/usr/bin/env awk' +NTPQV="p11" ################################################################ # Don't change anything unless you know what are you doing # ################################################################ -VER=`$BIN_NTPD --version` - -CMD0=`$BIN_NTPQ -c rv | $BIN_GREP -Eow "stratum=[0-9]+" | $BIN_CUT -d "=" -f 2` -echo $CMD0 +CONFIG=$0".conf" +if [ -f $CONFIG ]; then + . 
$CONFIG +fi +VERSION=1 -CMD1=`$BIN_NTPQ -c rv | $BIN_GREP 'jitter' | $BIN_TR '\n' ' '` -IFS=', ' read -r -a array <<< "$CMD1" +STRATUM=`$BIN_NTPQ -c rv | $BIN_GREP -Eow "stratum=[0-9]+" | $BIN_CUT -d "=" -f 2` -for value in 2 3 4 5 6 -do - echo ${array["$value"]} | $BIN_CUT -d "=" -f 2 -done +# parse the ntpq info that requires version specific info +NTPQ_RAW=`$BIN_NTPQ -c rv | $BIN_GREP jitter | $BIN_SED 's/[[:alpha:]=,_]/ /g'` +if [ $NTPQV = "p11" ]; then + OFFSET=`echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $3}'` + FREQUENCY=`echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $4}'` + SYS_JITTER=`echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $5}'` + CLK_JITTER=`echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $6}'` + CLK_WANDER=`echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $7}'` +fi +if [ $NTPQV = "p1" ]; then + OFFSET=`echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $2}'` + FREQUENCY=`echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $3}'` + SYS_JITTER=`echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $4}'` + CLK_JITTER=`echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $5}'` + CLK_WANDER=`echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $6}'` +fi -if [[ "$VER" =~ '4.2.6p5' ]] -then +VER=`$BIN_NTPD --version` +if [ "$VER" = '4.2.6p5' ]; then USECMD=`echo $BIN_NTPDC -c iostats` else USECMD=`echo $BIN_NTPQ -c iostats localhost` fi -CMD2=`$USECMD | $BIN_TR -d ' ' | $BIN_TR '\n' ','` +CMD2=`$USECMD | $BIN_TR -d ' ' | $BIN_CUT -d : -f 2 | $BIN_TR '\n' ' '` -IFS=',' read -r -a array <<< "$CMD2" +TIMESINCERESET=`echo $CMD2 | $BIN_AWK -F ' ' '{print $1}'` +RECEIVEDBUFFERS=`echo $CMD2 | $BIN_AWK -F ' ' '{print $2}'` +FREERECEIVEBUFFERS=`echo $CMD2 | $BIN_AWK -F ' ' '{print $3}'` +USEDRECEIVEBUFFERS=`echo $CMD2 | $BIN_AWK -F ' ' '{print $4}'` +LOWWATERREFILLS=`echo $CMD2 | $BIN_AWK -F ' ' '{print $5}'` +DROPPEDPACKETS=`echo $CMD2 | $BIN_AWK -F ' ' '{print $6}'` +IGNOREDPACKETS=`echo $CMD2 | $BIN_AWK -F ' ' '{print $7}'` +RECEIVEDPACKETS=`echo $CMD2 | $BIN_AWK -F ' ' '{print $8}'` +PACKETSSENT=`echo $CMD2 | $BIN_AWK -F ' ' '{print $9}'` 
+PACKETSENDFAILURES=`echo $CMD2 | $BIN_AWK -F ' ' '{print $10}'` +INPUTWAKEUPS=`echo $CMD2 | $BIN_AWK -F ' ' '{print $11}'` +USEFULINPUTWAKEUPS=`echo $CMD2 | $BIN_AWK -F ' ' '{print $12}'` -for value in 0 1 2 3 5 6 7 8 -do - echo ${array["$value"]} | $BIN_SED -e 's/[^0-9]/ /g' -e 's/^ *//g' -e 's/ *$//g' -done +echo '{"data":{"offset":"'$OFFSET\ +'","frequency":"'$FREQUENCY\ +'","sys_jitter":"'$SYS_JITTER\ +'","clk_jitter":"'$CLK_JITTER\ +'","clk_wander":"'$CLK_WANDER\ +'","stratum":"'$STRATUM\ +'","time_since_reset":"'$TIMESINCERESET\ +'","receive_buffers":"'$RECEIVEDBUFFERS\ +'","free_receive_buffers":"'$FREERECEIVEBUFFERS\ +'","used_receive_buffers":"'$USEDRECEIVEBUFFERS\ +'","low_water_refills":"'$LOWWATERREFILLS\ +'","dropped_packets":"'$DROPPEDPACKETS\ +'","ignored_packets":"'$IGNOREDPACKETS\ +'","received_packets":"'$RECEIVEDPACKETS\ +'","packets_sent":"'$PACKETSSENT\ +'","packet_send_failures":"'$PACKETSENDFAILURES\ +'","input_wakeups":"'$PACKETSENDFAILURES\ +'","useful_input_wakeups":"'$USEFULINPUTWAKEUPS\ +'"},"error":"0","errorString":"","version":"'$VERSION'"}' From a676961261623afc37b755a38e55350b39821b85 Mon Sep 17 00:00:00 2001 From: Allison Date: Tue, 18 Sep 2018 20:20:23 -0700 Subject: [PATCH 161/497] Update distro (#194) Adding full detection for ASUSWRT-Merlin --- snmp/distro | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/snmp/distro b/snmp/distro index d13af0629..d5bd53754 100755 --- a/snmp/distro +++ b/snmp/distro @@ -97,6 +97,11 @@ elif [ "${OS}" = "Linux" ] ; then DIST="dd-wrt" fi + if [ "`uname -a | awk '{print $(NF)}'`" = "ASUSWRT-Merlin" ] ; then + DIST="ASUSWRT-Merlin" + REV=`nvram show | grep buildno= | egrep -o '[0-9].[0-9].[0-9]'` > /dev/null 2>&1 + fi + if [ -n "${REV}" ] then OSSTR="${DIST} ${REV}" From e34cb128b513dc3e2ee1a1cf70b0e7b2bc8055b2 Mon Sep 17 00:00:00 2001 From: Brock Alberry Date: Wed, 19 Sep 2018 09:09:04 -0400 Subject: [PATCH 162/497] PhotonOS distro detection (#193) * PhotonOS distro detection Detection before 
`/etc/os-release` since that is present yet missing the build number. * awk detection combining https://github.com/librenms/librenms-agent/pull/193 and https://github.com/librenms/librenms-agent/pull/194 --- snmp/distro | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/snmp/distro b/snmp/distro index d5bd53754..d833a0e25 100755 --- a/snmp/distro +++ b/snmp/distro @@ -64,6 +64,11 @@ elif [ "${OS}" = "Linux" ] ; then DIST="Arch Linux" REV="" # Omit version since Arch Linux uses rolling releases IGNORE_LSB=1 # /etc/lsb-release would overwrite $REV with "rolling" + + elif [ -f /etc/photon-release ] ; then + DIST=$(head -1 < /etc/photon-release) + REV=$(sed -n -e 's/^.*PHOTON_BUILD_NUMBER=//p' /etc/photon-release) + IGNORE_LSB=1 # photon os does not have /etc/lsb-release nor lsb_release elif [ -f /etc/os-release ] ; then DIST=$(grep '^NAME=' /etc/os-release | cut -d= -f2- | tr -d '"') @@ -93,13 +98,14 @@ elif [ "${OS}" = "Linux" ] ; then fi fi - if [ "`uname -a | awk '{print $(NF)}'`" = "DD-WRT" ] ; then - DIST="dd-wrt" - fi - - if [ "`uname -a | awk '{print $(NF)}'`" = "ASUSWRT-Merlin" ] ; then - DIST="ASUSWRT-Merlin" - REV=`nvram show | grep buildno= | egrep -o '[0-9].[0-9].[0-9]'` > /dev/null 2>&1 + if [ -x "$(command -v awk)" ]; then # some distros do not ship with awk + if [ "`uname -a | awk '{print $(NF)}'`" = "DD-WRT" ] ; then + DIST="dd-wrt" + fi + if [ "`uname -a | awk '{print $(NF)}'`" = "ASUSWRT-Merlin" ] ; then + DIST="ASUSWRT-Merlin" + REV=`nvram show | grep buildno= | egrep -o '[0-9].[0-9].[0-9]'` > /dev/null 2>&1 + fi fi if [ -n "${REV}" ] From d6f6b30715c473bf2f2fb07ae418dc26e082fb8a Mon Sep 17 00:00:00 2001 From: voxnil <14983067+voxnil@users.noreply.github.com> Date: Mon, 15 Oct 2018 13:00:16 -0700 Subject: [PATCH 163/497] Update zfs-linux to use env for python --- snmp/zfs-linux | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/zfs-linux b/snmp/zfs-linux index f8bc5a3e1..e9c19e1c6 100644 --- 
a/snmp/zfs-linux +++ b/snmp/zfs-linux @@ -1,4 +1,4 @@ -#!/usr/bin/python3 +#!/usr/bin/env python3 import json import subprocess From 569d03044388a47c6720bec4b043c3566e7b2162 Mon Sep 17 00:00:00 2001 From: sparknsh <38894705+sparknsh@users.noreply.github.com> Date: Wed, 17 Oct 2018 12:26:45 -0400 Subject: [PATCH 164/497] ntp-client data correction (#196) NTP was not displaying data right for linux servers. It was putting the frequency data into the offset data. This was giving bad graphs in the UI. Tested the correction on both RHEL and Debian based operating systems and all passes. Remove the .sh to simplify for configuration management orchestration scripts. --- snmp/ntp-client | 35 +++++++++++++++++++++++++++++++++ snmp/ntp-client.sh | 48 ---------------------------------------------- 2 files changed, 35 insertions(+), 48 deletions(-) create mode 100755 snmp/ntp-client delete mode 100755 snmp/ntp-client.sh diff --git a/snmp/ntp-client b/snmp/ntp-client new file mode 100755 index 000000000..04db80655 --- /dev/null +++ b/snmp/ntp-client @@ -0,0 +1,35 @@ +#!/bin/sh +# Please make sure the paths below are correct. +# Alternatively you can put them in $0.conf, meaning if you've named +# this script ntp-client then it must go in ntp-client.conf . +# +# NTPQV output version of "ntpq -c rv" +# Version 4 is the most common and up to date version. +# +# If you are unsure, which to set, run this script and make sure that +# the JSON output variables match that in "ntpq -c rv". +# +################################################################ +# Don't change anything unless you know what are you doing # +################################################################ +BIN_NTPQ='/usr/bin/env ntpq' +BIN_NTPD='/usr/bin/env ntpd' +BIN_GREP='/usr/bin/env grep' +BIN_AWK='/usr/bin/env awk' +BIN_HEAD='/usr/bin/env head' + +CONFIG=$0".conf" +if [ -f $CONFIG ]; then + . 
$CONFIG +fi + +NTP_OFFSET=`$BIN_NTPQ -c rv | $BIN_GREP "offset" | $BIN_AWK -Foffset= '{print $2}' | $BIN_AWK -F, '{print $1}'` +NTP_FREQUENCY=`$BIN_NTPQ -c rv | $BIN_GREP "frequency" | $BIN_AWK -Ffrequency= '{print $2}' | $BIN_AWK -F, '{print $1}'` +NTP_SYS_JITTER=`$BIN_NTPQ -c rv | $BIN_GREP "sys_jitter" | $BIN_AWK -Fsys_jitter= '{print $2}' | $BIN_AWK -F, '{print $1}'` +NTP_CLK_JITTER=`$BIN_NTPQ -c rv | $BIN_GREP "clk_jitter" | $BIN_AWK -Fclk_jitter= '{print $2}' | $BIN_AWK -F, '{print $1}'` +NTP_WANDER=`$BIN_NTPQ -c rv | $BIN_GREP "clk_wander" | $BIN_AWK -Fclk_wander= '{print $2}' | $BIN_AWK -F, '{print $1}'` +NTP_VERSION=`$BIN_NTPD --version | $BIN_AWK -F'ntpd ' '{print $2}' | $BIN_HEAD -c 1` + +echo '{"data":{"offset":"'$NTP_OFFSET'","frequency":"'$NTP_FREQUENCY'","sys_jitter":"'$NTP_SYS_JITTER'","clk_jitter":"'$NTP_CLK_JITTER'","clk_wander":"'$NTP_WANDER'"},"version":"'$NTP_VERSION'","error":"0","errorString":""}' + +exit 0 diff --git a/snmp/ntp-client.sh b/snmp/ntp-client.sh deleted file mode 100755 index c04e8b680..000000000 --- a/snmp/ntp-client.sh +++ /dev/null @@ -1,48 +0,0 @@ -#!/bin/sh -# Please make sure the paths below are correct. -# Alternatively you can put them in $0.conf, meaning if you've named -# this script ntp-client.sh then it must go in ntp-client.sh.conf . -# -# NTPQV output version of "ntpq -c rv" -# p1 DD-WRT and some other outdated linux distros -# p11 FreeBSD 11 and any linux distro that is up to date -# -# If you are unsure, which to set, run this script and make sure that -# the JSON output variables match that in "ntpq -c rv". -# -BIN_NTPQ='/usr/bin/env ntpq' -BIN_GREP='/usr/bin/env grep' -BIN_SED="/usr/bin/env sed" -BIN_AWK='/usr/bin/env awk' -NTPQV="p11" -################################################################ -# Don't change anything unless you know what are you doing # -################################################################ -CONFIG=$0".conf" -if [ -f $CONFIG ]; then - . 
$CONFIG -fi -VERSION=1 -#error and errorString are hardcoded as if the above fails bad json will be generated -RAW=`$BIN_NTPQ -c rv | $BIN_GREP jitter | $BIN_SED 's/[[:alpha:]=,_]/ /g'` -if [ $NTPQV = "p11" ]; then - echo $RAW | $BIN_AWK -F ' ' '{print "{\"data\":{\"offset\":\""$3\ - "\",\"frequency\":\""$4\ - "\",\"sys_jitter\":\""$5\ - "\",\"clk_jitter\":\""$6\ - "\",\"clk_wander\":\""$7\ - "\"},\"version\":\""'$VERSION'"\",\"error\":\"0\",\"errorString\":\"\"}" - }' - exit 0 -fi - -if [ $NTPQV = "p1" ]; then - echo $RAW | $BIN_AWK -F ' ' '{print "{\"data\":{\"offset\":\""$2\ - "\",\"frequency\":\""$3\ - "\",\"sys_jitter\":\""$4\ - "\",\"clk_jitter\":\""$5\ - "\",\"clk_wander\":\""$6\ - "\"},\"version\":\""'$VERSION'"\",\"error\":\"0\",\"errorString\":\"\"}" - }' - exit 0 -fi From 61c1266f45526b7a23a3b56fe5d43f9d9a845384 Mon Sep 17 00:00:00 2001 From: sparknsh <38894705+sparknsh@users.noreply.github.com> Date: Wed, 17 Oct 2018 12:27:21 -0400 Subject: [PATCH 165/497] nginx script clean up (#197) - Change script name for simplify of configuration management orchestration scripts. - Change 172.0.0.1 to localhost for better nginx handling. 
--- snmp/nginx | 34 ++++++++++++++++++++++++++++++++++ snmp/nginx-stats | 37 ------------------------------------- 2 files changed, 34 insertions(+), 37 deletions(-) create mode 100755 snmp/nginx delete mode 100755 snmp/nginx-stats diff --git a/snmp/nginx b/snmp/nginx new file mode 100755 index 000000000..19f16592f --- /dev/null +++ b/snmp/nginx @@ -0,0 +1,34 @@ +#!/usr/bin/env python +import urllib2 +import re + +data = urllib2.urlopen('http://localhost/nginx-status').read() + +params = {} + +for line in data.split("\n"): + smallstat = re.match(r"\s?Reading:\s(.*)\sWriting:\s(.*)\sWaiting:\s(.*)$", line) + req = re.match(r"\s+(\d+)\s+(\d+)\s+(\d+)", line) + if smallstat: + params["Reading"] = smallstat.group(1) + params["Writing"] = smallstat.group(2) + params["Waiting"] = smallstat.group(3) + elif req: + params["Requests"] = req.group(3) + else: + pass + +dataorder = [ + "Active", + "Reading", + "Writing", + "Waiting", + "Requests" + ] + +for param in dataorder: + if param == "Active": + Active = int(params["Reading"]) + int(params["Writing"]) + int(params["Waiting"]) + print Active + else: + print params[param] diff --git a/snmp/nginx-stats b/snmp/nginx-stats deleted file mode 100755 index 1cedca5ba..000000000 --- a/snmp/nginx-stats +++ /dev/null @@ -1,37 +0,0 @@ -#!/usr/bin/env python2 -import urllib2 -import re - - -data = urllib2.urlopen('http://127.0.0.1/nginx-status').read() - -params = {} - -for line in data.split("\n"): - smallstat = re.match(r"\s?Reading:\s(.*)\sWriting:\s(.*)\sWaiting:\s(.*)$", line) - req = re.match(r"\s+(\d+)\s+(\d+)\s+(\d+)", line) - if smallstat: - params["Reading"] = smallstat.group(1) - params["Writing"] = smallstat.group(2) - params["Waiting"] = smallstat.group(3) - elif req: - params["Requests"] = req.group(3) - else: - pass - - -dataorder = [ - "Active", - "Reading", - "Writing", - "Waiting", - "Requests" - ] - - -for param in dataorder: - if param == "Active": - Active = int(params["Reading"]) + int(params["Writing"]) + 
int(params["Waiting"]) - print Active - else: - print params[param] From 8e6c90e2e11f21ece71d56a912a62c728d8f3c48 Mon Sep 17 00:00:00 2001 From: sparknsh <38894705+sparknsh@users.noreply.github.com> Date: Wed, 17 Oct 2018 12:28:07 -0400 Subject: [PATCH 166/497] phpfpmsf script clean up (#198) - Change script name for simplify of configuration management orchestration scripts. - Update code syntax. --- snmp/{phpfpm-sp => phpfpmsp} | 102 +++++++++++++++++------------------ 1 file changed, 50 insertions(+), 52 deletions(-) rename snmp/{phpfpm-sp => phpfpmsp} (52%) diff --git a/snmp/phpfpm-sp b/snmp/phpfpmsp similarity index 52% rename from snmp/phpfpm-sp rename to snmp/phpfpmsp index 2ae5a5e95..3eb0e0c50 100644 --- a/snmp/phpfpm-sp +++ b/snmp/phpfpmsp @@ -78,60 +78,58 @@ phpfpm_slow_requests=0 # local opts="${1}" url="${2}" - phpfpm_response=($(curl -Ss ${opts} "${url}")) - [ $? -ne 0 -o "${#phpfpm_response[@]}" -eq 0 ] && exit 1 + phpfpm_response=($(curl -Ss ${opts} "${url}")) + [ $? -ne 0 -o "${#phpfpm_response[@]}" -eq 0 ] && exit 1 - if [[ "${phpfpm_response[0]}" != "pool:" \ - || "${phpfpm_response[2]}" != "process" \ - || "${phpfpm_response[5]}" != "start" \ - || "${phpfpm_response[12]}" != "accepted" \ - || "${phpfpm_response[15]}" != "listen" \ - || "${phpfpm_response[16]}" != "queue:" \ - || "${phpfpm_response[26]}" != "idle" \ - || "${phpfpm_response[29]}" != "active" \ - || "${phpfpm_response[32]}" != "total" \ - ]] - then - echo "invalid response from phpfpm status server: ${phpfpm_response[*]}" - exit 1; - fi + if [[ "${phpfpm_response[0]}" != "pool:" \ + || "${phpfpm_response[2]}" != "process" \ + || "${phpfpm_response[5]}" != "start" \ + || "${phpfpm_response[12]}" != "accepted" \ + || "${phpfpm_response[15]}" != "listen" \ + || "${phpfpm_response[16]}" != "queue:" \ + || "${phpfpm_response[26]}" != "idle" \ + || "${phpfpm_response[29]}" != "active" \ + || "${phpfpm_response[32]}" != "total" \ + ]] + then + echo "invalid response from phpfpm status 
server: ${phpfpm_response[*]}" + exit 1; + fi - phpfpm_pool="${phpfpm_response[1]}" - phpfpm_start_time="${phpfpm_response[7]} ${phpfpm_response[8]}" - phpfpm_start_since="${phpfpm_response[11]}" - phpfpm_accepted_conn="${phpfpm_response[14]}" - phpfpm_listen_queue="${phpfpm_response[17]}" - phpfpm_max_listen_queue="${phpfpm_response[21]}" - phpfpm_listen_queue_len="${phpfpm_response[25]}" - phpfpm_idle_processes="${phpfpm_response[28]}" - phpfpm_active_processes="${phpfpm_response[31]}" - phpfpm_total_processes="${phpfpm_response[34]}" - phpfpm_max_active_processes="${phpfpm_response[38]}" - phpfpm_max_children_reached="${phpfpm_response[42]}" - if [ "${phpfpm_response[43]}" == "slow" ] - then - phpfpm_slow_requests="${phpfpm_response[45]}" - else - phpfpm_slow_requests="-1" - fi - - if [[ -z "${phpfpm_pool}" \ - || -z "${phpfpm_start_time}" \ - || -z "${phpfpm_start_since}" \ - || -z "${phpfpm_accepted_conn}" \ - || -z "${phpfpm_listen_queue}" \ - || -z "${phpfpm_max_listen_queue}" \ - || -z "${phpfpm_listen_queue_len}" \ - || -z "${phpfpm_idle_processes}" \ - || -z "${phpfpm_active_processes}" \ - || -z "${phpfpm_total_processes}" \ - || -z "${phpfpm_max_active_processes}" \ - || -z "${phpfpm_max_children_reached}" \ - ]] - then - echo "empty values got from phpfpm status server: ${phpfpm_response[*]}" - exit 1 - fi + phpfpm_pool="${phpfpm_response[1]}" + phpfpm_start_time="${phpfpm_response[7]} ${phpfpm_response[8]}" + phpfpm_start_since="${phpfpm_response[11]}" + phpfpm_accepted_conn="${phpfpm_response[14]}" + phpfpm_listen_queue="${phpfpm_response[17]}" + phpfpm_max_listen_queue="${phpfpm_response[21]}" + phpfpm_listen_queue_len="${phpfpm_response[25]}" + phpfpm_idle_processes="${phpfpm_response[28]}" + phpfpm_active_processes="${phpfpm_response[31]}" + phpfpm_total_processes="${phpfpm_response[34]}" + phpfpm_max_active_processes="${phpfpm_response[38]}" + phpfpm_max_children_reached="${phpfpm_response[42]}" + if [ "${phpfpm_response[43]}" == "slow" ]; then + 
phpfpm_slow_requests="${phpfpm_response[45]}" + else + phpfpm_slow_requests="-1" + fi + + if [[ -z "${phpfpm_pool}" \ + || -z "${phpfpm_start_time}" \ + || -z "${phpfpm_start_since}" \ + || -z "${phpfpm_accepted_conn}" \ + || -z "${phpfpm_listen_queue}" \ + || -z "${phpfpm_max_listen_queue}" \ + || -z "${phpfpm_listen_queue_len}" \ + || -z "${phpfpm_idle_processes}" \ + || -z "${phpfpm_active_processes}" \ + || -z "${phpfpm_total_processes}" \ + || -z "${phpfpm_max_active_processes}" \ + || -z "${phpfpm_max_children_reached}" \ + ]]; then + echo "empty values got from phpfpm status server: ${phpfpm_response[*]}" + exit 1 + fi echo $phpfpm_pool echo $phpfpm_start_time From 233c1cfde2f9a7147fb3ceb92ac6f8281f6ea3e6 Mon Sep 17 00:00:00 2001 From: sparknsh <38894705+sparknsh@users.noreply.github.com> Date: Wed, 17 Oct 2018 12:28:39 -0400 Subject: [PATCH 167/497] osupdate script clean up (#199) - Change script name for simplify of configuration management orchestration scripts. - Update code syntax. 
--- snmp/{os-updates.sh => osupdate} | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) rename snmp/{os-updates.sh => osupdate} (85%) diff --git a/snmp/os-updates.sh b/snmp/osupdate similarity index 85% rename from snmp/os-updates.sh rename to snmp/osupdate index 33e1f9c62..e4185d408 100755 --- a/snmp/os-updates.sh +++ b/snmp/osupdate @@ -1,28 +1,28 @@ #!/usr/bin/env bash ################################################################ # copy this script to /etc/snmp/ and make it executable: # -# chmod +x /etc/snmp/os-updates.sh # +# chmod +x /etc/snmp/osupdate # # ------------------------------------------------------------ # # edit your snmpd.conf and include: # -# extend osupdate /opt/os-updates.sh # +# extend osupdate /etc/snmp/osupdate # #--------------------------------------------------------------# # restart snmpd and activate the app for desired host # #--------------------------------------------------------------# # please make sure you have the path/binaries below # -################################################################ -BIN_WC='/usr/bin/wc' -BIN_GREP='/bin/grep' +################################################################ +BIN_WC='/usr/bin/env wc' +BIN_GREP='/usr/bin/env grep' CMD_GREP='-c' CMD_WC='-l' -BIN_ZYPPER='/usr/bin/zypper' +BIN_ZYPPER='/usr/bin/env zypper' CMD_ZYPPER='-q lu' -BIN_YUM='/usr/bin/yum' +BIN_YUM='/usr/bin/env yum' CMD_YUM='-q check-update' -BIN_DNF='/usr/bin/dnf' +BIN_DNF='/usr/bin/env dnf' CMD_DNF='-q check-update' -BIN_APT='/usr/bin/apt-get' +BIN_APT='/usr/bin/env apt-get' CMD_APT='-qq -s upgrade' -BIN_PACMAN='/usr/bin/pacman' +BIN_PACMAN='/usr/bin/env pacman' CMD_PACMAN='-Sup' ################################################################ From 78a75e87702d5ba2b9daa9d27c2f75fafd4e945a Mon Sep 17 00:00:00 2001 From: sparknsh <38894705+sparknsh@users.noreply.github.com> Date: Thu, 18 Oct 2018 10:37:10 -0400 Subject: [PATCH 168/497] Fix binary operator expected error (#203) --- 
snmp/osupdate | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/snmp/osupdate b/snmp/osupdate index e4185d408..ed8e68888 100755 --- a/snmp/osupdate +++ b/snmp/osupdate @@ -28,7 +28,7 @@ CMD_PACMAN='-Sup' ################################################################ # Don't change anything unless you know what are you doing # ################################################################ -if [ -f $BIN_ZYPPER ]; then +if [ -f "$BIN_ZYPPER" ]; then # OpenSUSE UPDATES=`$BIN_ZYPPER $CMD_ZYPPER | $BIN_WC $CMD_WC` if [ $UPDATES -ge 2 ]; then @@ -36,7 +36,7 @@ if [ -f $BIN_ZYPPER ]; then else echo "0"; fi -elif [ -f $BIN_DNF ]; then +elif [ -f "$BIN_DNF" ]; then # Fedora UPDATES=`$BIN_DNF $CMD_DNF | $BIN_WC $CMD_WC` if [ $UPDATES -ge 1 ]; then @@ -44,7 +44,7 @@ elif [ -f $BIN_DNF ]; then else echo "0"; fi -elif [ -f $BIN_PACMAN ]; then +elif [ -f "$BIN_PACMAN" ]; then # Arch UPDATES=`$BIN_PACMAN $CMD_PACMAN | $BIN_WC $CMD_WC` if [ $UPDATES -ge 1 ]; then @@ -52,7 +52,7 @@ elif [ -f $BIN_PACMAN ]; then else echo "0"; fi -elif [ -f $BIN_YUM ]; then +elif [ -f "$BIN_YUM" ]; then # CentOS / Redhat UPDATES=`$BIN_YUM $CMD_YUM | $BIN_WC $CMD_WC` if [ $UPDATES -ge 1 ]; then @@ -60,7 +60,7 @@ elif [ -f $BIN_YUM ]; then else echo "0"; fi -elif [ -f $BIN_APT ]; then +elif [ -f "$BIN_APT" ]; then # Debian / Devuan / Ubuntu UPDATES=`$BIN_APT $CMD_APT | $BIN_GREP $CMD_GREP 'Inst'` if [ $UPDATES -ge 1 ]; then From 5e05c3087515b312b32e76acce44970cf54417d1 Mon Sep 17 00:00:00 2001 From: sparknsh <38894705+sparknsh@users.noreply.github.com> Date: Tue, 23 Oct 2018 08:51:12 -0400 Subject: [PATCH 169/497] fix nginx script indentation (#205) --- snmp/nginx | 40 +++++++++++++++++----------------------- 1 file changed, 17 insertions(+), 23 deletions(-) diff --git a/snmp/nginx b/snmp/nginx index 19f16592f..06efab6e6 100755 --- a/snmp/nginx +++ b/snmp/nginx @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python2 import urllib2 import re @@ -7,28 +7,22 @@ data = 
urllib2.urlopen('http://localhost/nginx-status').read() params = {} for line in data.split("\n"): - smallstat = re.match(r"\s?Reading:\s(.*)\sWriting:\s(.*)\sWaiting:\s(.*)$", line) - req = re.match(r"\s+(\d+)\s+(\d+)\s+(\d+)", line) - if smallstat: - params["Reading"] = smallstat.group(1) - params["Writing"] = smallstat.group(2) - params["Waiting"] = smallstat.group(3) - elif req: - params["Requests"] = req.group(3) - else: - pass + smallstat = re.match(r"\s?Reading:\s(.*)\sWriting:\s(.*)\sWaiting:\s(.*)$", line) + req = re.match(r"\s+(\d+)\s+(\d+)\s+(\d+)", line) + if smallstat: + params["Reading"] = smallstat.group(1) + params["Writing"] = smallstat.group(2) + params["Waiting"] = smallstat.group(3) + elif req: + params["Requests"] = req.group(3) + else: + pass -dataorder = [ - "Active", - "Reading", - "Writing", - "Waiting", - "Requests" - ] +dataorder = ["Active", "Reading", "Writing", "Waiting", "Requests"] for param in dataorder: - if param == "Active": - Active = int(params["Reading"]) + int(params["Writing"]) + int(params["Waiting"]) - print Active - else: - print params[param] + if param == "Active": + Active = int(params["Reading"]) + int(params["Writing"]) + int(params["Waiting"]) + print Active + else: + print params[param] From 942ffb8ae85a3d8189cd45148470fa89d7982119 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?F=C3=A9lim=20Whiteley?= Date: Tue, 23 Oct 2018 17:46:54 +0100 Subject: [PATCH 170/497] Add divide by zero check (#191) On several servers (Ubuntu 18.04) DEMAND_DATA_TOTAL is 0 currently and is causing an error Traceback (most recent call last): File "/usr/local/bin/zfs-linux", line 178, in sys.exit(main(sys.argv[1:])) File "/usr/local/bin/zfs-linux", line 76, in main DATA_DEMAND_PERCENT = DEMAND_DATA_HITS / DEMAND_DATA_TOTAL * 100 ZeroDivisionError: division by zero --- snmp/zfs-linux | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/zfs-linux b/snmp/zfs-linux index e9c19e1c6..c5f36256c 100644 --- a/snmp/zfs-linux +++ 
b/snmp/zfs-linux @@ -73,7 +73,7 @@ def main(args): CACHE_HIT_PERCENT = ARC_HITS / ARC_ACCESSES_TOTAL * 100 CACHE_MISS_PERCENT = ARC_MISSES / ARC_ACCESSES_TOTAL * 100 ACTUAL_HIT_PERCENT = REAL_HITS / ARC_ACCESSES_TOTAL * 100 - DATA_DEMAND_PERCENT = DEMAND_DATA_HITS / DEMAND_DATA_TOTAL * 100 + DATA_DEMAND_PERCENT = DEMAND_DATA_HITS / DEMAND_DATA_TOTAL * 100 if DEMAND_DATA_TOTAL != 0 else 0 DATA_PREFETCH_PERCENT = PREFETCH_DATA_HITS / PREFETCH_DATA_TOTAL * 100 if PREFETCH_DATA_TOTAL != 0 else 0 From c3839c00e89512f911b568df1330db25edcd69d2 Mon Sep 17 00:00:00 2001 From: sparknsh <38894705+sparknsh@users.noreply.github.com> Date: Thu, 25 Oct 2018 07:17:42 -0400 Subject: [PATCH 171/497] Fix package manager detection (#204) * Fix package manager detection * use release file for os detection * Use command to to validate package manager type * check if exists and the execute permission is granted * make script more portable --- snmp/osupdate | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/snmp/osupdate b/snmp/osupdate index ed8e68888..f45493dc4 100755 --- a/snmp/osupdate +++ b/snmp/osupdate @@ -28,7 +28,7 @@ CMD_PACMAN='-Sup' ################################################################ # Don't change anything unless you know what are you doing # ################################################################ -if [ -f "$BIN_ZYPPER" ]; then +if command -v zypper &>/dev/null ; then # OpenSUSE UPDATES=`$BIN_ZYPPER $CMD_ZYPPER | $BIN_WC $CMD_WC` if [ $UPDATES -ge 2 ]; then @@ -36,7 +36,7 @@ if [ -f "$BIN_ZYPPER" ]; then else echo "0"; fi -elif [ -f "$BIN_DNF" ]; then +elif command -v dnf &>/dev/null ; then # Fedora UPDATES=`$BIN_DNF $CMD_DNF | $BIN_WC $CMD_WC` if [ $UPDATES -ge 1 ]; then @@ -44,7 +44,7 @@ elif [ -f "$BIN_DNF" ]; then else echo "0"; fi -elif [ -f "$BIN_PACMAN" ]; then +elif command -v pacman &>/dev/null ; then # Arch UPDATES=`$BIN_PACMAN $CMD_PACMAN | $BIN_WC $CMD_WC` if [ $UPDATES -ge 1 ]; then @@ -52,7 +52,7 @@ elif [ -f 
"$BIN_PACMAN" ]; then else echo "0"; fi -elif [ -f "$BIN_YUM" ]; then +elif command -v yum &>/dev/null ; then # CentOS / Redhat UPDATES=`$BIN_YUM $CMD_YUM | $BIN_WC $CMD_WC` if [ $UPDATES -ge 1 ]; then @@ -60,7 +60,7 @@ elif [ -f "$BIN_YUM" ]; then else echo "0"; fi -elif [ -f "$BIN_APT" ]; then +elif command -v apt-get &>/dev/null ; then # Debian / Devuan / Ubuntu UPDATES=`$BIN_APT $CMD_APT | $BIN_GREP $CMD_GREP 'Inst'` if [ $UPDATES -ge 1 ]; then From 3b8d6a6697da1721b64b07650e1205c98d5f5745 Mon Sep 17 00:00:00 2001 From: gardar Date: Thu, 25 Oct 2018 19:19:20 +0000 Subject: [PATCH 172/497] CloudLinux distro detection (#208) Added CloudLinux distro detection, previously CloudLinux got identified as RedHat --- snmp/distro | 2 ++ 1 file changed, 2 insertions(+) diff --git a/snmp/distro b/snmp/distro index d833a0e25..ce146801c 100755 --- a/snmp/distro +++ b/snmp/distro @@ -24,6 +24,8 @@ elif [ "${OS}" = "Linux" ] ; then DIST=$(cat /etc/redhat-release | awk '{print $1}') if [ "${DIST}" = "CentOS" ]; then DIST="CentOS" + elif [ "${DIST}" = "CloudLinux" ]; then + DIST="CloudLinux" elif [ "${DIST}" = "Mandriva" ]; then DIST="Mandriva" PSEUDONAME=`cat /etc/mandriva-release | sed s/.*\(// | sed s/\)//` From 48f2fff8e93d3cd46b8aca210e7785d1ec1ee77c Mon Sep 17 00:00:00 2001 From: VVelox Date: Thu, 22 Nov 2018 09:04:58 -0600 Subject: [PATCH 173/497] add portactivity SNMP extend (#159) * add portactivity SNMP extend in its initial form * update for the current json_app_get * add version to the returned JSON * add basic POD documentation --- snmp/portactivity | 352 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 352 insertions(+) create mode 100755 snmp/portactivity diff --git a/snmp/portactivity b/snmp/portactivity new file mode 100755 index 000000000..430ae5190 --- /dev/null +++ b/snmp/portactivity @@ -0,0 +1,352 @@ +#!/usr/bin/env perl + +#Copyright (c) 2018, Zane C. Bowers-Hadley +#All rights reserved. 
+# +#Redistribution and use in source and binary forms, with or without modification, +#are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +#ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +#WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +#IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, +#INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +#BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +#LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +#OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +#THE POSSIBILITY OF SUCH DAMAGE. + +# FreeBSD /usr/include/netinet/tcp_fsm.h +# Linux netstat(8) +# FreeBSD --> Linux +# LISTEN --> LISTEN +# CLOSED --> CLOSED +# SYN_SENT --> SYN_SENT +# SYN_RECEIVED -->SYN_RECV +# ESTABLISHED --> ESTABLISHED +# CLOSE_WAIT --> CLOSE_WAIT +# FIN_WAIT_1 --> FIN_WAIT1 +# CLOSING --> CLOSING +# LAST_ACK --> LAST_ACK +# FIN_WAIT_2 --> FIN_WAIT2 +# TIME_WAIT --> TIME_WAIT +# ((no equivalent)) --> UNKNOWN +# +# UNKNOWN is being regarded as a valid state for all and will be used on OSes that supported it +# The names returned by default are those used by FreeBSD. + +=head1 NAME + +portactivity - Generates JSON output based on netstat data for the specificied TCP services. 
+ +=head1 SYNOPSIS + +portactivity [B<-P>] B<-p> + +=head1 USAGE + +This is meant to be used as a SNMP extend for use with json_app_get in LibreNMS. + +Below is a example of its usage with netsnmpd and checking HTTP and SSH. + + extend portactivity /etc/snmp/portactivity -p http,ssh + +=head1 SWITCHES + +=head2 B<-P> + +Prints the JSON in easily human readable format. + +=head2 B<-p> + +This is a comma seperated list of TCP services to check. + +=head1 SERVICES + +NSS is used to resolve the TCP service protocol names. All the ones listed with -p +must be findable that way or it will error. + +If you are running something on a non-standard port and want to check for it, you either +have to use the name of the port it is on, add it to the database, or change it in the +database(if it is already there under a undesired name). + +In general the file in question on most systems is going to be '/etc/services' and you +will need to run services_mkdb(8) after updating it. But for specifics you will want to +consult services(5). + +=cut + +use strict; +use warnings; +use JSON; +use Getopt::Std; +use Parse::Netstat qw(parse_netstat); + +$Getopt::Std::STANDARD_HELP_VERSION = 1; +sub main::VERSION_MESSAGE { + print "Port Activity SNMP stats extend 0.0.0\n"; +} + +sub main::HELP_MESSAGE { + print "\n". + "-p A comma seperated list of TCP protocols to check for in netstat.\n". 
+ "-P Print the output in a human readable manner.\n"; +} + +#returns aa new hash with all zeroed values for a new protocol +sub newProto{ + + return { + 'total_conns'=>0, + 'total_to'=>0, + 'total_from'=>0, + 'total'=>{ + 'LISTEN'=>0, + 'CLOSED'=>0, + 'SYN_SENT'=>0, + 'SYN_RECEIVED'=>0, + 'ESTABLISHED'=>0, + 'CLOSE_WAIT'=>0, + 'FIN_WAIT_1'=>0, + 'CLOSING'=>0, + 'LAST_ACK'=>0, + 'FIN_WAIT_2'=>0, + 'TIME_WAIT'=>0, + 'UNKNOWN'=>0, + 'other'=>0, + }, + 'to'=>{ + 'LISTEN'=>0, + 'CLOSED'=>0, + 'SYN_SENT'=>0, + 'SYN_RECEIVED'=>0, + 'ESTABLISHED'=>0, + 'CLOSE_WAIT'=>0, + 'FIN_WAIT_1'=>0, + 'CLOSING'=>0, + 'LAST_ACK'=>0, + 'FIN_WAIT_2'=>0, + 'TIME_WAIT'=>0, + 'UNKNOWN'=>0, + 'other'=>0, + }, + 'from'=>{ + 'LISTEN'=>0, + 'CLOSED'=>0, + 'SYN_SENT'=>0, + 'SYN_RECEIVED'=>0, + 'ESTABLISHED'=>0, + 'CLOSE_WAIT'=>0, + 'FIN_WAIT_1'=>0, + 'CLOSING'=>0, + 'LAST_ACK'=>0, + 'FIN_WAIT_2'=>0, + 'TIME_WAIT'=>0, + 'UNKNOWN'=>0, + 'other'=>0, + }, + } + ; +} + +#returns the json output +sub return_json{ + my %to_return; + if(defined($_[0])){ + %to_return= %{$_[0]}; + } + my $pretty=$_[1]; + + if (!defined( $to_return{data} ) ){ + $to_return{data}={}; + } + + my $j=JSON->new; + + if ( $pretty ){ + $j->pretty(1); + } + + print $j->encode( \%to_return ); + + if ( ! $pretty ){ + print "\n"; + } +} + +my %valid_states=( + 'LISTEN'=>1, + 'CLOSED'=>1, + 'SYN_SENT'=>1, + 'SYN_RECEIVED'=>1, + 'ESTABLISHED'=>1, + 'CLOSE_WAIT'=>1, + 'FIN_WAIT_1'=>1, + 'CLOSING'=>1, + 'LAST_ACK'=>1, + 'FIN_WAIT_2'=>1, + 'TIME_WAIT'=>1, + 'UNKNOWN'=>1, + ); + +#gets the options +my %opts=(); +getopts('p:P', \%opts); + +#what will be returned +my %to_return; +$to_return{error}='0'; +$to_return{errorString}=''; +$to_return{version}=1; + +if (! 
defined( $opts{p} ) ){ + $to_return{errorString}='No services specificied to check for'; + $to_return{error}=1; + return_json(\%to_return, $opts{P}); + exit 1; +} + +#the list of protocols to check for +my @protos_array=split(/\,/, $opts{p}); + +#holds the various protocol hashes +my %protos; + +#make sure each one specificied is defined and build the hash that will be returned +my $protos_array_int=0; +while ( defined( $protos_array[$protos_array_int] ) ){ + $protos{ $protos_array[$protos_array_int] }=newProto; + + #check if it exists + my $port=getservbyname( $protos_array[$protos_array_int] , 'tcp' ); + + # if it is not defined, then we error + if ( !defined( $port ) ){ + $to_return{errorString}='"'.$protos_array[$protos_array_int].'" is not a known service either add it or double check your spelling'; + $to_return{error}=4; + return_json(\%to_return, $opts{P}); + exit 4; + } + + $protos_array_int++; +} + +my $os=$^O; + +my $netstat; + +#make sure this is a supported OS +if ( $os eq 'freebsd' ){ + $netstat='netstat -S -p tcp' +}elsif( $os eq 'linux' ){ + $netstat='netstat -n' +}else{ + $to_return{errorString}=$os.' 
is not a supported OS as of currently'; + $to_return{error}=3; + return_json(\%to_return, $opts{P}); + exit 3; +} + +my $res = parse_netstat(output => join("", `$netstat`), flavor=>$os); + +#check to make sure that it was able to parse the output +if ( + (!defined( $res->[1] )) || + ($res->[1] ne 'OK' ) + ){ + $to_return{errorString}='Unable to parse netstat output'; + $to_return{error}=2; + return_json(\%to_return, $opts{P}); + exit 2; +} + +#chew through each connection +my $active_conns_int=0; +while ( defined( $res->[2]{'active_conns'}[$active_conns_int] ) ){ + my $conn=$res->[2]{active_conns}[$active_conns_int]; + + #we only care about TCP currently + if ( $conn->{proto} =~ /^[Tt][Cc][Pp]/ ){ + $protos_array_int=0; + my $service; + while( + ( defined( $protos_array[ $protos_array_int ] ) ) && + ( !defined( $service ) ) #stop once we find it + ){ + #check if this matches either ports + if ( + ( $protos_array[ $protos_array_int ] eq $conn->{'local_port'} ) || + ( $protos_array[ $protos_array_int ] eq $conn->{'foreign_port'} ) + ){ + $service=$protos_array[ $protos_array_int ]; + } + + $protos_array_int++; + } + + #only handle it if is a service we are watching for + if ( defined( $service ) ){ + my $processed=0; + + my $state=$conn->{'state'}; + #translate the state names + if ( $os eq 'linux' ){ + if ( $state eq 'SYN_RECV' ){ + $state='SYN_RECEIVED'; + }elsif( $state eq 'FIN_WAIT1' ){ + $state='FIN_WAIT_1'; + }elsif( $state eq 'FIN_WAIT2' ){ + $state='FIN_WAIT_2' + } + } + + #only count the state towards the total if not listening + if ( $state ne 'LISTEN' ){ + $protos{$service}{'total_conns'}++; + } + + #make sure the state is a valid one + # if it is not a valid one, set it to other, meaning something unexpected was set for the state that should not be + if ( ! 
defined( $valid_states{$state} ) ){ + $state='other'; + } + + #increment the total state + $protos{$service}{'total'}{$state}++; + + if ( + ( $conn->{'foreign_port'} eq $service ) && + ( $state ne 'LISTEN' ) + ){ + $protos{$service}{'total_from'}++; + $protos{$service}{'from'}{$state}++; + $processed=1; + } + + if ( + ( $conn->{'local_port'} eq $service ) && + ( $state ne 'LISTEN' ) && + ( ! $processed ) + ){ + $protos{$service}{'total_to'}++; + $protos{$service}{'to'}{$state}++; + } + + } + + } + + $active_conns_int++; +} + +#return the finished product +$to_return{data}=\%protos; +return_json(\%to_return, $opts{P}); +exit 0; From b45845f54bf5b24c6393a26403e7e1cdf25263b7 Mon Sep 17 00:00:00 2001 From: Kovrinic Date: Wed, 28 Nov 2018 21:22:16 -0600 Subject: [PATCH 174/497] Added Ubuntu 14.04 zfs support --- snmp/zfs-linux | 40 +++++++++++++++++++++++++++++++++++++--- 1 file changed, 37 insertions(+), 3 deletions(-) mode change 100644 => 100755 snmp/zfs-linux diff --git a/snmp/zfs-linux b/snmp/zfs-linux old mode 100644 new mode 100755 index c5f36256c..87677d0b5 --- a/snmp/zfs-linux +++ b/snmp/zfs-linux @@ -2,6 +2,13 @@ import json import subprocess +def proc_err(cmd, proc): + # output process error and first line of error code + return "{}{}".format( + subprocess.CalledProcessError(proc.returncode, cmd, proc.stderr), + " ({})".format(proc.stderr.splitlines()[0]) if proc.stderr.splitlines() else "" + ) + def main(args): res = {} @@ -95,9 +102,24 @@ def main(args): PREFETCH_METADATA_MISSES_PERCENT = DEMAND_METADATA_MISSES / ARC_MISSES * 100 # pools - proc = subprocess.run(['/sbin/zpool', 'list', '-pH'], stdout=subprocess.PIPE, universal_newlines=True) - if proc.returncode != 0: - return proc.returncode + exact_size = True + zpool_cmd = ['/sbin/zpool'] + zpool_cmd_list = zpool_cmd + ['list', '-p', '-H'] + std = {'stdout': subprocess.PIPE, 'stderr': subprocess.PIPE, 'universal_newlines': True} + + ## account for variations between ZoL zfs versions + proc = 
subprocess.run(zpool_cmd_list, **std) + if (proc.returncode == 1) and (('root' in proc.stderr) or ('admin' in proc.stderr)): + zpool_cmd = ['sudo'] + zpool_cmd # elevate zpool with sudo + zpool_cmd_list = zpool_cmd + ['list', '-p', '-H'] + proc = subprocess.run(zpool_cmd_list, **std) + if (proc.returncode == 2): + # -p option is not present in older versions + del zpool_cmd_list[zpool_cmd_list.index('-p')] # try removing -p to fix the issue + proc = subprocess.run(zpool_cmd_list, **std) + exact_size = False + if (proc.returncode != 0): + return proc_err(zpool_cmd_list, proc) pools = [] FIELDS = ['name', 'size', 'alloc', 'free', 'expandsz', 'frag', 'cap', 'dedup'] @@ -110,6 +132,18 @@ def main(args): info['dedup'] = info['dedup'].rstrip('x') info['cap'] = info['cap'].rstrip('%') + # zfs-06.5.11 fix + if not exact_size: + zpool_cmd_get = zpool_cmd + ['get', '-pH', 'size,alloc,free', info['name']] + proc2 = subprocess.run(zpool_cmd_get, **std) + if (proc2.returncode != 0): + return proc_err(zpool_cmd_get, proc2) + + info2 = dict([tuple(s.split('\t')[1:3]) for s in proc2.stdout.splitlines()]) + info['size'] = info2['size'] + info['alloc'] = info2['allocated'] + info['free'] = info2['free'] + pools.append(info) res = { From e78383f163430ebe09873b8eb7ace445bdac3924 Mon Sep 17 00:00:00 2001 From: dsgagi Date: Wed, 12 Dec 2018 16:09:25 +0100 Subject: [PATCH 175/497] Update distro --- snmp/distro | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/distro b/snmp/distro index ce146801c..f8926acca 100755 --- a/snmp/distro +++ b/snmp/distro @@ -50,7 +50,7 @@ elif [ "${OS}" = "Linux" ] ; then DIST="Devuan `cat /etc/devuan_version`" REV="" - elif [ -f /etc/debian_version ] ; then + elif [ -f /etc/debian_version -a -f /usr/bin/lsb_release ] ; then DIST="Debian `cat /etc/debian_version`" REV="" ID=`lsb_release -i | awk -F ':' '{print $2}' | sed 's/ //g'` From 36f4bb275f7273f7c237636d0fb4cf3d2190db94 Mon Sep 17 00:00:00 2001 From: dsgagi Date: Fri, 14 Dec 2018 
18:47:54 +0100 Subject: [PATCH 176/497] Update distro Minor changes to the code, for better output. --- snmp/distro | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/snmp/distro b/snmp/distro index f8926acca..88cc60d64 100755 --- a/snmp/distro +++ b/snmp/distro @@ -50,12 +50,14 @@ elif [ "${OS}" = "Linux" ] ; then DIST="Devuan `cat /etc/devuan_version`" REV="" - elif [ -f /etc/debian_version -a -f /usr/bin/lsb_release ] ; then + elif [ -f /etc/debian_version ] ; then DIST="Debian `cat /etc/debian_version`" REV="" - ID=`lsb_release -i | awk -F ':' '{print $2}' | sed 's/ //g'` + if [ -f /usr/bin/lsb_release ] ; then + ID=`lsb_release -i | awk -F ':' '{print $2}' | sed 's/ //g'` + fi if [ "${ID}" = "Raspbian" ] ; then - DIST="Raspbian `cat /etc/debian_version`" + DIST="Raspbian `cat /etc/debian_version`" fi elif [ -f /etc/gentoo-release ] ; then From 2818f5890c2051affb0765527323505f0ef560f0 Mon Sep 17 00:00:00 2001 From: dsgagi Date: Fri, 14 Dec 2018 18:49:58 +0100 Subject: [PATCH 177/497] Update distro Remove extra white spaces. 
--- snmp/distro | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/snmp/distro b/snmp/distro index 88cc60d64..0d6578c37 100755 --- a/snmp/distro +++ b/snmp/distro @@ -54,10 +54,10 @@ elif [ "${OS}" = "Linux" ] ; then DIST="Debian `cat /etc/debian_version`" REV="" if [ -f /usr/bin/lsb_release ] ; then - ID=`lsb_release -i | awk -F ':' '{print $2}' | sed 's/ //g'` + ID=`lsb_release -i | awk -F ':' '{print $2}' | sed 's/ //g'` fi if [ "${ID}" = "Raspbian" ] ; then - DIST="Raspbian `cat /etc/debian_version`" + DIST="Raspbian `cat /etc/debian_version`" fi elif [ -f /etc/gentoo-release ] ; then From 185531b7ceff60551169c8276f0311c1f57d1242 Mon Sep 17 00:00:00 2001 From: Jason Cheng <30381035+jasoncheng7115@users.noreply.github.com> Date: Sun, 16 Dec 2018 22:21:00 +0800 Subject: [PATCH 178/497] Added FreeNAS Version support (#215) Hi, I added FreeNAS version information support, as shown in the figure: ![2018-12-15 11 53 31](https://user-images.githubusercontent.com/30381035/50044886-2329a580-00c5-11e9-817c-b89a8374270d.png) ![2018-12-15 11 53 49](https://user-images.githubusercontent.com/30381035/50044887-2329a580-00c5-11e9-93b4-b140809f84a3.png) --- snmp/distro | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/snmp/distro b/snmp/distro index 0d6578c37..9f1e9d442 100755 --- a/snmp/distro +++ b/snmp/distro @@ -125,7 +125,12 @@ elif [ "${OS}" = "Darwin" ] ; then fi elif [ "${OS}" = "FreeBSD" ] ; then - OSSTR=`/usr/bin/uname -mior` + DIST=$(cat /etc/version | cut -d'-' -f 1) + if [ "${DIST}" = "FreeNAS" ]; then + OSSTR=`cat /etc/version | cut -d' ' -f 1` + else + OSSTR=`/usr/bin/uname -mior` + fi fi echo ${OSSTR} From 708892dd7b90e53fe671750a4e0d1602228ce1ca Mon Sep 17 00:00:00 2001 From: Jason Cheng <30381035+jasoncheng7115@users.noreply.github.com> Date: Tue, 25 Dec 2018 09:15:22 +0800 Subject: [PATCH 179/497] Added Proxmox VE Versoin support --- snmp/distro | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git 
a/snmp/distro b/snmp/distro index 9f1e9d442..cd9e814bf 100755 --- a/snmp/distro +++ b/snmp/distro @@ -59,7 +59,10 @@ elif [ "${OS}" = "Linux" ] ; then if [ "${ID}" = "Raspbian" ] ; then DIST="Raspbian `cat /etc/debian_version`" fi - + if [ -f /usr/bin/pveversion ]; then + DIST="${DIST}/PVE `/usr/bin/pveversion | cut -d '/' -f 2`" + fi + elif [ -f /etc/gentoo-release ] ; then DIST="Gentoo" REV=$(tr -d '[[:alpha:]]' Date: Fri, 28 Dec 2018 20:08:46 -0600 Subject: [PATCH 180/497] JSON SNMP extend for UPS-APC app. (#189) * add snmp/ups-apcups, a Perl rewrite of snmp/ups-apcups.sh to support JSON * finish documenting it * add version and remove units from the returned values --- snmp/ups-apcups | 133 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 133 insertions(+) create mode 100755 snmp/ups-apcups diff --git a/snmp/ups-apcups b/snmp/ups-apcups new file mode 100755 index 000000000..f3f45d7df --- /dev/null +++ b/snmp/ups-apcups @@ -0,0 +1,133 @@ +#!/usr/bin/env perl +# Author: Zane C. Bowers-Hadley + +# https://docs.librenms.org/#Extensions/Applications/#ups-apcups +# See the above for additional information not documented in the POD below. + +=head1 DESCRIPTION + +This is a SNMP extend for apcupsd for use with LibreNMS. + +For more information, see L. + +=head1 SWITCHES + +=head2 -p + +Pretty print the JSON. + +=head1 SNMPD SETUP EXAMPLES + +Below is a basic example of setting it up snmpd.conf for NetSNMP. + + extend ups-apcups /etc/snmp/ups-apcups + +Now if for example apcaccess is not in the PATH enviromental variables that snmpd is running +with, you may need to do something like below. + + extend /usr/bin/env PATH=/sbin:/bin:/usr/sbin:/usr/bin:/usr/local/sbin:/usr/local/bin /etc/snmp/ups-apcups + +=cut + +#Copyright (c) 2018, Zane C. Bowers-Hadley +#All rights reserved. 
+# +#Redistribution and use in source and binary forms, with or without modification, +#are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +#ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +#WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +#IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, +#INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +#BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +#LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +#OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +#THE POSSIBILITY OF SUCH DAMAGE. + +use strict; +use warnings; +use Getopt::Std; +use JSON; + +# should be no reason to change this +# better to use env to make sure it is in your path when you run this +my $apcaccess='apcaccess'; + +$Getopt::Std::STANDARD_HELP_VERSION = 1; +sub main::VERSION_MESSAGE { + print "ups-apcups SNMP extend 0.0.0\n"; +}; + +sub main::HELP_MESSAGE { + print "\n"; +} + +#gets the options +my %opts=(); +getopts('p', \%opts); + +#holds what will be returned +my %data; +my %toReturn; +$toReturn{version}=1; + +# get the current status from apcupsd +my $apcaccess_output=`$apcaccess`; +$toReturn{error}=$?; + +# check for bad exit codes +if ( $? == -1){ + $toReturn{errorString}='failed to run apcaccess'; +} +elsif ($? 
& 127) { + $toReturn{errorString}= sprintf "apcaccess died with signal %d, %s coredump\n", + ($? & 127), ($? & 128) ? 'with' : 'without'; +} else { + $toReturn{error}=$? >> 8; + $toReturn{errorString}="apcaccess exited with ".$toReturn{error}; +} + +# if no bad exit codes, we can process $apcaccess_output +if ( $toReturn{error} == 0 ){ + # holds the found data for the apcupsd status + my %status; + + # pulls apart the output + my @lines=split(/\n/, $apcaccess_output); + foreach my $line ( @lines ){ + my ( $var, $val )=split(/\ *\:\ */, $line, 2); + $val=~s/\ .*//; + $status{$var}=$val; + } + + #pull the desired variables from the output + $data{charge}=$status{BCHARGE}; + $data{time_remaining}=$status{TIMELEFT}; + $data{battery_nominal}=$status{NOMBATTV}; + $data{battery_voltage}=$status{BATTV}; + $data{input_voltage}=$status{LINEV}; + $data{nominal_voltage}=$status{NOMINV}; + $data{load}=$status{LOADPCT}; +} + +# add the data to be return to the return hah +$toReturn{data}=\%data; + +# convert $toReturn to JSON and pretty print if asked to +my $j=JSON->new; +if ( $opts{p} ){ + $j->pretty(1); +} +print $j->encode( \%toReturn ); +if (! $opts{p} ){ + print "\n"; +} +exit 0; From 6e5c4559e6c89fcbb6235f3a55cdaf40374fbdfe Mon Sep 17 00:00:00 2001 From: VVelox Date: Fri, 28 Dec 2018 20:10:13 -0600 Subject: [PATCH 181/497] convert the FreeBSD NFS stuff over to JSON and add in lots of sanity (#190) * convert fbsdnfsclient over to JSON * Convert the server stuff to JSON and fix the output of the client extend. * misc. stuff * lots of cleanup and sanity added to the FreeBSD NFS scripts * fix the #! 
line * update the docs at the top --- snmp/fbsdnfsclient | 275 +++++++++++++++++++++++---------------------- snmp/fbsdnfsserver | 225 +++++++++++++++++++++---------------- 2 files changed, 271 insertions(+), 229 deletions(-) diff --git a/snmp/fbsdnfsclient b/snmp/fbsdnfsclient index f41c7b606..7e3d57722 100644 --- a/snmp/fbsdnfsclient +++ b/snmp/fbsdnfsclient @@ -1,135 +1,174 @@ -#!/usr/local/bin/perl +#!/usr/bin/env perl -# Add this to snmpd.conf as below. -# extend fbsdnfsclient /etc/snmp/fbsdnfsclient +=head1 DESCRIPTION + +This is a SNMP extend for FreeBSD NFS server stats for use with LibreNMS. + +For more information, see L. + +=head1 SWITCHES + +=head2 -p + +Pretty print the JSON. + +=head1 SNMPD SETUP EXAMPLES + +Below is a basic example of setting it up snmpd.conf for NetSNMP. + + extend fbsdnfsclient /etc/snmp/fbsdnfsclient + +=cut + +#Copyright (c) 2018, Zane C. Bowers-Hadley +#All rights reserved. +# +#Redistribution and use in source and binary forms, with or without modification, +#are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +#ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +#WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+#IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, +#INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +#BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +#LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +#OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +#THE POSSIBILITY OF SUCH DAMAGE. + +use strict; +use warnings; +use Getopt::Std; +use JSON; + +#the version of returned data +my $VERSION=1; + +#gets the options +my %opts=(); +getopts('p', \%opts); + +$Getopt::Std::STANDARD_HELP_VERSION = 1; +sub main::VERSION_MESSAGE { + print "fbsdnfsclient SNMP extend 0.0.0\n"; +} + +sub main::HELP_MESSAGE { + print "\n". + "-p Print the JSON in a pretty manner.\n"; + exit 0; +} + +#the data to return +my %to_return; +$to_return{'version'}=$VERSION; +$to_return{'error'}='0'; +$to_return{'errorString'}=''; my $nfsstatOutput=`/usr/bin/nfsstat`; -my @nfsstatOutputA=split( /\n/, $nfsstatOutput ); -my $int=0; +$to_return{error}=$?; -my ( - $Getattr, - $Setattr, - $Lookup, - $Readlink, - $Read, - $Write, - $Create, - $Remove, - $Rename, - $Link, - $Symlink, - $Mkdir, - $Rmdir, - $Readdir, - $RdirPlus, - $Access, - $Mknod, - $Fsstat, - $Fsinfo, - $PathConf, - $Commit, - $TimedOut, - $Invalid, - $XReplies, - $Retries, - $Requests, - $AttrHits, - $AttrMisses, - $LkupHits, - $LkupMisses, - $BioRHits, - $BioRMisses, - $BioWHits, - $BioWMisses, - $BioRLHits, - $BioRLMisses, - $BioDHits, - $BioDMisses, - $DirEHits, - $DirEMisses, - $AccsHits, - $AccsMisses, - ); +# check for bad exit codes +if ( $? == -1){ + $to_return{errorString}='failed to run nfsstat'; +} +elsif ($? & 127) { + $to_return{errorString}= sprintf "nfsstat died with signal %d, %s coredump\n", + ($? & 127), ($? & 128) ? 'with' : 'without'; +} else { + $to_return{error}=$? 
>> 8; + $to_return{errorString}="nfsstat exited with ".$to_return{error}; +} +# pull the output of nfssetat appart +my %data; +my @nfsstatOutputA=split( /\n/, $nfsstatOutput ); +my $int=0; while( defined( $nfsstatOutputA[$int] ) ){ $nfsstatOutputA[$int]=~s/^ +//; $nfsstatOutputA[$int]=~s/ +/ /g; if ( $int == 3 ){ ( - $Getattr, - $Setattr, - $Lookup, - $Readlink, - $Read, - $Write, - $Create, - $Remove, + $data{Getattr}, + $data{Setattr}, + $data{Lookup}, + $data{Readlink}, + $data{Read}, + $data{Write}, + $data{Create}, + $data{Remove}, )=split( /\ /, $nfsstatOutputA[$int] ); } if ( $int == 5 ){ ( - $Rename, - $Link, - $Symlink, - $Mkdir, - $Rmdir, - $Readdir, - $RdirPlus, - $Access, + $data{Rename}, + $data{Link}, + $data{Symlink}, + $data{Mkdir}, + $data{Rmdir}, + $data{Readdir}, + $data{RdirPlus}, + $data{Access}, )=split( /\ /, $nfsstatOutputA[$int] ); } if ( $int == 7 ){ ( - $Mknod, - $Fsstat, - $Fsinfo, - $PathConf, - $Commit, + $data{Mknod}, + $data{Fsstat}, + $data{Fsinfo}, + $data{PathConf}, + $data{Commit}, )=split( /\ /, $nfsstatOutputA[$int] ); } if ( $int == 10 ){ ( - $TimedOut, - $Invalid, - $XReplies, - $Retries, - $Requests, + $data{TimedOut}, + $data{Invalid}, + $data{XReplies}, + $data{Retries}, + $data{Requests}, )=split( /\ /, $nfsstatOutputA[$int] ); } if ( $int == 13 ){ ( - $AttrHits, - $AttrMisses, - $LkupHits, - $LkupMisses, - $BioRHits, - $BioRMisses, - $BioWHits, - $BioWMisses, + $data{AttrHits}, + $data{AttrMisses}, + $data{LkupHits}, + $data{LkupMisses}, + $data{BioRHits}, + $data{BioRMisses}, + $data{BioWHits}, + $data{BioWMisses}, )=split( /\ /, $nfsstatOutputA[$int] ); } if ( $int == 15 ){ ( - $BioRLHits, - $BioRLMisses, - $BioDHits, - $BioDMisses, - $DirEHits, - $DirEMisses, - $AccsHits, - $AccsMisses, + $data{BioRLHits}, + $data{BioRLMisses}, + $data{BioDHits}, + $data{BioDMisses}, + $data{DirEHits}, + $data{DirEMisses}, + $data{AccsHits}, + $data{AccsMisses}, )=split( /\ /, $nfsstatOutputA[$int] ); } @@ -137,45 +176,15 @@ while( 
defined( $nfsstatOutputA[$int] ) ){ $int++; } -print $Getattr."\n". - $Setattr."\n". - $Lookup."\n". - $Readlink."\n". - $Read."\n". - $Write."\n". - $Create."\n". - $Remove."\n". - $Rename."\n". - $Link."\n". - $Symlink."\n". - $Mkdir."\n". - $Rmdir."\n". - $Readdir."\n". - $RdirPlus."\n". - $Access."\n". - $Mknod."\n". - $Fsstat."\n". - $Fsinfo."\n". - $PathConf."\n". - $Commit."\n". - $TimedOut."\n". - $Invalid."\n". - $XReplies."\n". - $Retries."\n". - $Requests."\n". - $AttrHits."\n". - $AttrMisses."\n". - $LkupHits."\n". - $LkupMisses."\n". - $BioRHits."\n". - $BioRMisses."\n". - $BioWHits."\n". - $BioWMisses."\n". - $BioRLHits."\n". - $BioRLMisses."\n". - $BioDHits."\n". - $BioDMisses."\n". - $DirEHits."\n". - $DirEMisses."\n". - $AccsHits."\n". - $AccsMisses."\n"; +#add the data has to the return hash +$to_return{data}=\%data; + +#finally render the JSON +my $j=JSON->new; +if ( $opts{p} ){ + $j->pretty(1); +} +print $j->encode( \%to_return ); +if ( ! $opts{p} ){ + print "\n"; +} diff --git a/snmp/fbsdnfsserver b/snmp/fbsdnfsserver index 4664cfa61..e9402b01a 100644 --- a/snmp/fbsdnfsserver +++ b/snmp/fbsdnfsserver @@ -1,117 +1,168 @@ #!/usr/local/bin/perl -# Add this to snmpd.conf as below. -# extend fbsdnfsserver /etc/snmp/fbsdnfsserver +=head1 DESCRIPTION + +This is a SNMP extend for FreeBSD NFS server stats for use with LibreNMS. + +For more information, see L. + +=head1 SWITCHES + +=head2 -p + +Pretty print the JSON. + +=head1 SNMPD SETUP EXAMPLES + +Below is a basic example of setting it up snmpd.conf for NetSNMP. + + extend fbsdnfsserver /etc/snmp/fbsdnfsserver + +=cut + +#Copyright (c) 2018, Zane C. Bowers-Hadley +#All rights reserved. +# +#Redistribution and use in source and binary forms, with or without modification, +#are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. 
+# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +#ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +#WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +#IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, +#INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +#BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +#LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +#OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +#THE POSSIBILITY OF SUCH DAMAGE. + + +use strict; +use warnings; +use JSON; +use Getopt::Std; + +#gets the options +my %opts=(); +getopts('p', \%opts); + +$Getopt::Std::STANDARD_HELP_VERSION = 1; +sub main::VERSION_MESSAGE { + print "fbsdnfsclient SNMP extend 0.0.0\n"; +} + +sub main::HELP_MESSAGE { + print "\n". + "-p Print the JSON in a pretty manner.\n"; + exit 0; +} + +my $VERSION=1; + +#the data to return +my %to_return; +$to_return{'version'}=$VERSION; +$to_return{'error'}='0'; +$to_return{'errorString'}=''; my $nfsstatOutput=`/usr/bin/nfsstat`; -my @nfsstatOutputA=split( /\n/, $nfsstatOutput ); -my $int=0; +$to_return{error}=$?; + +# check for bad exit codes +if ( $? == -1){ + $to_return{errorString}='failed to run nfsstat'; +} +elsif ($? & 127) { + $to_return{errorString}= sprintf "nfsstat died with signal %d, %s coredump\n", + ($? & 127), ($? & 128) ? 'with' : 'without'; +} else { + $to_return{error}=$? 
>> 8; + $to_return{errorString}="nfsstat exited with ".$to_return{error}; +} -my ( - $Getattr, - $Setattr, - $Lookup, - $Readlink, - $Read, - $Write, - $Create, - $Remove, - $Rename, - $Link, - $Symlink, - $Mkdir, - $Rmdir, - $Readdir, - $RdirPlus, - $Access, - $Mknod, - $Fsstat, - $Fsinfo, - $PathConf, - $Commit, - $RetFailed, - $Faults, - $Inprog, - $Idem, - $Nonidem, - $Misses, - $WriteOps, - $WriteRPC, - $Opsaved - ); +my @nfsstatOutputA=split( /\n/, $nfsstatOutput ); +my $int=0; +my %data; while( defined( $nfsstatOutputA[$int] ) ){ $nfsstatOutputA[$int]=~s/^ +//; $nfsstatOutputA[$int]=~s/ +/ /g; if ( $int == 19 ){ ( - $Getattr, - $Setattr, - $Lookup, - $Readlink, - $Read, - $Write, - $Create, - $Remove, + $data{'Getattr'}, + $data{'Setattr'}, + $data{'Lookup'}, + $data{'Readlink'}, + $data{'Read'}, + $data{'Write'}, + $data{'Create'}, + $data{'Remove'}, )=split( /\ /, $nfsstatOutputA[$int] ); } if ( $int == 21 ){ ( - $Rename, - $Link, - $Symlink, - $Mkdir, - $Rmdir, - $Readdir, - $RdirPlus, - $Access + $data{'Rename'}, + $data{'Link'}, + $data{'Symlink'}, + $data{'Mkdir'}, + $data{'Rmdir'}, + $data{'Readdir'}, + $data{'RdirPlus'}, + $data{'Access'} )=split( /\ /, $nfsstatOutputA[$int] ); } if ( $int == 23 ){ ( - $Mknod, - $Fsstat, - $Fsinfo, - $PathConf, - $Commit + $data{'Mknod'}, + $data{'Fsstat'}, + $data{'Fsinfo'}, + $data{'PathConf'}, + $data{'Commit'} )=split( /\ /, $nfsstatOutputA[$int] ); } if ( $int == 25 ){ ( - $RetFailed + $data{'RetFailed'} )=split( /\ /, $nfsstatOutputA[$int] ); } if ( $int == 27 ){ ( - $Faults + $data{'Faults'} )=split( /\ /, $nfsstatOutputA[$int] ); } if ( $int == 30 ){ ( - $Inprog, - $Idem, - $Nonidem, - $Misses + $data{'Inprog'}, + $data{'Idem'}, + $data{'Nonidem'}, + $data{'Misses'} )=split( /\ /, $nfsstatOutputA[$int] ); } if ( $int == 33 ){ ( - $WriteOps, - $WriteRPC, - $Opsaved + $data{'WriteOps'}, + $data{'WriteRPC'}, + $data{'Opsaved'} )=split( /\ /, $nfsstatOutputA[$int] ); } @@ -119,33 +170,15 @@ while( defined( 
$nfsstatOutputA[$int] ) ){ $int++; } -print $Getattr."\n". - $Setattr."\n". - $Lookup."\n". - $Readlink."\n". - $Read."\n". - $Write."\n". - $Create."\n". - $Remove."\n". - $Rename."\n". - $Link."\n". - $Symlink."\n". - $Mkdir."\n". - $Rmdir."\n". - $Readdir."\n". - $RdirPlus."\n". - $Access."\n". - $Mknod."\n". - $Fsstat."\n". - $Fsinfo."\n". - $PathConf."\n". - $Commit."\n". - $RetFailed."\n". - $Faults."\n". - $Inprog."\n". - $Idem."\n". - $Nonidem."\n". - $Misses."\n". - $WriteOps."\n". - $WriteRPC."\n". - $Opsaved."\n"; +#add the data has to the return hash +$to_return{data}=\%data; + +#finally render the JSON +my $j=JSON->new; +if ( $opts{p} ){ + $j->pretty(1); +} +print $j->encode( \%to_return ); +if ( ! $opts{p} ){ + print "\n"; +} From 446a6bf4ebe4ec820b7701672119d82bdf897189 Mon Sep 17 00:00:00 2001 From: Mike Centola Date: Thu, 10 Jan 2019 00:35:28 -0500 Subject: [PATCH 182/497] Added gpsd script for SNMP Extend (#217) Fixed Typos Fixed another typo --- snmp/gpsd | 45 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 45 insertions(+) create mode 100755 snmp/gpsd diff --git a/snmp/gpsd b/snmp/gpsd new file mode 100755 index 000000000..48f1be4ad --- /dev/null +++ b/snmp/gpsd @@ -0,0 +1,45 @@ +#!/usr/bin/env bash +# +# Copyright (c) 2019 Mike Centola +# +# Please make sure the paths below are correct. +# Alternatively you can put them in $0.conf, meaning if you've named +# this script gpsd.sh then it must go in gpsd.sh.conf . +# +# +################################################################ +# Don't change anything unless you know what are you doing # +################################################################ + +BIN_GPIPE='/usr/bin/env gpspipe' +BIN_GREP='/usr/bin/env grep' +BIN_PYTHON='/usr/bin/env python' + +# Check for config file +CONFIG=$0".conf" +if [ -f $CONFIG ]; then + . 
$CONFIG +fi + +# Create Temp File +TMPFILE=$(mktemp) +trap "rm -f $TMPFILE" 0 2 3 15 + +# Write GPSPIPE Data to Temp File +$BIN_GPIPE -w -n 10 > $TMPFILE + +# Parse Temp file for GPSD Data +VERSION=`cat $TMPFILE | $BIN_GREP -m 1 "VERSION" | $BIN_PYTHON -c 'import sys,json;print json.load(sys.stdin)["rev"]'` +GPSDMODE=`cat $TMPFILE | $BIN_GREP -m 1 "mode" | $BIN_PYTHON -c 'import sys,json;print json.load(sys.stdin)["mode"]'` +HDOP=`cat $TMPFILE | $BIN_GREP -m 1 "hdop" | $BIN_PYTHON -c 'import sys,json;print json.load(sys.stdin)["hdop"]'` +VDOP=`cat $TMPFILE | $BIN_GREP -m 1 "vdop" | $BIN_PYTHON -c 'import sys,json;print json.load(sys.stdin)["vdop"]'` +LAT=`cat $TMPFILE | $BIN_GREP -m 1 "lat" | $BIN_PYTHON -c 'import sys,json;print json.load(sys.stdin)["lat"]'` +LONG=`cat $TMPFILE | $BIN_GREP -m 1 "lon" | $BIN_PYTHON -c 'import sys,json;print json.load(sys.stdin)["lon"]'` +ALT=`cat $TMPFILE | $BIN_GREP -m 1 "alt" | $BIN_PYTHON -c 'import sys,json;print json.load(sys.stdin)["alt"]'` +SATS=`cat $TMPFILE | $BIN_GREP -m 1 "SKY" | $BIN_PYTHON -c 'import sys,json;print len(json.load(sys.stdin)["satellites"])'` +SATSUSED=`cat $TMPFILE | $BIN_GREP -m 1 "SKY" | $BIN_PYTHON -c 'import sys,json;print len([sat for sat in json.load(sys.stdin)["satellites"] if sat["used"]])'` + +# Output info for SNMP Extend +echo '{"data":{"mode":"'$GPSDMODE'", "hdop":"'$HDOP'", "vdop":"'$VDOP'", "latitude":"'$LAT'", "longitude":"'$LONG'", "altitude":"'$ALT'", "satellites":"'$SATS'", "satellites_used":"'$SATSUSED'"}, "error":"0", "errorString":"", "version":"'$VERSION'"}' + +rm $TMPFILE \ No newline at end of file From 4c496479e377c86f650616ed351aa835faeba1d8 Mon Sep 17 00:00:00 2001 From: Slashdoom <5092581+slashdoom@users.noreply.github.com> Date: Thu, 10 Jan 2019 18:40:40 +1300 Subject: [PATCH 183/497] Fix: InnoDB stat support for MariaDB v10+ (#211) * mariadb innodb support for v10+ * fix newer innodb insert buffers * agent mysql to snmp extend --- snmp/mysql | 503 
+++++++++++++++++++++++++++++++++++++---------------- 1 file changed, 351 insertions(+), 152 deletions(-) diff --git a/snmp/mysql b/snmp/mysql index 8a2d05a97..e08ed6a7d 100755 --- a/snmp/mysql +++ b/snmp/mysql @@ -1,28 +1,17 @@ #!/usr/bin/env php true, # Do you want to check binary logging? 'slave' => true, # Do you want to check slave status? 'procs' => true, # Do you want to check SHOW PROCESSLIST? + 'get_qrt' => true, # Get query response times from Percona Server or MariaDB? ); $use_ss = FALSE; # Whether to use the script server or not @@ -71,9 +73,13 @@ $version = "1.1.7"; # ============================================================================ # Include settings from an external config file (issue 39). # ============================================================================ +if ($check_mk) { + echo("<<>>\n"); +} if (file_exists(__FILE__ . '.cnf' ) ) { require(__FILE__ . '.cnf'); + debug('Found configuration file ' . __FILE__ . '.cnf'); } else { echo("No ".__FILE__ . ".cnf found!\n"); exit(); @@ -111,6 +117,19 @@ function error_handler($errno, $errstr, $errfile, $errline) { # } #} +# ============================================================================ +# Set the default timezone either to the configured, system timezone, or the +# default set above in the script. +# ============================================================================ +if ( function_exists("date_default_timezone_set") + && function_exists("date_default_timezone_get") ) { + $tz = ($timezone ? $timezone : @date_default_timezone_get()); + if ( $tz ) { + @date_default_timezone_set($tz); + } +} + + # ============================================================================ # Make sure we can also be called as a script. 
# ============================================================================ @@ -168,7 +187,7 @@ if (!function_exists('array_change_key_case') ) { # ============================================================================ function validate_options($options) { debug($options); - $opts = array('items', 'user', 'pass', 'heartbeat', 'nocache', 'port'); + $opts = array('items', 'user', 'pass', 'heartbeat', 'nocache', 'port', 'server-id'); # Required command-line options foreach ( array() as $option ) { if (!isset($options[$option]) || !$options[$option] ) { @@ -186,21 +205,23 @@ function validate_options($options) { # Print out a brief usage summary # ============================================================================ function usage($message) { - global $mysql_host, $mysql_user, $mysql_pass, $mysql_port, $heartbeat; + global $mysql_host, $mysql_user, $mysql_pass, $mysql_port; $usage = << --items [OPTION] - - --host Hostname to connect to; use host:port syntax to specify a port - Use :/path/to/socket if you want to connect via a UNIX socket - --items Comma-separated list of the items whose data you want - --user MySQL username; defaults to $mysql_user if not given - --pass MySQL password; defaults to $mysql_pass if not given - --heartbeat MySQL heartbeat table; defaults to '$heartbeat' (see mk-heartbeat) - --nocache Do not cache results in a file - --port MySQL port; defaults to $mysql_port if not given - --mysql_ssl Add the MYSQL_CLIENT_SSL flag to mysql_connect() call +Usage: php ss_get_mysql_stats.php --host --items [OPTION] + + --host MySQL host + --items Comma-separated list of the items whose data you want + --user MySQL username + --pass MySQL password + --port MySQL port + --socket MySQL socket + --flags MySQL flags + --connection-timeout MySQL connection timeout + --server-id Server id to associate with a heartbeat if heartbeat usage is enabled + --nocache Do not cache results in a file + --help Show usage EOF; die($usage); @@ -252,8 +273,11 @@ 
function parse_cmdline( $args ) { # ============================================================================ function ss_get_mysql_stats( $options ) { # Process connection options and connect to MySQL. - global $debug, $mysql_user, $mysql_pass, $heartbeat, $cache_dir, $cache_time, - $chk_options, $mysql_host, $mysql_port, $mysql_ssl; + global $debug, $mysql_host, $mysql_user, $mysql_pass, $cache_dir, $poll_time, $chk_options, + $mysql_port, $mysql_socket, $mysql_flags, + $mysql_ssl, $mysql_ssl_key, $mysql_ssl_cert, $mysql_ssl_ca, + $mysql_connection_timeout, + $heartbeat, $heartbeat_table, $heartbeat_server_id, $heartbeat_utc; # Connect to MySQL. $user = isset($options['user']) ? $options['user'] : $mysql_user; @@ -261,26 +285,15 @@ function ss_get_mysql_stats( $options ) { $port = isset($options['port']) ? $options['port'] : $mysql_port; $host = isset($options['host']) ? $options['host'] : $mysql_host; - $heartbeat = isset($options['heartbeat']) ? $options['heartbeat'] : $heartbeat; + $socket = isset($options['socket']) ? $options['socket'] : $mysql_socket; + $flags = isset($options['flags']) ? $options['flags'] : $mysql_flags; + $connection_timeout = isset($options['connection-timeout']) ? $options['connection-timeout'] : $mysql_connection_timeout; + $heartbeat_server_id = isset($options['server-id']) ? $options['server-id'] : $heartbeat_server_id; + # If there is a port, or if it's a non-standard port, we add ":$port" to the # hostname. $host_str = $host.($port != 3306 ? ":$port" : ''); - debug(array('connecting to', $host_str, $user, $pass)); - if (!extension_loaded('mysqli') ) { - debug("The MySQL extension is not loaded"); - die("The MySQL extension is not loaded"); - } - if ($mysql_ssl || (isset($options['mysql_ssl']) && $options['mysql_ssl']) ) { - $conn = ((($GLOBALS["___mysqli_ston"] = mysqli_init()) && (mysqli_real_connect($GLOBALS["___mysqli_ston"], $host_str, - $user, $pass, NULL, 3306, NULL, MYSQLI_CLIENT_SSL))) ? 
$GLOBALS["___mysqli_ston"] : FALSE); - } - else { - $conn = ($GLOBALS["___mysqli_ston"] = mysqli_connect($host_str, $user, $pass)); - } - if (!$conn ) { - die("MySQL: " . ((is_object($GLOBALS["___mysqli_ston"])) ? mysqli_error($GLOBALS["___mysqli_ston"]) : - (($___mysqli_res = mysqli_connect_error()) ? $___mysqli_res : false))); - } + $sanitized_host = str_replace(array(":", "/"), array("", "_"), $host); $cache_file = "$cache_dir/agent-local-mysql"; @@ -288,12 +301,12 @@ function ss_get_mysql_stats( $options ) { # First, check the cache. $fp = null; - if (!isset($options['nocache']) ) { - if ($fp = fopen($cache_file, 'a+') ) { + if ( $cache_dir && !array_key_exists('nocache', $options) ) { + if ( $fp = fopen($cache_file, 'a+') ) { $locked = flock($fp, 1); # LOCK_SH - if ($locked ) { - if (filesize($cache_file) > 0 - && filectime($cache_file) + ($cache_time) > time() + if ( $locked ) { + if ( filesize($cache_file) > 0 + && filectime($cache_file) + ($poll_time/2) > time() && ($arr = file($cache_file)) ) {# The cache file is good to use. debug("Using the cache file"); @@ -303,12 +316,12 @@ function ss_get_mysql_stats( $options ) { else { debug("The cache file seems too small or stale"); # Escalate the lock to exclusive, so we can write to it. - if (flock($fp, 2) ) { # LOCK_EX + if ( flock($fp, 2) ) { # LOCK_EX # We might have blocked while waiting for that LOCK_EX, and # another process ran and updated it. Let's see if we can just # return the data now: - if (filesize($cache_file) > 0 - && filectime($cache_file) + ($cache_time) > time() + if ( filesize($cache_file) > 0 + && filectime($cache_file) + ($poll_time/2) > time() && ($arr = file($cache_file)) ) {# The cache file is good to use. 
debug("Using the cache file"); @@ -320,48 +333,79 @@ function ss_get_mysql_stats( $options ) { } } else { - debug("Couldn't lock the cache file, ignoring it."); $fp = null; + debug("Couldn't lock the cache file, ignoring it"); } } + else { + $fp = null; + debug("Couldn't open the cache file"); + } } else { - $fp = null; - debug("Couldn't open the cache file"); + debug("Caching is disabled."); } + # Connect to MySQL. + debug(array('Connecting to', $host, $port, $user, $pass)); + if ( !extension_loaded('mysqli') ) { + debug("PHP MySQLi extension is not loaded"); + die("PHP MySQLi extension is not loaded"); + } + if ( $mysql_ssl ) { + $conn = mysqli_init(); + $conn->options(MYSQLI_OPT_CONNECT_TIMEOUT, $connection_timeout); + mysqli_ssl_set($conn, $mysql_ssl_key, $mysql_ssl_cert, $mysql_ssl_ca, NULL, NULL); + mysqli_real_connect($conn, $host, $user, $pass, NULL, $port, $socket, $flags); + } + else { + $conn = mysqli_init(); + $conn->options(MYSQLI_OPT_CONNECT_TIMEOUT, $connection_timeout); + mysqli_real_connect($conn, $host, $user, $pass, NULL, $port, $socket, $flags); + } + if ( mysqli_connect_errno() ) { + debug("MySQL connection failed: " . mysqli_connect_error()); + die("ERROR: " . mysqli_connect_error()); + } + + # MySQL server version. + # The form of this version number is main_version * 10000 + minor_version * 100 + sub_version + # i.e. version 5.5.44 is 50544. + $mysql_version = mysqli_get_server_version($conn); + debug("MySQL server version is " . $mysql_version); + # Set up variables. $status = array( # Holds the result of SHOW STATUS, SHOW INNODB STATUS, etc # Define some indexes so they don't cause errors with += operations. 
'relay_log_space' => null, 'binary_log_space' => null, - 'current_transactions' => null, - 'locked_transactions' => null, - 'active_transactions' => null, - 'innodb_locked_tables' => null, - 'innodb_tables_in_use' => null, - 'innodb_lock_structs' => null, - 'innodb_lock_wait_secs' => null, - 'innodb_sem_waits' => null, - 'innodb_sem_wait_time_ms'=> null, + 'current_transactions' => 0, + 'locked_transactions' => 0, + 'active_transactions' => 0, + 'innodb_locked_tables' => 0, + 'innodb_tables_in_use' => 0, + 'innodb_lock_structs' => 0, + 'innodb_lock_wait_secs' => 0, + 'innodb_sem_waits' => 0, + 'innodb_sem_wait_time_ms'=> 0, # Values for the 'state' column from SHOW PROCESSLIST (converted to # lowercase, with spaces replaced by underscores) - 'State_closing_tables' => null, - 'State_copying_to_tmp_table' => null, - 'State_end' => null, - 'State_freeing_items' => null, - 'State_init' => null, - 'State_locked' => null, - 'State_login' => null, - 'State_preparing' => null, - 'State_reading_from_net' => null, - 'State_sending_data' => null, - 'State_sorting_result' => null, - 'State_statistics' => null, - 'State_updating' => null, - 'State_writing_to_net' => null, - 'State_none' => null, - 'State_other' => null, # Everything not listed above + 'State_closing_tables' => 0, + 'State_copying_to_tmp_table' => 0, + 'State_end' => 0, + 'State_freeing_items' => 0, + 'State_init' => 0, + 'State_locked' => 0, + 'State_login' => 0, + 'State_preparing' => 0, + 'State_reading_from_net' => 0, + 'State_sending_data' => 0, + 'State_sorting_result' => 0, + 'State_statistics' => 0, + 'State_updating' => 0, + 'State_writing_to_net' => 0, + 'State_none' => 0, + 'State_other' => 0, # Everything not listed above ); # Get SHOW STATUS and convert the name-value array into a simple @@ -378,8 +422,15 @@ function ss_get_mysql_stats( $options ) { } # Get SHOW SLAVE STATUS, and add it to the $status array. 
- if ($chk_options['slave'] ) { - $result = run_query("SHOW SLAVE STATUS", $conn); + if ( $chk_options['slave'] ) { + # Leverage lock-free SHOW SLAVE STATUS if available + $result = run_query("SHOW SLAVE STATUS NONBLOCKING", $conn); + if ( !$result ) { + $result = run_query("SHOW SLAVE STATUS NOLOCK", $conn); + if ( !$result ) { + $result = run_query("SHOW SLAVE STATUS", $conn); + } + } $slave_status_rows_gotten = 0; foreach ( $result as $row ) { $slave_status_rows_gotten++; @@ -390,23 +441,30 @@ function ss_get_mysql_stats( $options ) { $status['slave_lag'] = $row['seconds_behind_master']; # Check replication heartbeat, if present. - if ($heartbeat ) { + if ( $heartbeat ) { + if ( $heartbeat_utc ) { + $now_func = 'UNIX_TIMESTAMP(UTC_TIMESTAMP)'; + } + else { + $now_func = 'UNIX_TIMESTAMP()'; + } $result2 = run_query( - "SELECT GREATEST(0, UNIX_TIMESTAMP() - UNIX_TIMESTAMP(ts) - 1)" - . " AS delay FROM $heartbeat WHERE id = 1", $conn); + "SELECT MAX($now_func - ROUND(UNIX_TIMESTAMP(ts)))" + . " AS delay FROM $heartbeat_table" + . " WHERE $heartbeat_server_id = 0 OR server_id = $heartbeat_server_id", $conn); $slave_delay_rows_gotten = 0; foreach ( $result2 as $row2 ) { $slave_delay_rows_gotten++; - if ($row2 && is_array($row2) + if ( $row2 && is_array($row2) && array_key_exists('delay', $row2) ) { $status['slave_lag'] = $row2['delay']; } else { - debug("Couldn't get slave lag from $heartbeat"); + debug("Couldn't get slave lag from $heartbeat_table"); } } - if ($slave_delay_rows_gotten == 0 ) { + if ( $slave_delay_rows_gotten == 0 ) { debug("Got nothing from heartbeat query"); } } @@ -417,11 +475,11 @@ function ss_get_mysql_stats( $options ) { $status['slave_stopped'] = ($row['slave_sql_running'] == 'Yes') ? 0 : $status['slave_lag']; } - if ($slave_status_rows_gotten == 0 ) { + if ( $slave_status_rows_gotten == 0 ) { debug("Got nothing from SHOW SLAVE STATUS"); } } - + # Get SHOW MASTER STATUS, and add it to the $status array. 
if ($chk_options['master'] && array_key_exists('log_bin', $status) @@ -445,18 +503,22 @@ function ss_get_mysql_stats( $options ) { # Get SHOW PROCESSLIST and aggregate it by state, then add it to the array # too. - if ($chk_options['procs'] ) { + if ( $chk_options['procs'] ) { $result = run_query('SHOW PROCESSLIST', $conn); foreach ( $result as $row ) { $state = $row['State']; - if (is_null($state) ) { + if ( is_null($state) ) { $state = 'NULL'; } - if ($state == '' ) { + if ( $state == '' ) { $state = 'none'; } + # MySQL 5.5 replaces the 'Locked' state with a variety of "Waiting for + # X lock" types of statuses. Wrap these all back into "Locked" because + # we don't really care about the type of locking it is. + $state = preg_replace('/^(Table lock|Waiting for .*lock)$/', 'Locked', $state); $state = str_replace(' ', '_', strtolower($state)); - if (array_key_exists("State_$state", $status) ) { + if ( array_key_exists("State_$state", $status) ) { increment($status, "State_$state", 1); } else { @@ -465,15 +527,63 @@ function ss_get_mysql_stats( $options ) { } } + # Get SHOW ENGINES to be able to determine whether InnoDB is present. + $engines = array(); + $result = run_query("SHOW ENGINES", $conn); + foreach ( $result as $row ) { + $engines[$row[0]] = $row[1]; + } + # Get SHOW INNODB STATUS and extract the desired metrics from it, then add # those to the array too. if ($chk_options['innodb'] - && array_key_exists('have_innodb', $status) - && $status['have_innodb'] == 'YES' + && array_key_exists('InnoDB', $engines) + && ( $engines['InnoDB'] == 'YES' + || $engines['InnoDB'] == 'DEFAULT' ) ) { $result = run_query("SHOW /*!50000 ENGINE*/ INNODB STATUS", $conn); $istatus_text = $result[0]['Status']; - $istatus_vals = get_innodb_array($istatus_text); + $istatus_vals = get_innodb_array($istatus_text, $mysql_version); + + # Get response time histogram from Percona Server or MariaDB if enabled. 
+ if ( $chk_options['get_qrt'] + && (( isset($status['have_response_time_distribution']) + && $status['have_response_time_distribution'] == 'YES') + || (isset($status['query_response_time_stats']) + && $status['query_response_time_stats'] == 'ON')) ) + { + debug('Getting query time histogram'); + $i = 0; + $result = run_query( + "SELECT `count`, ROUND(total * 1000000) AS total " + . "FROM INFORMATION_SCHEMA.QUERY_RESPONSE_TIME " + . "WHERE `time` <> 'TOO LONG'", + $conn); + foreach ( $result as $row ) { + if ( $i > 13 ) { + # It's possible that the number of rows returned isn't 14. + # Don't add extra status counters. + break; + } + $count_key = sprintf("Query_time_count_%02d", $i); + $total_key = sprintf("Query_time_total_%02d", $i); + $status[$count_key] = $row['count']; + $status[$total_key] = $row['total']; + $i++; + } + # It's also possible that the number of rows returned is too few. + # Don't leave any status counters unassigned; it will break graphs. + while ( $i <= 13 ) { + $count_key = sprintf("Query_time_count_%02d", $i); + $total_key = sprintf("Query_time_total_%02d", $i); + $status[$count_key] = 0; + $status[$total_key] = 0; + $i++; + } + } + else { + debug('Not getting time histogram because it is not enabled'); + } # Override values from InnoDB parsing with values from SHOW STATUS, # because InnoDB status might not have everything and the SHOW STATUS is @@ -494,6 +604,8 @@ function ss_get_mysql_stats( $options ) { 'Innodb_rows_inserted' => 'rows_inserted', 'Innodb_rows_read' => 'rows_read', 'Innodb_rows_updated' => 'rows_updated', + 'Innodb_buffer_pool_reads' => 'pool_reads', + 'Innodb_buffer_pool_read_requests' => 'pool_read_requests', ); # If the SHOW STATUS value exists, override... @@ -536,9 +648,9 @@ function ss_get_mysql_stats( $options ) { } # Define the variables to output. I use shortened variable names so maybe - # it'll all fit in 1024 bytes for Cactid and Spine's benefit. 
This list must - # come right after the word MAGIC_VARS_DEFINITIONS. The Perl script parses - # it and uses it as a Perl variable. + # it'll all fit in 1024 bytes for Cactid and Spine's benefit. + # This list must come right after the word MAGIC_VARS_DEFINITIONS. The Perl script + # parses it and uses it as a Perl variable. $keys = array( 'Key_read_requests' => 'a0', 'Key_reads' => 'a1', @@ -650,7 +762,6 @@ function ss_get_mysql_stats( $options ) { 'binary_log_space' => 'cz', 'innodb_locked_tables' => 'd0', 'innodb_lock_structs' => 'd1', - 'State_closing_tables' => 'd2', 'State_copying_to_tmp_table' => 'd3', 'State_end' => 'd4', @@ -667,7 +778,6 @@ function ss_get_mysql_stats( $options ) { 'State_writing_to_net' => 'df', 'State_none' => 'dg', 'State_other' => 'dh', - 'Handler_commit' => 'di', 'Handler_delete' => 'dj', 'Handler_discover' => 'dk', @@ -709,6 +819,53 @@ function ss_get_mysql_stats( $options ) { 'key_buffer_size' => 'ei', 'Innodb_row_lock_time' => 'ej', 'Innodb_row_lock_waits' => 'ek', + + # Values not parsed by LibreNMS + 'Query_time_count_00' => 'ol', + 'Query_time_count_01' => 'om', + 'Query_time_count_02' => 'on', + 'Query_time_count_03' => 'oo', + 'Query_time_count_04' => 'op', + 'Query_time_count_05' => 'oq', + 'Query_time_count_06' => 'or', + 'Query_time_count_07' => 'os', + 'Query_time_count_08' => 'ot', + 'Query_time_count_09' => 'ou', + 'Query_time_count_10' => 'ov', + 'Query_time_count_11' => 'ow', + 'Query_time_count_12' => 'ox', + 'Query_time_count_13' => 'oy', + 'Query_time_total_00' => 'oz', + 'Query_time_total_01' => 'pg', + 'Query_time_total_02' => 'ph', + 'Query_time_total_03' => 'pi', + 'Query_time_total_04' => 'pj', + 'Query_time_total_05' => 'pk', + 'Query_time_total_06' => 'pl', + 'Query_time_total_07' => 'pm', + 'Query_time_total_08' => 'pn', + 'Query_time_total_09' => 'po', + 'Query_time_total_10' => 'pp', + 'Query_time_total_11' => 'pq', + 'Query_time_total_12' => 'pr', + 'Query_time_total_13' => 'ps', + 'wsrep_replicated_bytes' 
=> 'pt', + 'wsrep_received_bytes' => 'pu', + 'wsrep_replicated' => 'pv', + 'wsrep_received' => 'pw', + 'wsrep_local_cert_failures' => 'px', + 'wsrep_local_bf_aborts' => 'py', + 'wsrep_local_send_queue' => 'pz', + 'wsrep_local_recv_queue' => 'qg', + 'wsrep_cluster_size' => 'qh', + 'wsrep_cert_deps_distance' => 'qi', + 'wsrep_apply_window' => 'qj', + 'wsrep_commit_window' => 'qk', + 'wsrep_flow_control_paused' => 'ql', + 'wsrep_flow_control_sent' => 'qm', + 'wsrep_flow_control_recv' => 'qn', + 'pool_reads' => 'qo', + 'pool_read_requests' => 'qp', ); # Return the output. @@ -737,7 +894,7 @@ function ss_get_mysql_stats( $options ) { # MySQL 5.0, and XtraDB or enhanced InnoDB from Percona if applicable. Note # that extra leading spaces are ignored due to trim(). # ============================================================================ -function get_innodb_array($text) { +function get_innodb_array($text, $mysql_version) { $results = array( 'spin_waits' => array(), 'spin_rounds' => array(), @@ -811,13 +968,26 @@ function get_innodb_array($text) { $results['spin_rounds'][] = to_int($row[5]); $results['os_waits'][] = to_int($row[8]); } - elseif (strpos($line, 'RW-shared spins') === 0 ) { + elseif (strpos($line, 'RW-shared spins') === 0 + && strpos($line, ';') > 0 ) { # RW-shared spins 3859028, OS waits 2100750; RW-excl spins 4641946, OS waits 1530310 $results['spin_waits'][] = to_int($row[2]); $results['spin_waits'][] = to_int($row[8]); $results['os_waits'][] = to_int($row[5]); $results['os_waits'][] = to_int($row[11]); } + elseif (strpos($line, 'RW-shared spins') === 0 && strpos($line, '; RW-excl spins') === FALSE) { + # Post 5.5.17 SHOW ENGINE INNODB STATUS syntax + # RW-shared spins 604733, rounds 8107431, OS waits 241268 + $results['spin_waits'][] = to_int($row[2]); + $results['os_waits'][] = to_int($row[7]); + } + elseif (strpos($line, 'RW-excl spins') === 0) { + # Post 5.5.17 SHOW ENGINE INNODB STATUS syntax + # RW-excl spins 604733, rounds 8107431, OS waits 
241268 + $results['spin_waits'][] = to_int($row[2]); + $results['os_waits'][] = to_int($row[7]); + } elseif (strpos($line, 'seconds the semaphore:') > 0) { # --Thread 907205 has waited at handler/ha_innodb.cc line 7156 for 1.00 seconds the semaphore: increment($results, 'innodb_sem_waits', 1); @@ -826,18 +996,35 @@ function get_innodb_array($text) { } # TRANSACTIONS - elseif (strpos($line, 'Trx id counter') === 0 ) { + elseif ( strpos($line, 'Trx id counter') === 0 ) { # The beginning of the TRANSACTIONS section: start counting # transactions - # Trx id counter 0 1170664159 - # Trx id counter 861B144C - $results['innodb_transactions'] = make_bigint($row[3], $row[4]); + if ( $mysql_version < 50600 ) { + # For versions prior 5.6: two decimals or one hex + # Trx id counter 0 1170664159 + # Trx id counter 861B144C + $results['innodb_transactions'] = isset($row[4]) ? make_bigint( + $row[3], $row[4]) : base_convert($row[3], 16, 10); + } + else { + # For versions 5.6+ and MariaDB 10.x: one decimal + # Trx id counter 2903813 + $results['innodb_transactions'] = $row[3]; + } $txn_seen = TRUE; } - elseif (strpos($line, 'Purge done for trx') === 0 ) { - # Purge done for trx's n:o < 0 1170663853 undo n:o < 0 0 - # Purge done for trx's n:o < 861B135D undo n:o < 0 - $purged_to = make_bigint($row[6], $row[7] == 'undo' ? null : $row[7]); + elseif ( strpos($line, 'Purge done for trx') === 0 ) { + if ( $mysql_version < 50600 ) { + # For versions prior 5.6: two decimals or one hex + # Purge done for trx's n:o < 0 1170663853 undo n:o < 0 0 + # Purge done for trx's n:o < 861B135D undo n:o < 0 + $purged_to = $row[7] == 'undo' ? 
base_convert($row[6], 16, 10) : make_bigint($row[6], $row[7]); + } + else { + # For versions 5.6+ and MariaDB 10.x: one decimal + # Purge done for trx's n:o < 2903354 undo n:o < 0 state: running but idle + $purged_to = $row[6]; + } $results['unpurged_txns'] = big_sub($results['innodb_transactions'], $purged_to); } @@ -845,31 +1032,31 @@ function get_innodb_array($text) { # History list length 132 $results['history_list'] = to_int($row[3]); } - elseif ($txn_seen && strpos($line, '---TRANSACTION') === 0 ) { + elseif ( $txn_seen && strpos($line, '---TRANSACTION') === 0 ) { # ---TRANSACTION 0, not started, process no 13510, OS thread id 1170446656 increment($results, 'current_transactions', 1); - if (strpos($line, 'ACTIVE') > 0 ) { + if ( strpos($line, 'ACTIVE') > 0 ) { increment($results, 'active_transactions', 1); } } - elseif ($txn_seen && strpos($line, '------- TRX HAS BEEN') === 0 ) { + elseif ( $txn_seen && strpos($line, '------- TRX HAS BEEN') === 0 ) { # ------- TRX HAS BEEN WAITING 32 SEC FOR THIS LOCK TO BE GRANTED: increment($results, 'innodb_lock_wait_secs', to_int($row[5])); } - elseif (strpos($line, 'read views open inside InnoDB') > 0 ) { + elseif ( strpos($line, 'read views open inside InnoDB') > 0 ) { # 1 read views open inside InnoDB $results['read_views'] = to_int($row[0]); } - elseif (strpos($line, 'mysql tables in use') === 0 ) { + elseif ( strpos($line, 'mysql tables in use') === 0 ) { # mysql tables in use 2, locked 2 increment($results, 'innodb_tables_in_use', to_int($row[4])); increment($results, 'innodb_locked_tables', to_int($row[6])); } - elseif ($txn_seen && strpos($line, 'lock struct(s)') > 0 ) { + elseif ( $txn_seen && strpos($line, 'lock struct(s)') > 0 ) { # 23 lock struct(s), heap size 3024, undo log entries 27 # LOCK WAIT 12 lock struct(s), heap size 3024, undo log entries 5 # LOCK WAIT 2 lock struct(s), heap size 368 - if (strpos($line, 'LOCK WAIT') === 0 ) { + if ( strpos($line, 'LOCK WAIT') === 0 ) { increment($results, 
'innodb_lock_structs', to_int($row[2])); increment($results, 'locked_transactions', 1); } @@ -896,7 +1083,7 @@ function get_innodb_array($text) { $results['pending_aio_log_ios'] = to_int($row[6]); $results['pending_aio_sync_ios'] = to_int($row[9]); } - elseif (strpos($line, 'Pending flushes (fsync)') === 0 ) { + elseif ( strpos($line, 'Pending flushes (fsync)') === 0 ) { # Pending flushes (fsync) log: 0; buffer pool: 0 $results['pending_log_flushes'] = to_int($row[4]); $results['pending_buf_pool_flushes'] = to_int($row[7]); @@ -917,6 +1104,16 @@ function get_innodb_array($text) { $results['ibuf_used_cells'] = to_int($row[2]); $results['ibuf_free_cells'] = to_int($row[6]); $results['ibuf_cell_count'] = to_int($row[9]); + if (strpos($line, 'merges')) { + $results['ibuf_merges'] = to_int($row[10]); + } + } + elseif (strpos($line, ', delete mark ') > 0 && strpos($prev_line, 'merged operations:') === 0 ) { + # Output of show engine innodb status has changed in 5.5 + # merged operations: + # insert 593983, delete mark 387006, delete 73092 + $results['ibuf_inserts'] = to_int($row[1]); + $results['ibuf_merged'] = to_int($row[1]) + to_int($row[4]) + to_int($row[6]); } elseif (strpos($line, ' merged recs, ') > 0 ) { # 19817685 inserts, 19817684 merged recs, 3552620 merges @@ -972,40 +1169,41 @@ function get_innodb_array($text) { } # BUFFER POOL AND MEMORY - elseif (strpos($line, "Total memory allocated") === 0 ) { + elseif (strpos($line, "Total memory allocated") === 0 && strpos($line, "in additional pool allocated") > 0 ) { # Total memory allocated 29642194944; in additional pool allocated 0 + # Total memory allocated by read views 96 $results['total_mem_alloc'] = to_int($row[3]); $results['additional_pool_alloc'] = to_int($row[8]); } - elseif (strpos($line, 'Adaptive hash index ') === 0 ) { + elseif(strpos($line, 'Adaptive hash index ') === 0 ) { # Adaptive hash index 1538240664 (186998824 + 1351241840) $results['adaptive_hash_memory'] = to_int($row[3]); } - elseif 
(strpos($line, 'Page hash ') === 0 ) { + elseif(strpos($line, 'Page hash ') === 0 ) { # Page hash 11688584 $results['page_hash_memory'] = to_int($row[2]); } - elseif (strpos($line, 'Dictionary cache ') === 0 ) { + elseif(strpos($line, 'Dictionary cache ') === 0 ) { # Dictionary cache 145525560 (140250984 + 5274576) $results['dictionary_cache_memory'] = to_int($row[2]); } - elseif (strpos($line, 'File system ') === 0 ) { + elseif(strpos($line, 'File system ') === 0 ) { # File system 313848 (82672 + 231176) $results['file_system_memory'] = to_int($row[2]); } - elseif (strpos($line, 'Lock system ') === 0 ) { + elseif(strpos($line, 'Lock system ') === 0 ) { # Lock system 29232616 (29219368 + 13248) $results['lock_system_memory'] = to_int($row[2]); } - elseif (strpos($line, 'Recovery system ') === 0 ) { + elseif(strpos($line, 'Recovery system ') === 0 ) { # Recovery system 0 (0 + 0) $results['recovery_system_memory'] = to_int($row[2]); } - elseif (strpos($line, 'Threads ') === 0 ) { + elseif(strpos($line, 'Threads ') === 0 ) { # Threads 409336 (406936 + 2400) $results['thread_hash_memory'] = to_int($row[1]); } - elseif (strpos($line, 'innodb_io_pattern ') === 0 ) { + elseif(strpos($line, 'innodb_io_pattern ') === 0 ) { # innodb_io_pattern 0 (0 + 0) $results['innodb_io_pattern_memory'] = to_int($row[1]); } @@ -1053,6 +1251,7 @@ function get_innodb_array($text) { $results['queries_inside'] = to_int($row[0]); $results['queries_queued'] = to_int($row[4]); } + $prev_line = $line; } foreach ( array('spin_waits', 'spin_rounds', 'os_waits') as $key ) { @@ -1063,16 +1262,9 @@ function get_innodb_array($text) { $results['uncheckpointed_bytes'] = big_sub($results['log_bytes_written'], $results['last_checkpoint']); - -# foreach ($results as $key => $value) { -# echo(strtolower($key).":".strtolower($value)."\n"); -# } - - return $results; } - # ============================================================================ # Returns a bigint from two ulint or a single hex number. 
This is tested in # t/mysql_stats.php and copied, without tests, to ss_get_by_ssh.php. @@ -1117,27 +1309,34 @@ function to_int ( $str ) { # ============================================================================ # Wrap mysql_query in error-handling, and instead of returning the result, # return an array of arrays in the result. +# ============================================================================ + # ============================================================================ function run_query($sql, $conn) { global $debug; debug($sql); - $result = @mysqli_query( $conn, $sql); - if ($debug ) { - $error = @((is_object($conn)) ? mysqli_error($conn) : (($___mysqli_res = mysqli_connect_error()) ? $___mysqli_res : false)); - if ($error ) { + $result = @mysqli_query($conn, $sql); + if ( $debug && strpos($sql, 'SHOW SLAVE STATUS ') === false ) { + $error = @mysqli_error($conn); + if ( $error ) { debug(array($sql, $error)); die("SQLERR $error in $sql"); } } $array = array(); - while ( $row = @mysqli_fetch_array($result) ) { - $array[] = $row; + $count = @mysqli_num_rows($result); + if ( $count > 10000 ) { + debug('Abnormal number of rows returned: ' . $count); + } + else { + while ( $row = @mysqli_fetch_array($result) ) { + $array[] = $row; + } } debug(array($sql, $array)); return $array; } -# ============================================================================ # Safely increments a value that might be null. 
# ============================================================================ function increment(&$arr, $key, $howmuch) { From 636e759045adf9e7ef278a7f3be09ae1e05db781 Mon Sep 17 00:00:00 2001 From: VVelox Date: Wed, 9 Jan 2019 23:41:39 -0600 Subject: [PATCH 184/497] various misc fixes for the postfix poller (#112) * update postfix * move a few things to reduce the number of changed lines * move mself to the end * white space cleanup and another small cleanup of $chr * use $chrNew instead of $chrC when writing the current values * more white space cleanup * replace one more missed instance of iuoscp --- snmp/postfixdetailed | 29 ++++++++++++++++------------- 1 file changed, 16 insertions(+), 13 deletions(-) mode change 100644 => 100755 snmp/postfixdetailed diff --git a/snmp/postfixdetailed b/snmp/postfixdetailed old mode 100644 new mode 100755 index 9bf71e9c7..e6cb1e9ea --- a/snmp/postfixdetailed +++ b/snmp/postfixdetailed @@ -86,10 +86,10 @@ my ( $received, $rardnf, $rarnfqa, $iuscp, - $msefl, $sce, $scp, - $urr) = split ( /\n/, $old ); + $urr, + $msefl) = split ( /\n/, $old ); if ( ! defined( $received ) ){ $received=0; } if ( ! defined( $delivered ) ){ $delivered=0; } @@ -142,7 +142,6 @@ my $recipientsC=0; my $recipienthdC=0; my $deferralcrC=0; my $deferralhidC=0; -my $chrC=0; my $hcrnfqhC=0; my $sardnfC=0; my $sarnobuC=0; @@ -195,12 +194,13 @@ sub newValue{ my $output=`$pflogsumm /var/log/maillog`; -#holds client host rejected values till the end when it is compared to the old one -my $chrNew=0; - #holds RBL values till the end when it is compared to the old one my $buNew=0; + +#holds client host rejected values till the end when it is compared to the old one +my $chrNew=0; + # holds recipient address rejected values till the end when it is compared to the old one my $raruuNew=0; @@ -353,6 +353,7 @@ while ( defined( $outputA[$int] ) ){ # deferrals Host is down if ( ( $line =~ /Host is down$/ ) && ( ! 
$handled ) ){ $line=~s/ .*//; + $deferralcrC=$line; $deferralhidC=$line; $deferralhid=newValue( $deferralhid, $line ); $handled=1; @@ -429,8 +430,8 @@ while ( defined( $outputA[$int] ) ){ #Improper use of SMTP command pipelining if ( ( $line =~ /Improper use of SMTP command pipelining/ ) && ( ! $handled ) ){ $line=~s/.*\: //g; - $iuoscpC=$line; - $iuoscp=newValue( $iuoscp, $line ); + $iuscpC=$line; + $iuscp=newValue( $iuscp, $line ); } #Message size exceeds fixed limit @@ -453,16 +454,18 @@ while ( defined( $outputA[$int] ) ){ $scpC=$line; $scp=newValue( $scp, $line ); } - + #unknown reject reason if ( ( $line =~ /unknown reject reason/ ) && ( ! $handled ) ){ $line=~s/.*\: //g; $urrC=$line; $urr=newValue( $urr, $line ); } + $int++; } + # final client host rejected total $chr=newValue( $chr, $chrNew ); @@ -502,8 +505,8 @@ my $data=$received."\n". $iuscp."\n". $sce."\n". $scp."\n". - $urr."\n"; - $msefl."\n". + $urr."\n". + $msefl."\n"; print $data; @@ -535,10 +538,10 @@ my $current=$receivedC."\n". $rardnfC."\n". $rarnfqaC."\n". $iuscpC."\n". - $mseflC."\n". $sceC."\n". $scpC."\n". - $urrC."\n"; + $urrC."\n". + $mseflC."\n"; open(my $fh, ">", $cache) or die "Can't open '".$cache."'"; print $fh $current; From 0dfec428d1901a7dbdd3afcf1f2c8d87627fa448 Mon Sep 17 00:00:00 2001 From: Tony Murray Date: Thu, 17 Jan 2019 11:44:02 -0600 Subject: [PATCH 185/497] Update powerdns script to json (#218) --- snmp/powerdns.php | 76 ----------------------------------------------- snmp/powerdns.py | 26 ++++++++++++++++ 2 files changed, 26 insertions(+), 76 deletions(-) delete mode 100755 snmp/powerdns.php create mode 100755 snmp/powerdns.py diff --git a/snmp/powerdns.php b/snmp/powerdns.php deleted file mode 100755 index 14103124c..000000000 --- a/snmp/powerdns.php +++ /dev/null @@ -1,76 +0,0 @@ -#!/usr/bin/env php - - -// START SETTINGS /// -$pdnscontrol = '/usr/bin/pdns_control'; -// END SETTINGS /// - -// DO NOT EDIT UNDER THIS LINE -// -$cmd = shell_exec($pdnscontrol.' 
show \*'); -$vars = array(); -$vars = explode(',', $cmd); - - -function doSNMP($vars) { - foreach ($vars as $item => $value) { - $value = trim($value); - if (!empty($value)) { - echo $value."\n"; - } - } - -}//end doSNMP() - -function doSNMPv2($vars) { - $pdns = array(); - foreach ($vars as $item => $value) { - if (!empty($value)) { - $temp = explode('=', $value); - if (isset($temp[1])) { - $pdns[$temp[0]] = $temp[1]; - } - } - } - - $var = array(); - $var['corrupt-packets'] = (isset($pdns['corrupt-packets']) ? $pdns['corrupt-packets'] : 'U'); - $var['deferred-cache-inserts'] = (isset($pdns['deferred-cache-inserts']) ? $pdns['deferred-cache-inserts'] : 'U'); - $var['deferred-cache-lookup'] = (isset($pdns['deferred-cache-lookup']) ? $pdns['deferred-cache-lookup'] : 'U'); - $var['latency'] = (isset($pdns['latency']) ? $pdns['latency'] : 'U'); - $var['packetcache-hit'] = (isset($pdns['packetcache-hit']) ? $pdns['packetcache-hit'] : 'U'); - $var['packetcache-miss'] = (isset($pdns['packetcache-miss']) ? $pdns['packetcache-miss'] : 'U'); - $var['packetcache-size'] = (isset($pdns['packetcache-size']) ? $pdns['packetcache-size'] : 'U'); - $var['qsize-q'] = (isset($pdns['qsize-q']) ? $pdns['qsize-q'] : 'U'); - $var['query-cache-hit'] = (isset($pdns['query-cache-hit']) ? $pdns['query-cache-hit'] : 'U'); - $var['query-cache-miss'] = (isset($pdns['query-cache-miss']) ? $pdns['query-cache-miss'] : 'U'); - $var['recursing-answers'] = (isset($pdns['recursing-answers']) ? $pdns['recursing-answers'] : 'U'); - $var['recursing-questions'] = (isset($pdns['recursing-questions']) ? $pdns['recursing-questions'] : 'U'); - $var['servfail-packets'] = (isset($pdns['servfail-packets']) ? $pdns['servfail-packets'] : 'U'); - $var['tcp-answers'] = (isset($pdns['tcp-answers']) ? $pdns['tcp-answers'] : 'U'); - $var['tcp-queries'] = (isset($pdns['tcp-queries']) ? $pdns['tcp-queries'] : 'U'); - $var['timedout-packets'] = (isset($pdns['timedout-packets']) ? 
$pdns['timedout-packets'] : 'U'); - $var['udp-answers'] = (isset($pdns['udp-answers']) ? $pdns['udp-answers'] : 'U'); - $var['udp-queries'] = (isset($pdns['udp-queries']) ? $pdns['udp-queries'] : 'U'); - $var['udp4-answers'] = (isset($pdns['udp4-answers']) ? $pdns['udp4-answers'] : 'U'); - $var['udp4-queries'] = (isset($pdns['udp4-queries']) ? $pdns['udp4-queries'] : 'U'); - $var['udp6-answers'] = (isset($pdns['udp6-answers']) ? $pdns['udp6-answers'] : 'U'); - $var['udp6-queries'] = (isset($pdns['udp6-queries']) ? $pdns['udp6-queries'] : 'U'); - foreach ($var as $item => $count) { - echo $count."\n"; - } - -}//end doSNMPv2() - - -doSNMPv2($vars); diff --git a/snmp/powerdns.py b/snmp/powerdns.py new file mode 100755 index 000000000..75cc1fae8 --- /dev/null +++ b/snmp/powerdns.py @@ -0,0 +1,26 @@ +#!/usr/bin/env python + +import json +import subprocess + +pdnscontrol = '/usr/bin/pdns_control' + +process = subprocess.Popen([pdnscontrol, 'show', '*'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) +input = process.communicate() +stdout = input[0].decode() +stderr = input[1].decode() + +data = {} +for var in stdout.split(','): + if '=' in var: + key, value = var.split('=') + data[key] = value + +output = { + 'version': 1, + 'error': process.returncode, + 'errorString': stderr, + 'data': data +} + +print(json.dumps(output)) From b07d09d2dd117d5ceaf5dc77e26aef753b1b52b2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Felici=C3=A1n=20Hopp=C3=A1l?= Date: Mon, 11 Feb 2019 23:06:57 +0100 Subject: [PATCH 186/497] Fix: zpool list output changed, incorrect values (#219) * fix zpool data, output of zpool list -pH changed in freebsd 11 * fix zpool data, output of zpool list -pH changed in freebsd 11 * bump version * version dump to 2 --- snmp/zfs-freebsd | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/snmp/zfs-freebsd b/snmp/zfs-freebsd index d78658c2d..12600e1e1 100755 --- a/snmp/zfs-freebsd +++ b/snmp/zfs-freebsd @@ -51,7 +51,7 @@ use Getopt::Std; 
$Getopt::Std::STANDARD_HELP_VERSION = 1; sub main::VERSION_MESSAGE { - print "FreeBSD ZFS stats extend 0.1.0\n"; + print "FreeBSD ZFS stats extend 0.2.0\n"; } sub main::HELP_MESSAGE { @@ -236,14 +236,14 @@ my $pools_int=0; my @toShoveIntoJSON; while ( defined( $pools[$pools_int] ) ) { my %newPool; - + my $pool=$pools[$pools_int]; $pool =~ s/\t/,/g; - $pool =~ s/\,\-\,/\,0\,/g; + $pool =~ s/\,\-\,\-\,/\,0\,0\,/g; $pool =~ s/\%//g; $pool =~ s/\,([0-1\.]*)x\,/,$1,/; - ( $newPool{name}, $newPool{size}, $newPool{alloc}, $newPool{free}, $newPool{expandsz}, $newPool{frag}, $newPool{cap}, $newPool{dedup} )=split(/\,/, $pool); + ( $newPool{name}, $newPool{size}, $newPool{alloc}, $newPool{free}, $newPool{ckpoint}, $newPool{expandsz}, $newPool{frag}, $newPool{cap}, $newPool{dedup} )=split(/\,/, $pool); push(@toShoveIntoJSON, \%newPool); @@ -253,7 +253,7 @@ $tojson{pools}=\@toShoveIntoJSON; my %head_hash; $head_hash{'data'}=\%tojson; -$head_hash{'version'}=1; +$head_hash{'version'}=2; $head_hash{'error'}=0; $head_hash{'errorString'}=''; From 44ac69db1bcb45bce2fe24b263b67dd552840e8d Mon Sep 17 00:00:00 2001 From: Tony Murray Date: Tue, 12 Feb 2019 20:33:05 -0600 Subject: [PATCH 187/497] Use os-release whenever possible for the distro script (#220) Except centos... 
https://bugs.centos.org/view.php?id=8359 --- snmp/distro | 37 +++++++++++++++++++------------------ 1 file changed, 19 insertions(+), 18 deletions(-) diff --git a/snmp/distro b/snmp/distro index cd9e814bf..75fa74d2a 100755 --- a/snmp/distro +++ b/snmp/distro @@ -24,6 +24,7 @@ elif [ "${OS}" = "Linux" ] ; then DIST=$(cat /etc/redhat-release | awk '{print $1}') if [ "${DIST}" = "CentOS" ]; then DIST="CentOS" + IGNORE_OS_RELEASE=1 # https://bugs.centos.org/view.php?id=8359 elif [ "${DIST}" = "CloudLinux" ]; then DIST="CloudLinux" elif [ "${DIST}" = "Mandriva" ]; then @@ -77,10 +78,6 @@ elif [ "${OS}" = "Linux" ] ; then REV=$(sed -n -e 's/^.*PHOTON_BUILD_NUMBER=//p' /etc/photon-release) IGNORE_LSB=1 # photon os does not have /etc/lsb-release nor lsb_release - elif [ -f /etc/os-release ] ; then - DIST=$(grep '^NAME=' /etc/os-release | cut -d= -f2- | tr -d '"') - REV=$(grep '^VERSION_ID=' /etc/os-release | cut -d= -f2- | tr -d '"') - elif [ -f /etc/openwrt_version ] ; then DIST="OpenWrt" REV=$(cat /etc/openwrt_version) @@ -94,29 +91,33 @@ elif [ "${OS}" = "Linux" ] ; then REV=$(echo SP$(grep PATCHLEVEL /etc/SuSE-release | cut -d = -f 2 | tr -d " ")) fi - if [ -f /etc/lsb-release -a "${IGNORE_LSB}" != 1 ] ; then - LSB_DIST=$(lsb_release -si) - LSB_REV=$(lsb_release -sr) - if [ "$LSB_DIST" != "" ] ; then - DIST=$LSB_DIST - fi - if [ "$LSB_REV" != "" ] ; then - REV=$LSB_REV - fi - fi - if [ -x "$(command -v awk)" ]; then # some distros do not ship with awk if [ "`uname -a | awk '{print $(NF)}'`" = "DD-WRT" ] ; then DIST="dd-wrt" - fi + fi if [ "`uname -a | awk '{print $(NF)}'`" = "ASUSWRT-Merlin" ] ; then DIST="ASUSWRT-Merlin" REV=`nvram show | grep buildno= | egrep -o '[0-9].[0-9].[0-9]'` > /dev/null 2>&1 fi fi - if [ -n "${REV}" ] - then + # try standardized os version methods + if [ -f /etc/os-release -a "${IGNORE_OS_RELEASE}" != 1 ] ; then + source /etc/os-release + STD_DIST="$NAME" + STD_REV="$VERSION_ID" + elif [ -f /etc/lsb-release -a "${IGNORE_LSB}" != 1 ] ; then + 
STD_DIST=$(lsb_release -si) + STD_REV=$(lsb_release -sr) + fi + if [ -n "${STD_DIST}" ]; then + DIST="${STD_DIST}" + fi + if [ -n "${STD_REV}" ]; then + REV="${STD_REV}" + fi + + if [ -n "${REV}" ]; then OSSTR="${DIST} ${REV}" else OSSTR="${DIST}" From 76c86289ad98a1e155a22c7b0fe6c4f875ed5aaf Mon Sep 17 00:00:00 2001 From: "Shao Yu-Lung (Allen)" Date: Sat, 16 Mar 2019 14:33:31 +0800 Subject: [PATCH 188/497] remove duplicate code --- agent-local/mysql | 11 +++-------- snmp/mysql | 11 +++-------- 2 files changed, 6 insertions(+), 16 deletions(-) diff --git a/agent-local/mysql b/agent-local/mysql index 3b8b30427..9277efc91 100755 --- a/agent-local/mysql +++ b/agent-local/mysql @@ -349,17 +349,12 @@ function ss_get_mysql_stats( $options ) { debug("PHP MySQLi extension is not loaded"); die("PHP MySQLi extension is not loaded"); } + $conn = mysqli_init(); + $conn->options(MYSQLI_OPT_CONNECT_TIMEOUT, $connection_timeout); if ( $mysql_ssl ) { - $conn = mysqli_init(); - $conn->options(MYSQLI_OPT_CONNECT_TIMEOUT, $connection_timeout); mysqli_ssl_set($conn, $mysql_ssl_key, $mysql_ssl_cert, $mysql_ssl_ca, NULL, NULL); - mysqli_real_connect($conn, $host, $user, $pass, NULL, $port, $socket, $flags); - } - else { - $conn = mysqli_init(); - $conn->options(MYSQLI_OPT_CONNECT_TIMEOUT, $connection_timeout); - mysqli_real_connect($conn, $host, $user, $pass, NULL, $port, $socket, $flags); } + @mysqli_real_connect($conn, $host, $user, $pass, NULL, $port, $socket, $flags); if ( mysqli_connect_errno() ) { debug("MySQL connection failed: " . mysqli_connect_error()); die("ERROR: " . 
mysqli_connect_error()); diff --git a/snmp/mysql b/snmp/mysql index e08ed6a7d..27833e016 100755 --- a/snmp/mysql +++ b/snmp/mysql @@ -352,17 +352,12 @@ function ss_get_mysql_stats( $options ) { debug("PHP MySQLi extension is not loaded"); die("PHP MySQLi extension is not loaded"); } + $conn = mysqli_init(); + $conn->options(MYSQLI_OPT_CONNECT_TIMEOUT, $connection_timeout); if ( $mysql_ssl ) { - $conn = mysqli_init(); - $conn->options(MYSQLI_OPT_CONNECT_TIMEOUT, $connection_timeout); mysqli_ssl_set($conn, $mysql_ssl_key, $mysql_ssl_cert, $mysql_ssl_ca, NULL, NULL); - mysqli_real_connect($conn, $host, $user, $pass, NULL, $port, $socket, $flags); - } - else { - $conn = mysqli_init(); - $conn->options(MYSQLI_OPT_CONNECT_TIMEOUT, $connection_timeout); - mysqli_real_connect($conn, $host, $user, $pass, NULL, $port, $socket, $flags); } + @mysqli_real_connect($conn, $host, $user, $pass, NULL, $port, $socket, $flags); if ( mysqli_connect_errno() ) { debug("MySQL connection failed: " . mysqli_connect_error()); die("ERROR: " . mysqli_connect_error()); From 8ed3e6941a44f6f37ddcbaae98faa3fe99cf9b74 Mon Sep 17 00:00:00 2001 From: "Zane C. 
Bowers-Hadley" Date: Sun, 17 Mar 2019 05:55:33 -0500 Subject: [PATCH 189/497] freshly initilized ZFS pulls that are not in use don't have a $data_demand_total --- snmp/zfs-freebsd | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/snmp/zfs-freebsd b/snmp/zfs-freebsd index cea6e1e95..ee185b6a6 100755 --- a/snmp/zfs-freebsd +++ b/snmp/zfs-freebsd @@ -51,7 +51,7 @@ use Getopt::Std; $Getopt::Std::STANDARD_HELP_VERSION = 1; sub main::VERSION_MESSAGE { - print "FreeBSD ZFS stats extend 0.0.0\n"; + print "FreeBSD ZFS stats extend 0.2.0\n"; } sub main::HELP_MESSAGE { @@ -157,7 +157,10 @@ my $real_hits = $mfu_hits + $mru_hits; my $cache_hit_percent = $arc_hits / $arc_accesses_total * 100; my $cache_miss_percent = $arc_misses / $arc_accesses_total * 100; my $actual_hit_percent = $real_hits / $arc_accesses_total * 100; -my $data_demand_percent = $demand_data_hits / $demand_data_total * 100; +my $data_demand_percent = 'U'; +if ( $demand_data_total != 0 ){ + $data_demand_percent = $demand_data_hits / $demand_data_total * 100; +} my $data_prefetch_percent; if ( $prefetch_data_total != 0 ) { @@ -236,14 +239,14 @@ my $pools_int=0; my @toShoveIntoJSON; while ( defined( $pools[$pools_int] ) ) { my %newPool; - + my $pool=$pools[$pools_int]; $pool =~ s/\t/,/g; - $pool =~ s/\,\-\,/\,0\,/g; + $pool =~ s/\,\-\,\-\,/\,0\,0\,/g; $pool =~ s/\%//g; $pool =~ s/\,([0-1\.]*)x\,/,$1,/; - ( $newPool{name}, $newPool{size}, $newPool{alloc}, $newPool{free}, $newPool{expandsz}, $newPool{frag}, $newPool{cap}, $newPool{dedup} )=split(/\,/, $pool); + ( $newPool{name}, $newPool{size}, $newPool{alloc}, $newPool{free}, $newPool{ckpoint}, $newPool{expandsz}, $newPool{frag}, $newPool{cap}, $newPool{dedup} )=split(/\,/, $pool); push(@toShoveIntoJSON, \%newPool); @@ -251,13 +254,19 @@ while ( defined( $pools[$pools_int] ) ) { } $tojson{pools}=\@toShoveIntoJSON; +my %head_hash; +$head_hash{'data'}=\%tojson; +$head_hash{'version'}=2; +$head_hash{'error'}=0; 
+$head_hash{'errorString'}=''; + my $j=JSON->new; if ( $opts{p} ){ $j->pretty(1); } -print $j->encode( \%tojson ); +print $j->encode( \%head_hash ); if (! $opts{p} ){ print "\n"; From d4d0768904373d3f49189273a7357da7e0ded911 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Sun, 17 Mar 2019 06:02:43 -0500 Subject: [PATCH 190/497] remove unneeded else statement and re-apply patch --- snmp/zfs-freebsd | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) mode change 100755 => 100644 snmp/zfs-freebsd diff --git a/snmp/zfs-freebsd b/snmp/zfs-freebsd old mode 100755 new mode 100644 index ee185b6a6..842c255f9 --- a/snmp/zfs-freebsd +++ b/snmp/zfs-freebsd @@ -157,16 +157,15 @@ my $real_hits = $mfu_hits + $mru_hits; my $cache_hit_percent = $arc_hits / $arc_accesses_total * 100; my $cache_miss_percent = $arc_misses / $arc_accesses_total * 100; my $actual_hit_percent = $real_hits / $arc_accesses_total * 100; -my $data_demand_percent = 'U'; + +my $data_demand_percent = 0; if ( $demand_data_total != 0 ){ - $data_demand_percent = $demand_data_hits / $demand_data_total * 100; + $demand_data_hits / $demand_data_total * 100; } -my $data_prefetch_percent; +my $data_prefetch_percent=0; if ( $prefetch_data_total != 0 ) { $data_prefetch_percent = $prefetch_data_hits / $prefetch_data_total * 100; -}else{ - $data_prefetch_percent = 0; } my $anon_hits_percent; From 17f8b514f29d7d185cab95f3f9dbb0d67b26dc46 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Sun, 17 Mar 2019 06:07:59 -0500 Subject: [PATCH 191/497] merge... 
and update version --- snmp/zfs-freebsd | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/zfs-freebsd b/snmp/zfs-freebsd index 842c255f9..93e162476 100644 --- a/snmp/zfs-freebsd +++ b/snmp/zfs-freebsd @@ -51,7 +51,7 @@ use Getopt::Std; $Getopt::Std::STANDARD_HELP_VERSION = 1; sub main::VERSION_MESSAGE { - print "FreeBSD ZFS stats extend 0.2.0\n"; + print "FreeBSD ZFS stats extend 0.2.1\n"; } sub main::HELP_MESSAGE { From cf72fcfef2b39060d23d686d9b10bb89e25a71f9 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Mon, 18 Mar 2019 03:39:17 -0500 Subject: [PATCH 192/497] actually make this work on system not FreeBSD and deal with the bug where a connection may not have a protocol --- snmp/portactivity | 47 ++++++++++++++++++++--------------------------- 1 file changed, 20 insertions(+), 27 deletions(-) diff --git a/snmp/portactivity b/snmp/portactivity index 430ae5190..9965e0d69 100755 --- a/snmp/portactivity +++ b/snmp/portactivity @@ -1,6 +1,6 @@ #!/usr/bin/env perl -#Copyright (c) 2018, Zane C. Bowers-Hadley +#Copyright (c) 2019, Zane C. Bowers-Hadley #All rights reserved. # #Redistribution and use in source and binary forms, with or without modification, @@ -91,7 +91,7 @@ use Parse::Netstat qw(parse_netstat); $Getopt::Std::STANDARD_HELP_VERSION = 1; sub main::VERSION_MESSAGE { - print "Port Activity SNMP stats extend 0.0.0\n"; + print "Port Activity SNMP stats extend 0.0.1\n"; } sub main::HELP_MESSAGE { @@ -175,7 +175,7 @@ sub return_json{ } print $j->encode( \%to_return ); - + if ( ! 
$pretty ){ print "\n"; } @@ -218,6 +218,7 @@ my @protos_array=split(/\,/, $opts{p}); #holds the various protocol hashes my %protos; +my %proto_lookup; #make sure each one specificied is defined and build the hash that will be returned my $protos_array_int=0; @@ -234,27 +235,17 @@ while ( defined( $protos_array[$protos_array_int] ) ){ return_json(\%to_return, $opts{P}); exit 4; } - + + $proto_lookup{ $port } = $protos_array [$protos_array_int ]; + $protos_array_int++; } -my $os=$^O; +my $netstat='netstat -n'; -my $netstat; - -#make sure this is a supported OS -if ( $os eq 'freebsd' ){ - $netstat='netstat -S -p tcp' -}elsif( $os eq 'linux' ){ - $netstat='netstat -n' -}else{ - $to_return{errorString}=$os.' is not a supported OS as of currently'; - $to_return{error}=3; - return_json(\%to_return, $opts{P}); - exit 3; -} +my $os=$^O; -my $res = parse_netstat(output => join("", `$netstat`), flavor=>$os); +my $res = parse_netstat(output => join("", `$netstat`), flavor=>$os, udp=>0, unix=>0); #check to make sure that it was able to parse the output if ( @@ -273,7 +264,9 @@ while ( defined( $res->[2]{'active_conns'}[$active_conns_int] ) ){ my $conn=$res->[2]{active_conns}[$active_conns_int]; #we only care about TCP currently - if ( $conn->{proto} =~ /^[Tt][Cc][Pp]/ ){ + if ( defined( $conn->{proto} ) && + ( $conn->{proto} =~ /^[Tt][Cc][Pp]/ ) + ){ $protos_array_int=0; my $service; while( @@ -282,8 +275,8 @@ while ( defined( $res->[2]{'active_conns'}[$active_conns_int] ) ){ ){ #check if this matches either ports if ( - ( $protos_array[ $protos_array_int ] eq $conn->{'local_port'} ) || - ( $protos_array[ $protos_array_int ] eq $conn->{'foreign_port'} ) + ( defined($proto_lookup{ $conn->{'local_port'} }) ) || + ( defined($proto_lookup{ $conn->{'foreign_port'} }) ) ){ $service=$protos_array[ $protos_array_int ]; } @@ -294,7 +287,7 @@ while ( defined( $res->[2]{'active_conns'}[$active_conns_int] ) ){ #only handle it if is a service we are watching for if ( defined( $service ) ){ 
my $processed=0; - + my $state=$conn->{'state'}; #translate the state names if ( $os eq 'linux' ){ @@ -311,7 +304,7 @@ while ( defined( $res->[2]{'active_conns'}[$active_conns_int] ) ){ if ( $state ne 'LISTEN' ){ $protos{$service}{'total_conns'}++; } - + #make sure the state is a valid one # if it is not a valid one, set it to other, meaning something unexpected was set for the state that should not be if ( ! defined( $valid_states{$state} ) ){ @@ -338,11 +331,11 @@ while ( defined( $res->[2]{'active_conns'}[$active_conns_int] ) ){ $protos{$service}{'total_to'}++; $protos{$service}{'to'}{$state}++; } - + } - + } - + $active_conns_int++; } From e85eb0366e621d08f171a724a0abb2608a0f07ae Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Tue, 19 Mar 2019 00:40:06 -0500 Subject: [PATCH 193/497] update the guessing to only use smartctl --scan-open and generate with more complex options --- snmp/smart | 134 +++++++++++++++++++++++++---------------------------- 1 file changed, 64 insertions(+), 70 deletions(-) diff --git a/snmp/smart b/snmp/smart index 44b7a31e7..f6c681350 100755 --- a/snmp/smart +++ b/snmp/smart @@ -73,10 +73,9 @@ my $useSN=1; $Getopt::Std::STANDARD_HELP_VERSION = 1; sub main::VERSION_MESSAGE { - print "SMART SNMP extend 0.0.0\n"; + print "SMART SNMP extend 0.1.0\n"; }; - sub main::HELP_MESSAGE { print "\n". "-u Update '".$cache."'\n". @@ -108,75 +107,70 @@ if ( defined( $opts{g} ) ){ $cache='cache='.$cache."\n"; } - my %found_disks; - - #check for drives named /dev/sd* - my @matches=glob('/dev/sd*'); - @matches=grep(!/[0-9]/, @matches); - my $matches_int=0; - while ( defined( $matches[$matches_int] ) ){ - my $device=$matches[$matches_int]; - system( $smartctl.' -A '.$device.' > /dev/null' ); - if ( $? 
== 0 ){ - $device =~ s/\/dev\///; - $found_disks{$device}=1; - } - - $matches_int++; - } - - #check for drives named /dev/ada* - @matches=glob('/dev/ada*'); - @matches=grep(!/[ps]/, @matches); - $matches_int=0; - while ( defined( $matches[$matches_int] ) ){ - my $device=$matches[$matches_int]; - system( $smartctl.' -A '.$device.' > /dev/null' ); - if ( $? == 0 ){ - $device =~ s/\/dev\///; - $found_disks{$device}=1; - } - - $matches_int++; - } - - #check for drives named /dev/da* - @matches=glob('/dev/da*'); - @matches=grep(!/[ps]/, @matches); - $matches_int=0; - while ( defined( $matches[$matches_int] ) ){ - my $device=$matches[$matches_int]; - system( $smartctl.' -A '.$device.' > /dev/null' ); - if ( $? == 0 ){ - $device =~ s/\/dev\///; - $found_disks{$device}=1; - } - - $matches_int++; - } + # used for checking if a disk has been found more than once + my %found_disks_names; + my @argumentsA; #have smartctl scan and see if it finds anythings not get found my $scan_output=`$smartctl --scan-open`; my @scan_outputA=split(/\n/, $scan_output); + + # remove non-SMART devices sometimes returned @scan_outputA=grep(!/ses[0-9]/, @scan_outputA); # not a disk, but may or may not have SMART attributes @scan_outputA=grep(!/pass[0-9]/, @scan_outputA); # very likely a duplicate and a disk under another name - $matches_int=0; - while ( defined( $scan_outputA[$matches_int] ) ){ - my $device=$scan_outputA[$matches_int]; - $device =~ s/ .*//; - system( $smartctl.' -A '.$device.' > /dev/null' ); - if ( $? 
== 0 ){ - $device =~ s/\/dev\///; - $found_disks{$device}=1; + @scan_outputA=grep(!/cd[0-9]/, @scan_outputA); # CD drive + if ( $^O eq 'freebsd' ){ + @scan_outputA=grep(!/sa[0-9]/, @scan_outputA); # tape drive + @scan_outputA=grep(!/ctl[0-9]/, @scan_outputA); # CAM target layer + }elsif( $^O eq 'linux' ){ + @scan_outputA=grep(!/st[0-9]/, @scan_outputA); # SCSI tape drive + @scan_outputA=grep(!/ht[0-9]/, @scan_outputA); # ATA tape drive + } + + # make the first pass, figuring out what all we have and trimming comments + foreach my $arguments ( @scan_outputA ){ + my $name = $arguments; + + $arguments =~ s/ \#.*//; # trim the comment out of the argument + $name =~ s/ .*//; + $name =~ s/\/dev\///; + if (defined( $found_disks_names{$name} )){ + $found_disks_names{$name}++; + }else{ + $found_disks_names{$name}=0; } - - $matches_int++; + + push( @argumentsA, $arguments ); + } - + + # second pass, putting the lines together + my %current_disk; + my $drive_lines=''; + foreach my $arguments ( @argumentsA ){ + my $name = $arguments; + $name =~ s/ .*//; + $name =~ s/\/dev\///; + + if ( $found_disks_names{$name} == 0 ){ + # If no other devices, just name it after the base device. + $drive_lines=$drive_lines.$name." ".$arguments."\n"; + }else{ + # if more than one, start at zero and increment, apennding comma number to the base device name + if (defined( $current_disk{$name} )){ + $current_disk{$name}++; + }else{ + $current_disk{$name}=0; + } + $drive_lines=$drive_lines.$name.",".$current_disk{$name}." ".$arguments."\n"; + } + + } + print "useSN=0\n".'smartctl='.$smartctl."\n". - $cache. - join( "\n", keys(%found_disks) )."\n"; - + $cache. 
+ $drive_lines; + exit 0; } @@ -208,7 +202,7 @@ while ( defined( $configA[$configA_int] ) ){ if ( $var eq 'cache' ){ $cache=$val; } - + if ( $var eq 'smartctl' ){ $smartctl=$val; } @@ -216,11 +210,11 @@ while ( defined( $configA[$configA_int] ) ){ if ( $var eq 'useSN' ){ $useSN=$val; } - + if ( !defined( $val ) ){ push(@disks, $var); } - + $configA_int++; } @@ -267,7 +261,7 @@ while ( defined($disks[$int]) ) { '231'=>'null', '233'=>'null', ); - + my @outputA=split( /\n/, $output ); my $outputAint=0; while ( defined($outputA[$outputAint]) ) { @@ -281,7 +275,7 @@ while ( defined($disks[$int]) ) { my $id=$lineA[0]; # single int raw values - if ( + if ( ( $id == 5 ) || ( $id == 10 ) || ( $id == 173 ) || @@ -319,8 +313,8 @@ while ( defined($disks[$int]) ) { ) { my ( $temp )=split(/\ /, $raw); $IDs{$id}=$temp; - } - + } + } $outputAint++; @@ -337,7 +331,7 @@ while ( defined($disks[$int]) ) { my $short=scalar grep(/Short/, @outputA); my $conveyance=scalar grep(/Conveyance/, @outputA); my $selective=scalar grep(/Selective/, @outputA); - + # get the drive serial number, if needed my $disk_id=$disk; if ( $useSN ){ From d17c9f54c9616f7df883da20cd5e9d84830be33e Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Tue, 19 Mar 2019 02:25:17 -0500 Subject: [PATCH 194/497] tested and it appears to work properly... documentation updated --- snmp/smart | 43 +++++++++++++++++++++++++++++++++---------- 1 file changed, 33 insertions(+), 10 deletions(-) diff --git a/snmp/smart b/snmp/smart index f6c681350..2fd3c145a 100755 --- a/snmp/smart +++ b/snmp/smart @@ -38,14 +38,17 @@ will be /etc/snmp/smart.config. Alternatively you can also specific a config via Anything starting with a # is comment. The format for variables is $variable=$value. Empty lines are ignored. Spaces and tabes at either the start or end of a line are ignored. Any -line with out a = or # are treated as a disk. +line with out a matched variable or # are treated as a disk. 
#This is a comment cache=/var/cache/smart smartctl=/usr/local/sbin/smartctl useSN=0 ada0 - ada1 + da5 /dev/da5 -d sat + twl0,0 /dev/twl0 -d 3ware,0 + twl0,1 /dev/twl0 -d 3ware,1 + twl0,2 /dev/twl0 -d 3ware,2 The variables are as below. @@ -54,8 +57,13 @@ The variables are as below. useSN = If set to 1, it will use the disks SN for reporting instead of the device name. 1 is the default. 0 will use the device name. +A disk line is can be as simple as just a disk name under /dev/. Such as in the config above +The line "ada0" would resolve to "/dev/ada0" and would be called with no special argument. If +a line has a space in it, everything before the space is treated as the disk name and is what +used for reporting and everything after that is used as the argument to be passed to smartctl. + If you want to guess at the configuration, call it with -g and it will print out what it thinks -it should be. +it should be. =cut @@ -194,25 +202,30 @@ my @configA=split(/\n/, $config_file); my $configA_int=0; while ( defined( $configA[$configA_int] ) ){ my $line=$configA[$configA_int]; + chomp($line); $line=~s/^[\t\s]+//; $line=~s/[\t\s]+$//; my ( $var, $val )=split(/=/, $line, 2); + my $matched; if ( $var eq 'cache' ){ $cache=$val; + $matched=1; } if ( $var eq 'smartctl' ){ $smartctl=$val; + $matched=1; } if ( $var eq 'useSN' ){ $useSN=$val; + $matched=1; } if ( !defined( $val ) ){ - push(@disks, $var); + push(@disks, $line); } $configA_int++; @@ -238,11 +251,22 @@ if ( ! 
defined( $opts{u} ) ){ } my $toReturn=''; -my $int=0; -while ( defined($disks[$int]) ) { - my $disk=$disks[$int]; +foreach my $line ( @disks ){ + my $disk; + my $name; + if ( $line =~ /\ / ){ + ($name, $disk)=split(/\ /, $line, 2); + }else{ + $disk=$line; + $name=$line; + } my $disk_sn=$disk; - my $output=`$smartctl -A /dev/$disk`; + my $output; + if ( $disk =~ /\// ){ + $output=`$smartctl -A $disk`; + }else{ + $output=`$smartctl -A /dev/$disk`; + } my %IDs=( '5'=>'null', '10'=>'null', @@ -333,7 +357,7 @@ while ( defined($disks[$int]) ) { my $selective=scalar grep(/Selective/, @outputA); # get the drive serial number, if needed - my $disk_id=$disk; + my $disk_id=$name; if ( $useSN ){ while (`$smartctl -i /dev/$disk` =~ /Serial Number:(.*)/g) { $disk_id = $1; @@ -345,7 +369,6 @@ while ( defined($disks[$int]) ) { .','.$IDs{'190'} .','.$IDs{'194'}.','.$IDs{'196'}.','.$IDs{'197'}.','.$IDs{'198'}.','.$IDs{'199'}.','.$IDs{'231'}.','.$IDs{'233'}.','. $completed.','.$interrupted.','.$read_failure.','.$unknown_failure.','.$extended.','.$short.','.$conveyance.','.$selective."\n"; - $int++; } if ( ! $noWrite ){ From d105dc334aa88a2fdc6f392241e229fd5cea4b80 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Tue, 19 Mar 2019 02:58:30 -0500 Subject: [PATCH 195/497] update the date --- snmp/smart | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/smart b/snmp/smart index 2fd3c145a..1f68ccb33 100755 --- a/snmp/smart +++ b/snmp/smart @@ -1,5 +1,5 @@ #!/usr/bin/env perl -#Copyright (c) 2017, Zane C. Bowers-Hadley +#Copyright (c) 2019, Zane C. Bowers-Hadley #All rights reserved. # #Redistribution and use in source and binary forms, with or without modification, From 92d192d5708bdde62966ed77b15da767b58993fc Mon Sep 17 00:00:00 2001 From: Kovrinic Date: Tue, 9 Apr 2019 22:04:26 -0500 Subject: [PATCH 196/497] Moved sudo command into the snmpd.conf. Added notes for settings to change if using older ZoL zfs. 
--- snmp/zfs-linux | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/snmp/zfs-linux b/snmp/zfs-linux index 87677d0b5..122eb9bbe 100755 --- a/snmp/zfs-linux +++ b/snmp/zfs-linux @@ -109,12 +109,12 @@ def main(args): ## account for variations between ZoL zfs versions proc = subprocess.run(zpool_cmd_list, **std) - if (proc.returncode == 1) and (('root' in proc.stderr) or ('admin' in proc.stderr)): - zpool_cmd = ['sudo'] + zpool_cmd # elevate zpool with sudo - zpool_cmd_list = zpool_cmd + ['list', '-p', '-H'] - proc = subprocess.run(zpool_cmd_list, **std) if (proc.returncode == 2): # -p option is not present in older versions + # edit snmpd.conf zfs extend section to the following: + # extend zfs /usr/bin/sudo /etc/snmp/zfs-linux + # make sure to edit your sudo users (usually visudo) and add at the bottom: + # snmp ALL=(ALL) NOPASSWD: /etc/snmp/zfs-linux del zpool_cmd_list[zpool_cmd_list.index('-p')] # try removing -p to fix the issue proc = subprocess.run(zpool_cmd_list, **std) exact_size = False From bc1fe55c759d89cef86024d47f2d980959eeacdb Mon Sep 17 00:00:00 2001 From: Munzy Date: Tue, 16 Apr 2019 13:50:16 -0700 Subject: [PATCH 197/497] Added Wrapper for SAS drives. --- snmp/smart | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/snmp/smart b/snmp/smart index 1f68ccb33..31afbdc31 100755 --- a/snmp/smart +++ b/snmp/smart @@ -339,8 +339,42 @@ foreach my $line ( @disks ){ $IDs{$id}=$temp; } + + + } + # SAS Wrapping + # Section by Cameron Munroe (munroenet[at]gmail.com) + + # Elements in Grown Defect List. 
+ # Marking as 5 Reallocated_Sector_Ct + + if ($line =~ "Elements in grown defect list:"){ + + my @lineA=split(/\ /, $line, 10); + my $raw=$lineA[5]; + + # Reallocated Sector Count ID + $IDs{5}=$raw; + + } + + # Current Drive Temperature + # Marking as 194 Temperature_Celsius + + if ($line =~ "Current Drive Temperature:"){ + + my @lineA=split(/\ /, $line, 10); + my $raw=$lineA[3]; + + # Temperature C ID + $IDs{194}=$raw; + + } + + # End of SAS Wrapper + $outputAint++; } From 3388f898183eb8ee3945386226db84fa14826bbc Mon Sep 17 00:00:00 2001 From: Munzy Date: Tue, 16 Apr 2019 13:55:34 -0700 Subject: [PATCH 198/497] Removed Excess Spacing --- snmp/smart | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/snmp/smart b/snmp/smart index 31afbdc31..6fde05255 100755 --- a/snmp/smart +++ b/snmp/smart @@ -338,10 +338,7 @@ foreach my $line ( @disks ){ my ( $temp )=split(/\ /, $raw); $IDs{$id}=$temp; } - - - - + } # SAS Wrapping From 6d3118159d4e99a244a15616255e5b3dd5cf20b5 Mon Sep 17 00:00:00 2001 From: Munzy Date: Tue, 16 Apr 2019 13:57:30 -0700 Subject: [PATCH 199/497] Spacing Fixes --- snmp/smart | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/smart b/snmp/smart index 6fde05255..e31fd8838 100755 --- a/snmp/smart +++ b/snmp/smart @@ -338,7 +338,7 @@ foreach my $line ( @disks ){ my ( $temp )=split(/\ /, $raw); $IDs{$id}=$temp; } - + } # SAS Wrapping From 26f6d5bd8cdfe77cf1f946d309b7be6cc53f7798 Mon Sep 17 00:00:00 2001 From: SourceDoctor Date: Sun, 5 May 2019 23:14:58 +0200 Subject: [PATCH 200/497] dhcp-status - seperate binary path configuration from code --- snmp/dhcp-status.sh | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/snmp/dhcp-status.sh b/snmp/dhcp-status.sh index 20bf2b66b..972214718 100755 --- a/snmp/dhcp-status.sh +++ b/snmp/dhcp-status.sh @@ -4,6 +4,7 @@ # edit your snmpd.conf add the below line and restart snmpd # # extend dhcpstats /opt/dhcp-status.sh # 
################################################################ + FILE_DHCP='/var/lib/dhcp/db/dhcpd.leases' BIN_CAT='/usr/bin/cat' BIN_GREP='/usr/bin/grep' @@ -11,6 +12,12 @@ BIN_TR='/usr/bin/tr' BIN_SED='/usr/bin/sed' BIN_SORT='/usr/bin/sort' BIN_WC='/usr/bin/wc' + +CONFIGFILE=dhcp-status.conf +if [ -f $CONFIGFILE ] ; then + . dhcp-status.conf +fi + DHCP_LEASES='^lease' DHCP_ACTIVE='^lease|binding state active' DHCP_EXPIRED='^lease|binding state expired' From 997d5d2c5b9b76dfc1776c470279a5cc2da50713 Mon Sep 17 00:00:00 2001 From: SourceDoctor Date: Wed, 1 May 2019 14:55:54 +0200 Subject: [PATCH 201/497] add mdadm support --- qq | 2069 ++++++++++++++++++++++++++++++++++++++++++++++++++++ snmp/mdadm | 104 +++ 2 files changed, 2173 insertions(+) create mode 100644 qq create mode 100755 snmp/mdadm diff --git a/qq b/qq new file mode 100644 index 000000000..58a21ca28 --- /dev/null +++ b/qq @@ -0,0 +1,2069 @@ +commit 3361bf4c3ae5868b00d09215e10359f58a36ac12 +Author: SourceDoctor +Date: Wed May 1 14:55:54 2019 +0200 + + add mdadm support + +commit d822c899a78bdfd1e7d9f4df2bd5cd512b1696bd +Merge: 8fbfbd5 544fd8b +Author: VVelox +Date: Sun Mar 24 03:56:16 2019 -0500 + + Merge pull request #226 from VVelox/smart-update + + SMART monitoring update adding RAID support + +commit 544fd8bd6e525b3c29d9965c2b405b39ba49a98d +Author: Zane C. Bowers-Hadley +Date: Tue Mar 19 02:58:30 2019 -0500 + + update the date + +commit 8fbfbd5b39bbc22ca606327813c4fe54b38e4d30 +Merge: cb04f8c 38acc2b +Author: VVelox +Date: Tue Mar 19 02:53:30 2019 -0500 + + Merge pull request #225 from VVelox/pa-fix + + portactivity fixes + +commit 503fb9f7389d8307074ed856f96a870a0d26dd72 +Author: Zane C. Bowers-Hadley +Date: Tue Mar 19 02:25:17 2019 -0500 + + tested and it appears to work properly... documentation updated + +commit bdfd0ceea948382684a2bd96659731f9ac5f15b1 +Author: Zane C. 
Bowers-Hadley +Date: Tue Mar 19 00:40:06 2019 -0500 + + update the guessing to only use smartctl --scan-open and generate with more complex options + +commit 38acc2bd3d8e81414b4bfc2cb2bb3e955877fbc1 +Author: Zane C. Bowers-Hadley +Date: Mon Mar 18 03:39:17 2019 -0500 + + actually make this work on system not FreeBSD and deal with the bug where a connection may not have a protocol + +commit cb04f8c0ac148cb2b250d0a408f672db22e99ed5 +Merge: 147cb67 af32f56 +Author: VVelox +Date: Sun Mar 17 23:27:46 2019 -0500 + + Merge pull request #224 from VVelox/zfs-fix + + ZFS-FreeBSD divide by zero fix + +commit af32f56a74e0d9915b4beb419a28814e9bf058d8 +Author: Zane C. Bowers-Hadley +Date: Sun Mar 17 06:07:59 2019 -0500 + + merge... and update version + +commit 658c3c6ead712837bbb763c6b9ecdd782b043629 +Merge: 6564128 147cb67 +Author: Zane C. Bowers-Hadley +Date: Sun Mar 17 06:06:57 2019 -0500 + + Merge branch 'zfs-fix' of https://github.com/VVelox/librenms-agent into zfs-fix + +commit 656412830564593cfefeee5dceeae89bfa371000 +Author: Zane C. Bowers-Hadley +Date: Sun Mar 17 06:02:43 2019 -0500 + + remove unneeded else statement and re-apply patch + +commit 3ce06d6defc63f200f2bbfec7718748c8ec9e832 +Author: Zane C. Bowers-Hadley +Date: Sun Mar 17 05:55:33 2019 -0500 + + freshly initilized ZFS pulls that are not in use don't have a $data_demand_total + +commit 147cb67824b213045826677946166c8ee807f23c +Author: Tony Murray +Date: Tue Feb 12 20:33:05 2019 -0600 + + Use os-release whenever possible for the distro script (#220) + + Except centos... 
https://bugs.centos.org/view.php?id=8359 + +commit c9a0d2893e44f89f7c8c9450a9d42438eff1404d +Author: Felicián Hoppál +Date: Mon Feb 11 23:06:57 2019 +0100 + + Fix: zpool list output changed, incorrect values (#219) + + * fix zpool data, output of zpool list -pH changed in freebsd 11 + + * fix zpool data, output of zpool list -pH changed in freebsd 11 + + * bump version + + * version dump to 2 + +commit 3a407e3f721b7677fb2724af736ea87838d4dcc5 +Author: Tony Murray +Date: Thu Jan 17 11:44:02 2019 -0600 + + Update powerdns script to json (#218) + +commit ad300c035a2be4a55553c2994d5ce7ba69d57432 +Author: VVelox +Date: Wed Jan 9 23:41:39 2019 -0600 + + various misc fixes for the postfix poller (#112) + + * update postfix + + * move a few things to reduce the number of changed lines + + * move mself to the end + + * white space cleanup and another small cleanup of $chr + + * use $chrNew instead of $chrC when writing the current values + + * more white space cleanup + + * replace one more missed instance of iuoscp + +commit c40606140114b9059409f17a21b06fe8655b760e +Author: Slashdoom <5092581+slashdoom@users.noreply.github.com> +Date: Thu Jan 10 18:40:40 2019 +1300 + + Fix: InnoDB stat support for MariaDB v10+ (#211) + + * mariadb innodb support for v10+ + + * fix newer innodb insert buffers + + * agent mysql to snmp extend + +commit 6fdaffa1b2ba8c49ed8bd38fb6445335b3146329 +Author: Mike Centola +Date: Thu Jan 10 00:35:28 2019 -0500 + + Added gpsd script for SNMP Extend (#217) + + Fixed Typos + + Fixed another typo + +commit f54c442d06abd7d2112dc4dc5db315524030308c +Merge: 1b90904 107d72e +Author: CrazyMax +Date: Sat Dec 29 22:17:13 2018 +0100 + + Merge pull request #216 from jasoncheng7115/patch-2 + + Added Proxmox VE Versoin support + +commit 1b90904f61c6d4078f2b427e17c82cf1f8b926ba +Author: VVelox +Date: Fri Dec 28 20:10:13 2018 -0600 + + convert the FreeBSD NFS stuff over to JSON and add in lots of sanity (#190) + + * convert fbsdnfsclient over to JSON + + * Convert 
the server stuff to JSON and fix the output of the client extend. + + * misc. stuff + + * lots of cleanup and sanity added to the FreeBSD NFS scripts + + * fix the #! line + + * update the docs at the top + +commit 5be1b168ba4e03ba3a58b3833a26587474ff7b29 +Author: VVelox +Date: Fri Dec 28 20:08:46 2018 -0600 + + JSON SNMP extend for UPS-APC app. (#189) + + * add snmp/ups-apcups, a Perl rewrite of snmp/ups-apcups.sh to support JSON + + * finish documenting it + + * add version and remove units from the returned values + +commit 107d72e862c2e2a53870272859252a5d39bf8c72 +Author: Jason Cheng <30381035+jasoncheng7115@users.noreply.github.com> +Date: Tue Dec 25 09:15:22 2018 +0800 + + Added Proxmox VE Versoin support + +commit 433d744953fa800ce49fa060b141c10663c0b952 +Author: Jason Cheng <30381035+jasoncheng7115@users.noreply.github.com> +Date: Sun Dec 16 22:21:00 2018 +0800 + + Added FreeNAS Version support (#215) + + Hi, + + I added FreeNAS version information support, as shown in the figure: + + ![2018-12-15 11 53 31](https://user-images.githubusercontent.com/30381035/50044886-2329a580-00c5-11e9-817c-b89a8374270d.png) + ![2018-12-15 11 53 49](https://user-images.githubusercontent.com/30381035/50044887-2329a580-00c5-11e9-93b4-b140809f84a3.png) + +commit 3c4511d987c2058bd6e8605bb0e87c6fc1d36861 +Merge: ff124a1 dc3d267 +Author: CrazyMax +Date: Fri Dec 14 19:03:01 2018 +0100 + + Merge pull request #214 from dsgagi/patch-1 + + Fix Debian detection on Proxmox - lsb_release binary doesn't exist + +commit dc3d2673ddc86d02ca2cd8d93bbf2fd53ca43c55 +Author: dsgagi +Date: Fri Dec 14 18:49:58 2018 +0100 + + Update distro + + Remove extra white spaces. + +commit 456d2e7672d8532af4df7f6da2b5c18b02778bf7 +Author: dsgagi +Date: Fri Dec 14 18:47:54 2018 +0100 + + Update distro + + Minor changes to the code, for better output. 
+ +commit 5b53ab54c8a6d9f3b81abf42725b5da2b3ebec3d +Author: dsgagi +Date: Wed Dec 12 16:09:25 2018 +0100 + + Update distro + +commit ff124a1358755ceddc0ae6a4187d358da0d54d06 +Author: VVelox +Date: Thu Nov 22 09:04:58 2018 -0600 + + add portactivity SNMP extend (#159) + + * add portactivity SNMP extend in its initial form + + * update for the current json_app_get + + * add version to the returned JSON + + * add basic POD documentation + +commit a827734c0ec0e0cdf5e2a04730ec68dbad3fd477 +Author: gardar +Date: Thu Oct 25 19:19:20 2018 +0000 + + CloudLinux distro detection (#208) + + Added CloudLinux distro detection, previously CloudLinux got identified as RedHat + +commit 8d66211adc47d3bad5dd042e3ddbc59a23a28819 +Author: sparknsh <38894705+sparknsh@users.noreply.github.com> +Date: Thu Oct 25 07:17:42 2018 -0400 + + Fix package manager detection (#204) + + * Fix package manager detection + + * use release file for os detection + + * Use command to to validate package manager type + + * check if exists and the execute permission is granted + + * make script more portable + +commit d49fe954dfdeffbeee091051f1f0c515d020f281 +Author: Félim Whiteley +Date: Tue Oct 23 17:46:54 2018 +0100 + + Add divide by zero check (#191) + + On several servers (Ubuntu 18.04) DEMAND_DATA_TOTAL is 0 currently and is causing an error + + Traceback (most recent call last): + File "/usr/local/bin/zfs-linux", line 178, in + sys.exit(main(sys.argv[1:])) + File "/usr/local/bin/zfs-linux", line 76, in main + DATA_DEMAND_PERCENT = DEMAND_DATA_HITS / DEMAND_DATA_TOTAL * 100 + ZeroDivisionError: division by zero + +commit 381cc2466af521772607c682a9a707471a38ff4b +Author: sparknsh <38894705+sparknsh@users.noreply.github.com> +Date: Tue Oct 23 08:51:12 2018 -0400 + + fix nginx script indentation (#205) + +commit 3dada041e433318592e137678d24c32dd1a134b4 +Author: sparknsh <38894705+sparknsh@users.noreply.github.com> +Date: Thu Oct 18 10:37:10 2018 -0400 + + Fix binary operator expected error (#203) + 
+commit ccb244aa09de36e4e4dd85120702580144e86383 +Author: sparknsh <38894705+sparknsh@users.noreply.github.com> +Date: Wed Oct 17 12:28:39 2018 -0400 + + osupdate script clean up (#199) + + - Change script name for simplify of configuration management orchestration scripts. + - Update code syntax. + +commit f0f34b4a2d1a36836f6bffe4307d5d51524009b4 +Author: sparknsh <38894705+sparknsh@users.noreply.github.com> +Date: Wed Oct 17 12:28:07 2018 -0400 + + phpfpmsf script clean up (#198) + + - Change script name for simplify of configuration management orchestration scripts. + - Update code syntax. + +commit e0dcd4a064cedb09241e4af17198bf61e8fd1bf3 +Author: sparknsh <38894705+sparknsh@users.noreply.github.com> +Date: Wed Oct 17 12:27:21 2018 -0400 + + nginx script clean up (#197) + + - Change script name for simplify of configuration management orchestration scripts. + - Change 172.0.0.1 to localhost for better nginx handling. + +commit 1c61a96344317c13fce90811c11c0fa4cb7efb36 +Author: sparknsh <38894705+sparknsh@users.noreply.github.com> +Date: Wed Oct 17 12:26:45 2018 -0400 + + ntp-client data correction (#196) + + NTP was not displaying data right for linux servers. It was putting the frequency data into the offset data. This was giving bad graphs in the UI. Tested the correction on both RHEL and Debian based operating systems and all passes. + + Remove the .sh to simplify for configuration management orchestration scripts. + +commit 28a2f8ae55db7ca773f881560017b4890bc4bbce +Author: voxnil <14983067+voxnil@users.noreply.github.com> +Date: Mon Oct 15 13:00:16 2018 -0700 + + Update zfs-linux to use env for python + +commit ca7a5cdafe6dd603538aad8f63bc624143f98377 +Author: Brock Alberry +Date: Wed Sep 19 09:09:04 2018 -0400 + + PhotonOS distro detection (#193) + + * PhotonOS distro detection + + Detection before `/etc/os-release` since that is present yet missing the build number. 
+ + * awk detection + + combining https://github.com/librenms/librenms-agent/pull/193 and https://github.com/librenms/librenms-agent/pull/194 + +commit 7542bd26f4c883c7e622056a1a34909d1dc9aa2c +Author: Allison +Date: Tue Sep 18 20:20:23 2018 -0700 + + Update distro (#194) + + Adding full detection for ASUSWRT-Merlin + +commit 7c173b160c5be401fa36d85edf15add61a3146d7 +Author: VVelox +Date: Mon Aug 27 04:03:01 2018 -0500 + + convert all the NTP stuff to JSON (#174) + + This requires https://github.com/librenms/librenms/pull/8571 and is for https://github.com/librenms/librenms/pull/8608 . + + Also converted this to regular sh instead of bash, so it will work on more systems with less dependencies. + + Has been tested as working on DD-WRT and FreeBSD. + +commit 99ad80740cb2fcea1c33e59caf1c05af5a53a14f +Author: VVelox +Date: Sun Aug 19 17:47:07 2018 -0500 + + update for the new json_app_get stuff (#179) + +commit c772ac97d3f5b805c311fd13d924513b4561d10b +Author: crcro +Date: Fri Aug 10 00:44:02 2018 +0300 + + added rockstor nas distro detection (#187) + +commit c535b1286c7701a2cefcd10ffd799fba65e56dd2 +Author: TheGreatDoc <32565115+TheGreatDoc@users.noreply.github.com> +Date: Thu Jul 19 22:39:08 2018 +0200 + + Asterisk Script (#183) + + Asterisk App support. + - Channels + - Calls + - Total SIP Peers + - Monitored Online + - Monitored Offline + - Unmonitored Online + - Unmonitored Offline + +commit 7e55d1cd5db04019de09aff7b134a85df71e901a +Author: István Sárándi +Date: Mon Jun 25 16:10:00 2018 +0200 + + Update fail2ban extend script to new JSON format (#181) + + As seen at [this location](https://github.com/librenms/librenms/blob/7fab99cfc13b80a543fb779d68c659b52fc074b1/includes/polling/functions.inc.php#L768) the JSON output needs to contain a `data` field. 
The poller php script actually also extracts this `data` field as one of the first steps, see at [this line](https://github.com/librenms/librenms/blob/c3007b483a12758042e5d0c6009a8ef48e3e1a39/includes/polling/applications/fail2ban.inc.php#L36). + Before I changed these parts the graph didn't show up because the RRD files simply weren't generated as an exception occurred in the poller. This fixes this problem. + +commit b5d77f1a999c5e0f08bc02550fd24e7c37b759c7 +Author: VVelox +Date: Mon May 28 07:22:09 2018 -0500 + + convert fail2ban-client to JSON (#172) + + * convert to JSON + + * add version return + + * change the version number of the returned data to 1 + +commit 41d36dc97f6886bae4ae6e8ba928892ef9d3c8c3 +Author: VVelox +Date: Fri Apr 27 16:46:57 2018 -0500 + + make using SN or device name selectable for SMART reporting (#168) + + * make using SN or device name selectable + + * change the default to SN + +commit 385d466eee1adc06eecd4a84cfd6615f2e4ba2ec +Author: Sander Steffann +Date: Fri Apr 13 17:42:27 2018 +0100 + + Add random entropy monitoring (#173) + +commit a56adb467a1cdf9785f977420dd07a48335f41b3 +Author: Serphentas +Date: Wed Apr 11 10:39:32 2018 +0200 + + add zfs support for linux (#170) + + * add zfs support for linux + + * fix pools and anon_hits_per + + * strip percent sign for pool cap + + * fix anon_hits json key typo + + * fix demand_data_hits json key typo + + * fix comparison as in #169 + + * fix min_size_percent + +commit 8ec6017246edc9784e670d84bd8b52ec094dbb82 +Author: VVelox +Date: Wed Apr 11 02:34:39 2018 -0500 + + correct arc size breakdown (#171) + +commit 3ddb1d6be6b4a4a0cd006251b497bb1ccf8170e8 +Author: VVelox +Date: Tue Apr 10 22:04:07 2018 -0500 + + correct arc size breakdown + +commit 90fd6f60f3aed5f71140d23a8d022ae9909e7473 +Author: Dylan Underwood +Date: Fri Mar 23 11:24:02 2018 -0500 + + Should be greater than or equal to (#167) + +commit 3a8462461595535a53554b0ad66bc922118e83d1 +Author: endofline +Date: Tue Feb 27 23:10:35 2018 
+0200 + + Replace disk identifier with disk serial in S.M.A.R.T snmp script (#164) + +commit bbd3b1309aaa3ecaf6f502e92718719539715c58 +Author: endofline +Date: Sun Feb 18 22:33:42 2018 +0200 + + Fix Command_Timeout missing from SMART output (#163) + +commit fd9fd178a4b43feafb414822167b3033693c8efc +Author: crcro +Date: Sat Jan 6 22:06:45 2018 +0200 + + extend: powerdns-dnsdist (#158) + + * powerdns-dnsdist app + + * fix script in help + + * removed local data manipulation + + * again name of file in script help + + * removed personal api info + +commit bacaca0be4104cc003222b941e433d5470cae76d +Author: VVelox +Date: Sat Dec 30 05:42:37 2017 -0600 + + ZFS SNMP agent :3 <3 (#156) + + * Add it as it currently is. Needs to be moved over to JSON + + * rename it to zfs-freebsd as it is FreeBSD specific + + now uses JSON + + * misc. updates and document it all + + * minor spelling correction + +commit c7cae0765e0f5072fdf3dd224f357290e2697fb5 +Author: VVelox +Date: Sat Dec 30 05:39:36 2017 -0600 + + update the fail2ban stuff (#155) + + Dropping firewall checking as the new fail2ban uses pf and anchors on + FreeBSD, which while esoteric as fuck works nicely and is reliable. 
+ +commit 8920cd3f290e8c13a3bb7db96ceb8db05845869d +Author: Slashdoom <5092581+slashdoom@users.noreply.github.com> +Date: Wed Dec 13 16:13:10 2017 +1300 + + freeradius.sh: new agent for incoming main PR (#151) + + * Update os-updates.sh + + * Update os-updates.sh + + * Update os-updates.sh + + * Create freeradius.sh + + * Update freeradius.sh + + * Update freeradius.sh + +commit 3b9d632a8d6dbd6ac3f42f75ba36faa235ef4440 +Author: arrmo +Date: Mon Dec 4 14:11:17 2017 -0600 + + hddtemp, ignore devices not supporting SMART (#153) + +commit 7fb48df8579a8e113153c1439a4fa92829847d9f +Author: Daniel Bull +Date: Fri Oct 27 06:41:05 2017 +0100 + + Fix: Apache SNMP extend IndexError (#116) + + See issue for more information: + https://github.com/librenms/librenms-agent/issues/95 + +commit 2996ad88b00f24777c0e5629cb931b8b448dd515 +Author: dragans +Date: Fri Oct 27 07:39:09 2017 +0200 + + fix: Update mysql (#127) + + Update mysql agent script based on updated changes in newest version of Percona Monitoring Plugins (Cacti template). + + Changes enable correct parsing of status data for newer versions of MySQL/MariaDB database servers and should be backward compatible with older versions. + +commit d0762871b4cfb0a7cbfcc5ba99bc1fe0b0c51cf3 +Author: Slashdoom <5092581+slashdoom@users.noreply.github.com> +Date: Tue Oct 10 08:02:05 2017 +1300 + + os-update.sh: back to package management based and count fixes (#149) + + * Update os-updates.sh + + * Update os-updates.sh + + * Update os-updates.sh + +commit 6a40ca1e9cc4319e6b7363541feb9681dcf5bc5f +Author: tomarch +Date: Wed Sep 20 21:47:11 2017 +0200 + + fix munin agent (#148) + + Without the full munin-scripts path, this script won't find munin file and return nothing. 
+ +commit 1b03d2f9f74ca29b177e596c0ff2ba13a0e1292d +Author: Uwe Arzt +Date: Wed Sep 6 20:42:58 2017 +0200 + + Add Oracle Linux Distribution to distro script (#146) + + * Add Oracle Linux to distro script + + * Revert local change + +commit 45478555df856af51e707c3cd6ace716c709e0fb +Author: arrmo +Date: Sun Aug 27 14:59:15 2017 -0500 + + Update Distro, for Raspbian Support (#144) + +commit 3380a85ff13f0dad706690b71b2bd8e9d9452926 +Author: Zucht +Date: Sat Aug 12 17:30:02 2017 +0200 + + Update raspberry.sh (#143) + + Fix state WMV9 + +commit a50e1dffb89738814a1183e2e0560ab86daaf3f0 +Author: Neil Lathwood +Date: Thu Aug 3 17:11:26 2017 +0100 + + Update raspberry.sh (#140) + +commit 584fd645d470e85e30607b8be3102292b4a7b54e +Author: drid +Date: Wed Jul 12 22:55:02 2017 +0300 + + C.H.I.P. power values (#134) + + * C.H.I.P. power values + + * Added attribution + + * Fix ACIN current calculation + + * Battery current fix + +commit 3f9dc0f5f02c1590d6e84ac10c6f7c973d54f771 +Author: RedChops +Date: Thu Jun 29 16:11:26 2017 -0400 + + Fix for bug https://github.com/librenms/librenms/issues/6821 (#138) + +commit a4efb62466c58ee05b3c078283a2a9fecb7cd3ce +Author: Stefan Funke +Date: Wed Jun 28 22:36:26 2017 +0200 + + unnecessary use of wc while already calling grep (#137) + + * useless call of wc while already calling grep + + * move grep count call to CMD_GREP to stay in project style + +commit cc6d7882dba89bce0a1f3a27d9fd3b399a2430b9 +Author: einarjh +Date: Sat Jun 10 11:20:48 2017 +0200 + + Strip all non-ASCII characters from hddtemp output (#136) + +commit 3903f431f7f56ef4f48bd50d28c05aec8e795bc0 +Author: crcro +Date: Tue Jun 6 01:00:29 2017 +0300 + + bash script for pi-hole app (#135) + +commit 84630dfb84539936efa47bfe3b13638f809a82c5 +Author: Félim Whiteley +Date: Wed May 31 22:23:38 2017 +0100 + + Fix for first line as localhost (#130) + + An example output like below where the first line of output is just "localhost" so it causes the splitting to cause an out of index 
error. + + Example: + cat /tmp/apache-snmp + localhost + ServerVersion: Apache/2.4.25 (Ubuntu) PHP/5.6.30-5+deb.sury.org~trusty+2 + ServerMPM: prefork + Server Built: 2016-12-21T00:00:00 + CurrentTime: Thursday, 18-May-2017 19:26:43 EDT + RestartTime: Thursday, 18-May-2017 11:35:48 EDT + ParentServerConfigGeneration: 2 + ParentServerMPMGeneration: 1 + ServerUptimeSeconds: 28255 + ServerUptime: 7 hours 50 minutes 55 seconds + Load1: 0.04 + Load5: 0.05 + Load15: 0.10 + Total Accesses: 5367 + Total kBytes: 61432 + CPUUser: 19.69 + CPUSystem: 1.05 + CPUChildrenUser: 0 + CPUChildrenSystem: 0 + CPULoad: .0734029 + Uptime: 28255 + ReqPerSec: .189949 + BytesPerSec: 2226.38 + BytesPerReq: 11721 + BusyWorkers: 1 + IdleWorkers: 6 + Scoreboard: ___....._.__.W........................................................................................................................................ + +commit 16178c6ac31ed2511243ccfab5b25b69b031d3fa +Author: Aldemir Akpinar +Date: Thu Jun 1 00:23:07 2017 +0300 + + Added devuan support for os-updates.sh and removed code repitition (#131) + +commit f473c5e30ca0649baa590dd5a7f041ce91f57e73 +Author: BlackDex +Date: Tue May 23 14:44:05 2017 +0200 + + Added try-except checks for global values. (#107) + + Fixed an error which prevented output. + It seems some ceph version probably use different values or something. This is a quick fix to have the script output the correct values. + +commit 6fdcc91f7041ad49cbb906b814a1b5ecf8fd2e4c +Author: Karl Shea +Date: Thu May 4 02:06:32 2017 -0500 + + Fix bind config file read (#125) + +commit e3dad6cfc9c6549e1f5cfef41ef2cf20a9827352 +Author: VVelox +Date: Wed May 3 09:23:40 2017 -0500 + + BIND cleanup and expansion (#108) + + * add BIND named SNMP extend + + * nolonger piss the entire stats across the wire, but crunch them and return them + + * more work on bind + + * more misc. 
updates + + * add proper agent support as well as optional zeroing + + * add -m + +commit 69eee9fb898bd521e3f4ab5d2d93cf5b34949e1d +Author: Aldemir Akpinar +Date: Tue May 2 12:22:19 2017 +0300 + + Added Devuan GNU/Linux support (#124) + +commit eaa6af235978405418d8e6d6e0beb04f761a578b +Author: crcro +Date: Thu Apr 27 22:54:55 2017 +0300 + + snmp-extend: sdfsinfo (#122) + + * sdfsinfo app snmp extend + + * rewrite script to bash + + * more vars + +commit 69e1ace889cfee6963cc6506a5e96fb30cabac1b +Author: RedChops +Date: Sat Apr 22 19:29:00 2017 -0400 + + Include missing SMART ids in the output (#120) + +commit 705cc0f3fe62e4837ecf4be86dec95558ca07ff3 +Author: Svennd +Date: Tue Apr 18 22:34:05 2017 +0200 + + add support for SGE/rocks job tracker (#118) + +commit d7085e001cebf0bf086b84ac0c65cad54f90ee38 +Author: Chris Putnam +Date: Tue Apr 18 13:32:41 2017 -0700 + + hddtemp: parallelize calls to hddtemp for performance (#117) + + This poll script runs hddtemp with a list of all drives as arguments and reads the output. hddtemp scans each drive's SMART status serially, which scales poorly with a large number of drives. + + In lieu of a patch to the actual hddtemp project, optionally use GNU parallel when available to parallelize the call to hddtemp. + + In testing a machine with 58 drives I went from a runtime of about 5 seconds per run to 0.5s, a performance improvement of 10x. + +commit 5f47aad492a679a81da0a19f2649f60d6637e199 +Author: Chris Putnam +Date: Fri Apr 7 01:45:56 2017 -0500 + + hddtemp: improve detection of drives (#115) + + Previously, this script was only able to find 26 drives (sda-sdz) due to the use of globbing. + + A better strategy for detecting drives would be to use lsblk on systems that support it, failing over to globbing. + + This patch adds support both for lsblk and a more comprehensive glob solution with find that will at least catch 26^2 drives. 
+ +commit 67bae5a86cfe47c90ade541c1e613f7e5e788cfd +Author: Philip Rosenberg-Watt +Date: Thu Apr 6 03:24:36 2017 -0600 + + fix: Update proxmox agent to use new Perl module (#88) + + PVE::API2Client is deprecated in Proxmox 4.4-6. Agent now requires + installation of libpve-apiclient-perl via apt. + + This commit fixes #81. + +commit a7fe1f8e6f98640463a93f934ac2580311db09ca +Author: Tony Murray +Date: Wed Mar 29 19:11:23 2017 -0500 + + Copy powerdns-recursor to snmp and remove <<>> header (#111) + +commit 74faec281c13928e60c140d85bb3138e7297fa79 +Author: Florian Beer +Date: Thu Mar 30 00:00:26 2017 +0200 + + Postfix app bug fixes (#105) + + * Postfix app bug fixes + + - add missing DS + - fix some totals + + * Move new variable to end of output + +commit 1e7762fb4eb832ed9d7530994804a284028c9c7c +Author: VVelox +Date: Wed Mar 22 09:28:57 2017 -0500 + + add SMART SNMP extend script (#101) + + * add SMART SNMP extend + + * cleanup default disk examples + + * correct a small typo + + * add option caching support + + * add checking selftest log and nolonger zeros non-existent IDs + + * now uses a config file + + * add the ability to guess at the config + + * properly remove device entries with partitions now and avoid adding dupes in a better manner + + * now have smartctl scan as well to see if it missed anything + + * note why ses and pass are ignored + + * properly use the cache file in the config now + + * actually use the cache now + +commit 94aa0feacdfc71b6d8044c66992069538071ca39 +Author: VVelox +Date: Sun Mar 19 13:03:59 2017 -0500 + + add unbound SNMP extend script (#102) + +commit 495f46afb431a0ef29fe58c40a01c7f9d352c3d5 +Author: Tony Murray +Date: Fri Mar 10 06:29:19 2017 -0600 + + Update mysql script to php7 version... (#104) + + * Update mysql script to php7 version... 
+ + * Update mysql + +commit 61579bf0ace0a963f6ffbf9ca263910c5f6614fe +Author: Tuxis Internet Engineering V.O.F +Date: Wed Mar 8 09:51:04 2017 +0100 + + Enable ipv6 in Xinetd (#100) + + * Fix indenting and enable IPv6 in Xinetd + + * Fix changelog + + * Typo + +commit 7f79fc4167adac967d89d0ee6277f78886a5c7b9 +Author: Tony Murray +Date: Tue Mar 7 23:48:15 2017 -0600 + + Update mysql + +commit 1b1d8b491f842edc3e04c5405ae13de4f60a6751 +Author: VVelox +Date: Tue Mar 7 23:40:09 2017 -0600 + + clean up snmp/mysql_stats.php and make it a proper snmpd extend script now (#99) + + * cleanup and make it something that can properly be invoked via cli + + * blank the user/pass/host bits increasing the chances it will work out of the box + + * Update mysql_stats.php + + * Update mysql_stats.php + + * Update mysql_stats.php + + * Update mysql_stats.php + + * Rename mysql_stats.php to mysql + +commit e7c331070100290b3780ba6907add81be82165c6 +Author: VVelox +Date: Fri Mar 3 14:41:38 2017 -0600 + + add Nvidia SNMP extend poller (#94) + + * add Nvidia SNMP extend + + * update the extend path + + * now support more than 4 GPUs + + this will now support how ever many GPUs are installed on a system... + + Just double checked and it appears nvidia-smi dmon only reports up to 4 GPUs at a time... 
so if we have more than 4, begin checking they exist and if so print them + +commit 2308481188f72bbad12d7d94ebd941a73fc97655 +Author: VVelox +Date: Fri Mar 3 12:55:55 2017 -0600 + + add squid snmp extend (#93) + +commit 2700598925c8481641def507a4bf902a27cb01af +Author: VVelox +Date: Fri Mar 3 08:49:15 2017 -0600 + + FreeBSD NFS extends (#90) + + * add the FreeBSD NFS client and server extends + + * white space cleanup + + * white space cleanup + +commit db3b5c7cec8fa35832739e742c84fa61e465bd9f +Author: VVelox +Date: Wed Mar 1 17:46:13 2017 -0600 + + add Postgres SNMP extend (#91) + + * add Postgres SNMP extend + + * minor comment cleanups + + * use env for check_postgres.pl + + * quote the string + +commit 42e488743917fd39019ac9300caf391a5a8120c8 +Author: VVelox +Date: Wed Mar 1 12:35:06 2017 -0600 + + add detailed Postfix poller (#92) + + * add detailed postfix poller + + * env perl + +commit c4101c9ef2a8e8dffbfaee55f067c7c89fe18e27 +Merge: bb4c67b 8343e7f +Author: Tony Murray +Date: Fri Feb 24 11:10:43 2017 -0600 + + Merge pull request #84 from VVelox/master + + add a snmpd extend script for fail2ban + +commit 8343e7f34e1c382051f65bb9d7cf5bad454b934e +Author: Tony Murray +Date: Fri Feb 24 11:09:21 2017 -0600 + + Update fail2ban + +commit 4fcce9f01dd5b0c7979a2ebc95298ff40239bfd9 +Author: Tony Murray +Date: Fri Feb 24 11:02:19 2017 -0600 + + Redefining $firewalled + +commit 8bfbce68e503b2ddcdcc9619307d168b1c332df3 +Author: VVelox +Date: Thu Feb 23 09:54:38 2017 -0600 + + if cache older than 360 seconds, don't use it + +commit 0a78888889d1e67e5696bb59e2c8fff4fd76f9ff +Author: VVelox +Date: Thu Feb 23 09:13:59 2017 -0600 + + use $f2bc for getting jail status now and not just only overall status + +commit 1e160b86e46ff7023ea13d8de13fe98e52e3b270 +Author: VVelox +Date: Thu Feb 23 08:46:18 2017 -0600 + + don't reuse the variable $iptables + +commit 4b53918a7d09dc705c761c6eba3d0b68caca7159 +Author: VVelox +Date: Thu Feb 23 08:45:04 2017 -0600 + + poke the user about 
setting a iptables path as well + +commit 90620a8558e0b164fb2a714c007b14b1ba1b1567 +Author: VVelox +Date: Thu Feb 23 08:40:59 2017 -0600 + + misc. path cleanups + +commit 5ee0faa2c38e887b61b34fd4140ae23a8583d350 +Author: VVelox +Date: Wed Feb 22 21:58:03 2017 -0600 + + make caching optional + +commit 4ffd86f8bdbe8825ac0793c1cf0b86a886656f34 +Author: VVelox +Date: Wed Feb 22 21:42:53 2017 -0600 + + Update fail2ban + +commit 0227094c6fc9cf31d7d5f9a45a63e093b6e38aa5 +Author: VVelox +Date: Mon Feb 20 13:18:50 2017 -0600 + + track both firewall and fail2ban-client + +commit 3932875ce04c1b51b8bf4c43c9934f2b29800acb +Author: VVelox +Date: Mon Feb 20 03:50:59 2017 -0600 + + correct a comment + +commit c367e9ff9d61f9cee619c19278a2bdc6d8fc7637 +Author: VVelox +Date: Mon Feb 20 03:49:50 2017 -0600 + + now requires cron usage + +commit d90f3e879200108794beb7a2a4cc047f2938899e +Author: VVelox +Date: Sun Feb 19 23:41:51 2017 -0600 + + use fail2ban-client instead + +commit 710f38e8ff7cee520f9c7cc8ada421b6f32684c5 +Author: VVelox +Date: Sat Feb 18 00:09:12 2017 -0600 + + move this over to perl and properly check iptables + +commit 6f76427952194ca6036181c31402887e72317308 +Author: VVelox +Date: Fri Feb 17 23:08:53 2017 -0600 + + remove pointless exit + +commit 4b600ad2b41be4f338f1745320b3dbd64c5f5ba9 +Author: VVelox +Date: Fri Feb 17 23:08:25 2017 -0600 + + whoops, forgot to remove \ as well + +commit bb4c67b217fc6f553c36861d4da0c5edfd61913c +Merge: ff6ee0e e3f3bd3 +Author: Tony Murray +Date: Fri Feb 17 22:42:57 2017 -0600 + + Merge pull request #86 from florianbeer/patch-1 + + Update shebang + +commit 6955e5d410f87be4423ac86111841721292911fd +Author: VVelox +Date: Fri Feb 17 10:33:02 2017 -0600 + + don't assume it appends the jail name + +commit 8b78f863d34f24858ca3d061df02efa6213d3b3b +Author: VVelox +Date: Fri Feb 17 10:32:27 2017 -0600 + + update to check fail2ban and f2b + +commit e3f3bd3efb36ee391430d61d363afa1e8d322ae3 +Author: Florian Beer +Date: Fri Feb 17 16:37:00 2017 
+0100 + + Update shebang + + With the original shebang this script didn't work on Debian and Ubuntu machines. Using `/usr/bin/env bash` makes the script more portable. + +commit ff6ee0e2bc0e84ce1b0c4276713f8cb70d3154a2 +Author: Robert Verspuy +Date: Fri Feb 17 01:46:13 2017 +0100 + + Fixed correct amount of pipeline symbols when degrees symbol is missing. (#85) + + When the script is called through xinetd/check_mk (on my system), there is no degree symbol, but a space. + Changed the script to handle both correctly + +commit 21c953d11af41e1256ecf92070fc36b999b1e084 +Merge: 1ec8f20 58d1726 +Author: kitsune +Date: Thu Feb 16 11:30:03 2017 -0600 + + Merge branch 'master' of https://github.com/librenms/librenms-agent + +commit 1ec8f204ee0c96ca0a9cf77dff7bdb0f79402462 +Author: VVelox +Date: Thu Feb 16 10:50:46 2017 -0600 + + add fail2ban snmpd extend script + +commit 58d17264c7d57978a408b800084f153857d3b3f9 +Author: rockyluke +Date: Thu Feb 16 16:12:01 2017 +0100 + + Use command -v instead binary path (#80) + +commit 60becc9b3a9429a42faae18440821b90ac6586fc +Author: VVelox +Date: Thu Feb 16 09:10:53 2017 -0600 + + add a single pool php-fpm net-snmpd extend poller (#83) + +commit 677bd4187e90211a70419e01fe97a809c6cabfd0 +Author: VVelox +Date: Wed Feb 15 11:37:18 2017 -0600 + + add a single pool php-fpm net-snmpd extend poller + +commit 575956cae3ea5fcb014db3777a83e52026f95baa +Author: crcro +Date: Fri Jan 20 10:47:30 2017 +0200 + + snmp: exim-stats (#79) + + * exim-stats frozen mails + + * added total queue info + +commit d090686b722a1b0d8ded3ebfedec5c3b0f8a46a3 +Merge: ae43e5f dc60463 +Author: Tony Murray +Date: Wed Dec 14 16:39:59 2016 -0600 + + Merge pull request #75 from bungojungo/master + + Added fedora support to distro/os-updates + +commit ae43e5f493941aab81c96e3dc9378da434b55ce6 +Merge: 6c130ea de1f177 +Author: Tony Murray +Date: Wed Dec 14 16:39:47 2016 -0600 + + Merge pull request #73 from paulgear/master + + Make ups-nut work on Debian Jessie + +commit 
6c130ea65e191d76a12b7d6d31d4726937b0f3e4 +Merge: e527768 3d061d2 +Author: Tony Murray +Date: Wed Dec 14 16:34:10 2016 -0600 + + Merge pull request #76 from murrant/powerdns-python26 + + Support python2.6 for powerdns scripts + +commit 3d061d24079d0dcb7458a75b3d83d5aaba43acc9 +Author: Tony Murray +Date: Wed Dec 14 16:27:15 2016 -0600 + + Support python2.6 for powerdns scripts + fixes #67 + +commit dc604636bccd8779bd261b013af4872cad14e1f0 +Author: Jason Scalia +Date: Wed Dec 7 22:11:48 2016 -0500 + + added fedora/dnf support + +commit 8b3ca2dac293ef132f1e48afa871b7158d692d90 +Author: Jason Scalia +Date: Wed Dec 7 21:48:22 2016 -0500 + + Added fedora support + +commit de1f1775cc26aacb931141182c212de706b80b5f +Author: Paul Gear +Date: Sat Dec 3 14:16:54 2016 +1000 + + Restore previous default UPS name + +commit 465ec12dd4757baa95560b11f89a433f05fb7454 +Author: Paul Gear +Date: Sat Dec 3 14:07:02 2016 +1000 + + Make ups-nut work on Debian Jessie + + This script was broken on Debian Jessie (and probably + all other Debian-based distros, including Ubuntu). + This commit removes the hard-coding of paths and uses + $PATH per normal bash practice, and should work on a + wider range of shell variants. + +commit e52776889cea5e3379422ce4ffb7171bba4fbdf1 +Author: arrmo +Date: Sat Nov 26 02:12:41 2016 -0600 + + Update to Distro (to support dd-wrt) (#72) + +commit c5fea261dea71cc9600936455bdf357cc062b220 +Author: Mathias B +Date: Thu Nov 17 09:31:56 2016 +0100 + + Add Debian support (#71) + + Before that only Ubuntu was supported, now Debian users can use this nice script too! 
+ +commit 36ed3f008c6f2a0cc0be0cdb1ce9199a6e495dbc +Author: Karl Shea +Date: Sat Oct 8 15:26:07 2016 -0500 + + Agent: script to collect data from GPSD (#69) + +commit 91c251fd94d73f44e8757b242db82ed240f80a1d +Author: Tuxis Internet Engineering V.O.F +Date: Wed Oct 5 11:06:48 2016 +0200 + + fix: a dirty hack to prevent failing of stats when the cluster is rebuilding (#68) + + because Ceph returns '-inf' which the json decompiler doesn't seem to get.. + +commit dd365168a5eedf655d87e34e89664b191f855a15 +Author: crcro +Date: Mon Oct 3 21:27:56 2016 +0300 + + fix conflict (#66) + +commit 58e16b794a0e33d0dd71d8c1f936bc8b25ad7ced +Author: crcro +Date: Sun Sep 25 16:28:37 2016 +0300 + + snmp-extend: os-updates (#65) + + * reverted back to os-release checks, added arch pacman + + * fixed file name + +commit 2699cde73fcbca9e556a762dcfd90c81e5561d26 +Author: crcro +Date: Sun Sep 25 16:28:00 2016 +0300 + + snmp-extend: ups-apcups (#58) + + * snmp-extend-ups-apcups + + * rewrite of apc ups + + * header fix + + * header fix + +commit fa308bfe3f388f110e9df083d6b2c649fa69472e +Author: crcro +Date: Sat Sep 24 20:30:09 2016 +0300 + + snmp-extend: ups-nut update 1 (#63) + + * new code for better matching, snmp-extend compliance + + * removed unused vars + + * extra fixes + + * removed the need of tmp file + + * removed charge_low, deemed useless by user + + * removed values that are not plottable + + * readded ds + +commit f63c4ab7bea382b08d0450b42a374db082ccd0ef +Merge: c1c537e d9f36a8 +Author: Tony Murray +Date: Mon Sep 12 22:01:51 2016 -0500 + + Merge pull request #61 from crcro/app-ntp-server-update-1 + + app: ntp-server update 1 + +commit c1c537eea11fde70435e88b28b17292dc7c72f75 +Merge: 9a2716d 11a9fce +Author: Tony Murray +Date: Mon Sep 12 22:01:24 2016 -0500 + + Merge pull request #57 from crcro/snmp-extend-ups-nut + + snmp-extend: ups-nut + +commit 9a2716dc83ad11462495e5ee804fb122eb402faa +Merge: 87cc835 85ae77c +Author: Tony Murray +Date: Mon Sep 12 19:33:07 2016 -0500 + + 
Merge pull request #60 from crcro/remove-ntp-php + + remove obsolete ntp scripts + +commit d9f36a84b13dd42361d24df11d6cb60c7b71f260 +Author: crcro +Date: Mon Sep 12 12:48:17 2016 +0300 + + cleaner code + +commit 28cae5cff3b87532fd145c55de5b22aa0f4c6d05 +Author: crcro +Date: Mon Sep 12 11:52:13 2016 +0300 + + better handling default case + +commit aeecb1621c8ed5863d5c7563ffc96047909b8cfa +Author: crcro +Date: Mon Sep 12 11:45:14 2016 +0300 + + header fix + +commit f48f4cc6e513773fac094d6b3115954deaeacbc7 +Author: crcro +Date: Mon Sep 12 11:43:34 2016 +0300 + + update 1 ntp server + +commit 87cc835096ffdd4f8310b51e684f63aa7726d14d +Author: crcro +Date: Sat Sep 10 19:08:03 2016 +0300 + + os-updates.sh clean (#59) + +commit 85ae77c01c28308dd1f58b897aa7c8efe5b87386 +Author: crcro +Date: Sat Sep 10 04:50:33 2016 +0300 + + remove obsolete ntpd-server.php + +commit 262f798a9737a5b62bef0ab7a657782a934b86ac +Author: crcro +Date: Sat Sep 10 04:48:55 2016 +0300 + + remove obsolete ntp-client.php script + +commit 11a9fcef62571e12168b8c1e9d1ac604b65c227d +Author: crcro +Date: Fri Sep 9 15:36:01 2016 +0300 + + snmp-extend-ups-nut + +commit 6128dc3c7133802ff66b199bc99289fb07761d6e +Author: vectr0n +Date: Fri Sep 9 02:16:28 2016 -0400 + + Update hddtemp to include hddtemp -w option (#56) + + hddtemp gives inconsistent values in it's current state, after some debugging I was able to resolve the issue by passing -w to the hddtemp command, this will wake-up the drive if it is in a sleep state to gather information. 
+ +commit 42bc0a07aab450e242471e271380fc29642b34e7 +Author: crcro +Date: Wed Sep 7 22:37:31 2016 +0300 + + ntp-client app using shell only, tested with ntpq 4.2.8p8 (#54) + +commit 718d627cfdbad19848a384fc8eaba332dcaef504 +Author: crcro +Date: Wed Sep 7 22:37:23 2016 +0300 + + app: ntp-server (#55) + + * ntp-server app using shell only, tested with 4.2.8p8 + + * fix for higher stratum value + + * change the description in comment to reflect latest webui push + +commit 351e5aa7bc6f1a79d51b1bd098cace659c1b0e9f +Author: Tatermen +Date: Sun Aug 28 20:06:04 2016 +0100 + + Freeswitch (#53) + + feature: Added freeswitch support + +commit 839b518358d2acb488c3d7709e12392ee2b4c224 +Merge: 6a84755 561efa4 +Author: Neil Lathwood +Date: Tue Aug 23 21:48:08 2016 +0100 + + Merge pull request #52 from murrant/move-scripts + + SNMP extend scripts from the main repo + +commit 561efa41be5e22614912300ac9242582340e0662 +Author: Tony Murray +Date: Mon Aug 22 21:35:13 2016 -0500 + + SNMP extend scripts from the main repo + +commit 6a84755105f651d03939310b4bd5a3cd85dc90dd +Merge: c2e4c33 deb3683 +Author: Tony Murray +Date: Sun Aug 21 19:58:13 2016 -0500 + + Merge pull request #51 from crcro/dhcp_pretty + + rewrite dhcp-stats with loop + +commit deb36833f17d31ddd6176aa7dfc3767817e7c446 +Author: crcro +Date: Mon Aug 22 01:45:23 2016 +0300 + + @paulgear recomandation + +commit c2e4c33abf5edbc0b7a5a00f8871f87d4d0f0513 +Merge: 672918c 9cd81f1 +Author: Tony Murray +Date: Wed Aug 17 09:59:11 2016 -0500 + + Merge pull request #50 from OpcaoTelecom/unbound + + Added unbound stats script + +commit 9cd81f1b930e2ed777ecf3bf6c7deff65df6e564 +Author: Alan Gregory +Date: Wed Aug 17 09:36:39 2016 -0300 + + Added unbound stats script + +commit 672918c40fd87455398267cbf744a52362f738a7 +Merge: 9fe5444 87584e7 +Author: Tony Murray +Date: Tue Aug 16 12:43:10 2016 -0500 + + Merge pull request #48 from crcro/raspberry-sensors + + raspberry sensors + +commit 9fe5444738d086b1d33f92ca0e5905a14cd9c8a0 +Merge: 
c3afbf3 b6bdb9e +Author: Tony Murray +Date: Mon Aug 15 22:57:52 2016 -0500 + + Merge pull request #49 from murrant/ntp + + Copy ntp scripts from the main repo. + +commit b6bdb9ea45d579becc8f858090e8b7d3e4c809ea +Author: Tony Murray +Date: Mon Aug 15 22:56:31 2016 -0500 + + Copy ntp scripts from the main repo. + +commit 87584e7ef79996db60cd62e64dd4cbaf53a0bac8 +Author: crcro +Date: Sun Aug 14 17:43:27 2016 +0300 + + added snmp extend to get raspberry sensors + +commit c3afbf35bd81bff0dbcdb67e6657dd042ae67588 +Merge: 9623342 aa59548 +Author: Neil Lathwood +Date: Tue Aug 9 19:47:51 2016 +0100 + + Merge pull request #45 from murrant/os-updates + + Do not detect os, detect package managers + +commit 9623342554317ba55f7a987d18250e941a0a7c1f +Merge: 0f5a115 7828777 +Author: Tony Murray +Date: Tue Aug 9 13:08:41 2016 -0500 + + Merge pull request #46 from murrant/distro + + Update distro to match the main repo file + +commit aa59548e0c3d6e5462cd2342ca671dc72430c3f1 +Author: Tony Murray +Date: Tue Aug 9 12:50:23 2016 -0500 + + Do not detect os, detect package managers. + Add pacman support. 
+ +commit 78287777696f6569dfe575770f1c47553fddd5a9 +Author: Tony Murray +Date: Tue Aug 9 11:40:01 2016 -0500 + + Update distro to match the main repo file + +commit 0f5a1150f373371fc508e160e58c56cea5adbb99 +Merge: d6308e4 05fe3f8 +Author: Neil Lathwood +Date: Thu Aug 4 18:53:10 2016 +0100 + + Merge pull request #40 from florianbeer/patch-1 + + Add Debian and make update call more robust + +commit d6308e4e1c04d69688d724c7c5c04ab0a3c94fbc +Merge: 3740f3e 2accc28 +Author: Neil Lathwood +Date: Wed Aug 3 21:09:08 2016 +0100 + + Merge pull request #42 from crcro/app-dhcp-stats + + app-dhcp-stats snmp extend + +commit 2accc2848c44f8c2c33a455eb1a2e4ffe801921c +Author: crcro +Date: Wed Aug 3 22:09:55 2016 +0300 + + app-dhcp-stats snmp extend + +commit 05fe3f8cc195b797f69b0599ca2a2e198f0b5d0c +Author: Florian Beer +Date: Wed Aug 3 12:16:22 2016 +0200 + + Remove update call as this requires root + + See discussion here https://github.com/librenms/librenms-agent/pull/40#issuecomment-237198796 + +commit fac01628a07cf8083f91d9924ab8d63a9d4141db +Author: Florian Beer +Date: Wed Aug 3 04:51:35 2016 +0200 + + Add Debian and make update call more robust + + - Debian based systems need to update the index before being able to report upgradable packages. + - Debian old-stable doesn't have `apt` yet and Ubuntu 14.04 emits the following warning when using `apt` in a script: + `WARNING: /usr/bin/apt does not have a stable CLI interface yet. Use with caution in scripts.` + + By using `apt-get`, issuing a `update` call first and then counting the result of `grep 'Inst'`, this script now works on Debian 7, Debian 8, Ubuntu 14.04 and Ubuntu 16.04. 
+ +commit 3740f3e147d7d97e10e4b8e77757ab67deb2bb84 +Merge: fb678cb 1964aec +Author: Tony Murray +Date: Tue Aug 2 20:35:16 2016 -0500 + + Merge pull request #38 from crcro/master + + app: nfs-v3-stats + +commit fb678cb58df6277be2176e8a45a08af1d8dcb8d5 +Merge: 1d4c452 8d7e0df +Author: Tony Murray +Date: Mon Aug 1 11:26:27 2016 -0500 + + Merge pull request #39 from xbeaudouin/fix_distro_freebsd + + Add FreeBSD detection to distro script + +commit 8d7e0df4eb1e35b776aa17d2e6c2ea202cc021a7 +Author: xavier.beaudouin +Date: Mon Aug 1 11:15:52 2016 +0200 + + Add FreeBSD detection to distro script + +commit 1d4c4529ae907b343b7ffcb6eaeb94563ad2cb69 +Merge: dde18e9 760f9de +Author: Paul Gear +Date: Sat Jul 30 14:14:39 2016 +1000 + + Merge pull request #37 from xbeaudouin/master + + Fix some bash scripts to work with FreeBSD + +commit 1964aece5e421391cc6cb589c668da0b5f2eeaee +Author: crcro +Date: Fri Jul 29 20:22:35 2016 +0300 + + added snmp extend script for os-updates application + +commit 57b6224254eb3992e09358df2d867573512f6809 +Author: crcro +Date: Fri Jul 29 20:19:41 2016 +0300 + + added snmp extend script for nfs-v3-stats application + +commit 760f9de567a2876b0ad793979754661946b92c5c +Author: xavier.beaudouin +Date: Fri Jul 29 13:23:20 2016 +0200 + + /bin/bash => /usr/bin/env bash to allow freebsd agent work without patching each files + +commit dde18e98954c83fb52ae89083214814b5515a6c1 +Merge: 18f4006 9a3846c +Author: Neil Lathwood +Date: Tue Jul 26 20:46:20 2016 +0100 + + Merge pull request #36 from murrant/powerdns-recursor + + PowerDNS Recursor agent + +commit 18f4006e09a1436013eee8ed77927585f714fc43 +Merge: f75fc9f fc07e27 +Author: Neil Lathwood +Date: Tue Jul 26 20:45:38 2016 +0100 + + Merge pull request #33 from murrant/mysql-php7 + + Use mysqli instead of mysql + +commit f75fc9fce5a82c47e1303f5514eb0c421ad5cf93 +Merge: bfdf71d c70d12c +Author: Tony Murray +Date: Fri Jul 22 21:13:58 2016 -0500 + + Merge pull request #35 from murrant/duplicate-nfsstats + + Remove 
duplicate nfsstats file + +commit 9a3846cac30515a7a01a44ecc9fc6e08e78df1f5 +Author: Tony Murray +Date: Fri Jul 22 15:33:16 2016 -0500 + + PowerDNS Recursor agent + +commit c70d12c83c00e180da8a7e8281acdbd8e4741fa1 +Author: Tony Murray +Date: Fri Jul 22 15:22:48 2016 -0500 + + Remove duplicate nfsstats file + +commit bfdf71d6995ced14ebd1e25042a60c7107a57dc0 +Merge: 41cb583 9501c2f +Author: Tony Murray +Date: Thu Jul 21 22:30:29 2016 -0500 + + Merge pull request #34 from murrant/nfs + + Copy nfsstats script from main repo. + +commit 9501c2f4ffd4649982521c387b3d9dcab1de83d9 +Author: Tony Murray +Date: Thu Jul 21 22:28:41 2016 -0500 + + Copy nfsstats script from main repo. + Send PR to remove scripts from the main repo. + +commit fc07e27c37c74d47c61aeac3cb966062f8da63a2 +Author: Tony Murray +Date: Thu Jul 21 22:26:02 2016 -0500 + + Fix permissions + +commit 41cb5835ff3b0ca41a6392f19e43d590bd08d785 +Merge: db44c10 9bad4df +Author: Tony Murray +Date: Thu Jul 21 21:48:27 2016 -0500 + + Merge pull request #32 from tuxis-ie/proxmox-issue-28 + + Proxmox issue 28 + +commit e80b025818f2f993f4443be3100c5bcd1331812a +Author: Tony Murray +Date: Thu Jul 21 21:31:25 2016 -0500 + + Use mysqli instead of mysql + +commit 9bad4dfb3e586d7892709284cccf17417cf5ec03 +Author: Mark Schouten +Date: Wed Jul 13 15:06:57 2016 +0200 + + Something like this @einarjh ? + +commit 6d27c7edb3f4972a89fbf5641c4ece106b5dbc09 +Author: Mark Schouten +Date: Mon Jul 11 17:06:14 2016 +0200 + + Wrap these calls in an eval to prevent it from dying if its a container instead of a qemu vm. 
Fixes #28 + +commit db44c1070950c2e06565a39395bb09f09a023b4a +Merge: d00ce4a 5b21301 +Author: Neil Lathwood +Date: Sat Jul 9 19:12:59 2016 +0100 + + Merge pull request #31 from librenms/nfsstats + + Added nfsstats.sh file + +commit 5b21301ecdb761fa0e32f9295c8ea60aef44f3a7 +Author: Neil Lathwood +Date: Sat Jul 9 19:12:13 2016 +0100 + + Added nfsstats.sh file + +commit d00ce4a15a6b52753d108d1aeb2a768e7bfafe36 +Merge: c996b54 ca5a5a1 +Author: Neil Lathwood +Date: Thu Jun 30 08:57:07 2016 +0100 + + Merge pull request #29 from murrant/powerdns-python3 + + Python3 fixes for powerdns agent. Compatible with python2. + +commit ca5a5a12c065eb67e48410ed09ff97630a76f6b8 +Author: Tony Murray +Date: Wed Jun 29 19:52:10 2016 -0500 + + Python3 fixes for powerdns agent. Compatible with python2. + +commit c996b54e79b317785c58963abb6f71c31e61ba10 +Merge: fb7912b 8328d71 +Author: Neil Lathwood +Date: Thu Jun 9 11:38:43 2016 +0100 + + Merge pull request #27 from murrant/rrdcached + + Local script to collect stats from rrdcached + +commit 8328d71c0995fa8f6dc7c50de940fbe9b242fc41 +Author: Tony Murray +Date: Wed Jun 8 20:35:19 2016 -0500 + + Local script to collect stats from rrdcached + Being able to connect to local unix sockets is the primary advantage of this. + +commit fb7912beda4181b23d8cbbbf500a1e7ed4527001 +Merge: 601ac84 8d856e2 +Author: Daniel Preussker +Date: Thu May 5 13:32:02 2016 +0200 + + Merge pull request #25 from Exa-Omicron/master + + Improved hddtemp agent module + +commit 8d856e27648b6df2d89af852ad1cd912319a965f +Author: Robert Verspuy +Date: Thu May 5 10:27:30 2016 +0200 + + Improved hddtemp agent module + + I had some issues with the netcat / daemon implementation of the module. + netcat was stallingor sometimes netcat did not return the full output of hddtemp. + Running hddtemp directly without running it as a daemon is much more stable for me. + + This new version also does not give any stdout output when hddtemp is not installed or when no disks can be found. 
+ Running the script manually on a server does give stderr output for easy debugging. + +commit 601ac843c303d29b8149142a3fac967aaa4a2638 +Merge: 21817b6 1c13779 +Author: Tony Murray +Date: Thu Apr 21 09:46:49 2016 -0500 + + Merge pull request #23 from librenms/freebsd-agent + + Create check_mk_agent_freebsd + +commit 1c1377958e6c8cfd8ca7fd1fd4fcafdae92e1a1b +Author: Neil Lathwood +Date: Thu Apr 21 15:41:06 2016 +0100 + + Update check_mk_agent_freebsd + +commit cdd235a12a0bd4d0cbffe330048fd476aa5fddd5 +Author: Neil Lathwood +Date: Thu Apr 21 15:39:59 2016 +0100 + + Create check_mk_agent_freebsd + + Added freebsd agent + +commit 21817b6b36692bdca8fac8f3ee4a0258a2d2bcee +Author: Tony Murray +Date: Tue Mar 29 08:29:02 2016 -0500 + + Fix wording for systemd unit + +commit 88c4b00b19370bea3e597770793d90b24f24b10b +Merge: dd2b95d 50a3c25 +Author: Neil Lathwood +Date: Tue Mar 29 09:51:00 2016 +0100 + + Merge pull request #22 from murrant/master + + Add systemd unit files + +commit 50a3c25115e501db4bd9fc97a8a8e3b7d81a635e +Author: Tony Murray +Date: Mon Mar 28 12:56:26 2016 -0500 + + Add systemd unit files + +commit dd2b95d8d2eb35bf1b3f0aea34d843af33f1c28e +Merge: 6d0babe ff2bbe6 +Author: Neil Lathwood +Date: Wed Nov 25 13:37:25 2015 +0000 + + Merge pull request #17 from f0o/upstream-snapshot + + Snapshot upstream changes + +commit ff2bbe6882a9b79b93883980b0360f780fc24d76 +Author: f0o +Date: Wed Nov 25 13:26:26 2015 +0000 + + Snapshot upstream changes + +commit 6d0babe0973d5cb8e2d35fd33e2f45e96ae96c15 +Merge: 8e847b9 12e31c1 +Author: Daniel Preussker +Date: Wed Nov 25 13:28:17 2015 +0000 + + Merge pull request #16 from tuxis-ie/powerdns-support + + Powerdns support + +commit 12e31c16c3c42e6d1c73a196978acf18e554e4b0 +Author: Mark Schouten +Date: Mon Nov 23 14:10:17 2015 +0100 + + Add PowerDNS Authoritative Agent + +commit d16462bb5ac978cfd5b7cb213359989b2aabc791 +Author: Mark Schouten +Date: Mon Nov 23 14:10:15 2015 +0100 + + Add PowerDNS Authoritative Agent + +commit 
8e847b986aa3af50eb6c2302c3d1f0df158a47bd +Merge: da7e40c 66d5028 +Author: Neil Lathwood +Date: Wed Nov 11 17:17:24 2015 -0400 + + Merge pull request #15 from SaaldjorMike/mysql1 + + Moved mysql tag a bit up and added a newline to error msg. + +commit 66d502837d2643c59d7f87af076fd851b0ba12c1 +Author: Mike Rostermund +Date: Wed Nov 11 14:21:49 2015 +0100 + + Moved mysql tag a bit up and added a newline to error msg. + +commit da7e40c43eb3155d3253c1eb695a78a0d9362a51 +Merge: f6f0079 0cc7b49 +Author: Neil Lathwood +Date: Tue Nov 10 08:08:34 2015 -0400 + + Merge pull request #14 from tuxis-ie/ceph-support + + Ceph support + +commit 0cc7b493978c06f0f3e73749bac1fbadf56c1be8 +Author: Mark Schouten +Date: Tue Nov 10 11:00:58 2015 +0100 + + Add support for Ceph + +commit 9b4c3b34009a441df579051336bf3ea0647fe73c +Author: Mark Schouten +Date: Tue Nov 10 10:58:24 2015 +0100 + + Add support for Ceph + +commit f6f0079c6620ee3d75adf7511006006353903dd3 +Merge: d90957a 30b7651 +Author: Daniel Preussker +Date: Wed Nov 4 13:42:29 2015 +0000 + + Merge pull request #13 from tuxis-ie/master + + Crap, forgot this line... + +commit 30b7651e0142826202276a7bf9a31343d759c68a +Author: Mark Schouten +Date: Wed Nov 4 14:40:19 2015 +0100 + + Crap, forgot this line... 
+ +commit d90957a0bc9e484056eaf26b206672b940fc7a9f +Merge: 25fcd5a 6554087 +Author: Daniel Preussker +Date: Wed Nov 4 13:35:33 2015 +0000 + + Merge pull request #12 from tuxis-ie/master + + Fix the proxmox-agent for Proxmox VE 4.0 + +commit 65540872e7a1215cfdca1d4b480670a67cf50a77 +Author: Mark Schouten +Date: Wed Nov 4 14:30:21 2015 +0100 + + Fix the proxmox-agent for Proxmox VE 4.0 + +commit 25fcd5ae76682006ed61aa09212738381968208f +Merge: 20e2d22 b6bfbba +Author: Paul Gear +Date: Mon Oct 26 09:39:15 2015 +1000 + + Merge pull request #10 from librenms/laf-patch-1 + + Update distro to use env + +commit b6bfbbaf2c99945aceb92e9c7f950a53196c26fc +Author: Neil Lathwood +Date: Sun Oct 25 21:51:43 2015 +0000 + + Update distro to use env + +commit 20e2d220bde9e4edec76d00551c955274d06130c +Merge: 87a20db 2b96259 +Author: Daniel Preussker +Date: Fri Aug 28 09:07:49 2015 +0000 + + Merge pull request #7 from tuxis-ie/master + + Add a proxmox-agent + +commit 2b9625953240ade30cf5ccef22a9293a016b819b +Author: Mark Schouten +Date: Fri Aug 28 10:52:04 2015 +0200 + + Add license + +commit d6795c60a171eba023b8c0e5b151376c6bcfa0d1 +Author: Mark Schouten +Date: Fri Aug 28 10:49:24 2015 +0200 + + Add proxmox-agent + +commit fee2ed820bedb4613871aa9747b40121e3ae7879 +Author: Mark Schouten +Date: Fri Aug 28 10:49:19 2015 +0200 + + Add proxmox-agent + +commit 87a20db845517070fdb2eec70d264e18bfde2871 +Merge: 8ae2b15 6493263 +Author: Daniel Preussker +Date: Thu Aug 20 17:14:11 2015 +0000 + + Merge pull request #5 from tuxis-ie/master + + Add files to create a Debian-package + +commit 64932630f0b67e876d0859df491705b11a71aa07 +Author: Mark Schouten +Date: Thu Aug 20 14:18:10 2015 +0200 + + Do not include the README in the repodir + +commit 77864124dc119b0d89b1c852090e5f283b02123a +Author: Mark Schouten +Date: Thu Aug 20 10:34:50 2015 +0200 + + Add license + +commit 8ae2b1520b9e75583b87977427415c90256473e1 +Merge: 69551b0 63d3166 +Author: Daniel Preussker +Date: Tue Aug 18 15:14:00 2015 +0000 
+ + Merge pull request #6 from librenms/f0o-mysql-host-logic + + Fix MySQL Host Logic + +commit 63d31665cea2afaeadb8c8ba1b58b37605597b80 +Author: Daniel Preussker +Date: Tue Aug 18 15:08:50 2015 +0000 + + Fix MySQL Host Logic + +commit 51270e24c19bed95030a41e3ab7828bb2330d68d +Author: Mark Schouten +Date: Mon Aug 17 16:58:33 2015 +0200 + + Also include distro in this package + +commit 2b4d17280dd4cbff1b497e2f6ffc17bf75020ea9 +Author: Mark Schouten +Date: Mon Aug 17 16:57:48 2015 +0200 + + Strip comments (on Qemu boxes, this pollutes a lot + +commit 2833310e228e185e78ddbb96589f63e9d2d7b852 +Author: Mark Schouten +Date: Mon Aug 17 16:50:26 2015 +0200 + + Enable dpkg and dmi by default + +commit 3cd06768b5487261ddde819aad6428a3183ffbbf +Author: Mark Schouten +Date: Mon Aug 17 16:48:22 2015 +0200 + + Place all plugins in a repo-dir and add mk_enplug to enable plugins + +commit 7954d5a085f0ffe31fa1becb6d3132ca63b46942 +Author: Mark Schouten +Date: Mon Aug 17 16:19:04 2015 +0200 + + Add Conflicts/Provides and fix location for xinetd.d + +commit a7df28415a4645293835c79d15201539376be11d +Author: Mark Schouten +Date: Mon Aug 17 15:12:12 2015 +0200 + + Add files to create a Debian-package + +commit 69551b05e2673c899077a4539d1b6a6ec95b4290 +Merge: cfec5ec 4683c68 +Author: Daniel Preussker +Date: Tue Jul 28 20:11:44 2015 +0000 + + Merge pull request #4 from alangregory/master + + Added Snmpd.conf example and distro executable + +commit 4683c68d1d23f63ff9977c8a11543004cd4b8a34 +Author: Alan Gregory +Date: Tue Jul 28 15:58:29 2015 -0300 + + Added Snmpd.conf example and distro executable + +commit cfec5ec65dc93a6bc9260eb4f1d3f9379d1c7287 +Author: Daniel Preussker +Date: Tue Jun 9 17:34:00 2015 +0000 + + Delete README.md + +commit f1c9d6578a9f5df51047e5246624a96e55e043d4 +Merge: a47d95b 195a46c +Author: Daniel Preussker +Date: Mon May 18 13:07:29 2015 +0200 + + Merge pull request #1 from f0o/master + + Initial commit + +commit 195a46c1e377f6729acf38f294153ef40147d2ff +Author: f0o 
+Date: Mon May 18 10:57:45 2015 +0000 + + Initial commit + +commit a47d95b58cc05e32a3feaa7f0022857da80ba58a +Author: Daniel Preussker +Date: Mon May 18 09:28:15 2015 +0000 + + Initial commit diff --git a/snmp/mdadm b/snmp/mdadm new file mode 100755 index 000000000..fdd40b983 --- /dev/null +++ b/snmp/mdadm @@ -0,0 +1,104 @@ +#!/bin/bash + +CAT=/bin/cat +LS=/bin/ls + +CONFIGFILE=$0.conf +if [ -f $CONFIGFILE ] ; then + . $CONFIGFILE +fi + +VERSION=1 +ERROR_CODE=0 +ERROR_STRING="" + +OUTPUT_DATA='['\ + +if [ -d /dev/md ] ; then + for RAID in /sys/block/md* ; do + + # ignore arrays with no slaves + if [ -z "$($LS -1 $RAID/slaves)" ] ; then + continue + fi + # ignore "non existing" arrays + if [ ! -f "$RAID/md/degraded" ] ; then + continue + fi + + RAID_NAME=$(basename $RAID) + RAID_DEV_LIST=$($LS $RAID/slaves/) + RAID_LEVEL=$($CAT $RAID/md/level) + RAID_DISC_COUNT=$($CAT $RAID/md/raid_disks) + RAID_STATE=$($CAT $RAID/md/array_state) + RAID_ACTION=$($CAT $RAID/md/sync_action) + RAID_DEGRADED=$($CAT $RAID/md/degraded) + + if [ "$RAID_SYNC_SPEED" = "none" ] ; then + RAID_SYNC_SPEED=0 + else + let "RAID_SYNC_SPEED=$($CAT $RAID/md/sync_speed)*1024" + fi + + if [ "$($CAT $RAID/md/sync_completed)" = "none" ] ; then + RAID_SYNC_COMPLETED=100 + else + let "RAID_SYNC_COMPLETED=100*$($CAT $RAID/md/sync_completed)" + fi + + # divide with 2 to size like in /proc/mdstat + # and multiply with 1024 to get size in bytes + let "RAID_SIZE=$($CAT $RAID/size)*1024/2" + + RAID_DEVICE_LIST='[' + ALL_DEVICE_COUNT=0 + for D in $RAID_DEV_LIST ; do + RAID_DEVICE_LIST=$RAID_DEVICE_LIST'"'$D'",' + let "ALL_DEVICE_COUNT+=1" + done + if [ ${#RAID_DEVICE_LIST} -gt 3 ] ; then + RAID_DEVICE_LIST=${RAID_DEVICE_LIST: : -1} + fi + RAID_DEVICE_LIST=$RAID_DEVICE_LIST']' + + RAID_MISSING_DEVICES='[' + for D in $RAID_DEV_LIST ; do + if [ -L $RAID/slaves/$D ] && [ -f $RAID/slaves/$D ] ; then + RAID_MISSING_DEVICES=$RAID_MISSING_DEVICES'"'$D'",' + fi + done + if [ ${#RAID_MISSING_DEVICES} -gt 3 ] ; then + 
RAID_MISSING_DEVICES=${RAID_MISSING_DEVICES: : -1} + fi + RAID_MISSING_DEVICES=$RAID_MISSING_DEVICES']' + + let "RAID_HOTSPARE_COUNT=ALL_DEVICE_COUNT-RAID_DISC_COUNT" + + ARRAY_DATA='{'\ +'"name":"'$RAID_NAME\ +'","level":"'$RAID_LEVEL\ +'","size":"'$RAID_SIZE\ +'","disc_count":"'$RAID_DISC_COUNT\ +'","hotspare_count":"'$RAID_HOTSPARE_COUNT\ +'","device_list":'$RAID_DEVICE_LIST\ +',"missing_device_list":'$RAID_MISSING_DEVICES\ +',"state":"'$RAID_STATE\ +'","action":"'$RAID_ACTION\ +'","degraded":"'$RAID_DEGRADED\ +'","sync_speed":"'$RAID_SYNC_SPEED\ +'","sync_completed":"'$RAID_SYNC_COMPLETED\ +'"},' + + OUTPUT_DATA=$OUTPUT_DATA$ARRAY_DATA + done +fi + +OUTPUT_DATA=${OUTPUT_DATA: : -1}']' + +OUTPUT='{"data":'$OUTPUT_DATA\ +',"error":"'$ERROR_CODE\ +'","errorString":"'$ERROR_STRING\ +'","version":"'$VERSION'"}' + +echo $OUTPUT + From 233a7d3e0b2d6648cdd31f8dc825714d7010636b Mon Sep 17 00:00:00 2001 From: SourceDoctor Date: Thu, 9 May 2019 22:32:54 +0200 Subject: [PATCH 202/497] fix configfile declaration --- snmp/dhcp-status.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/dhcp-status.sh b/snmp/dhcp-status.sh index 972214718..458cd02f5 100755 --- a/snmp/dhcp-status.sh +++ b/snmp/dhcp-status.sh @@ -13,7 +13,7 @@ BIN_SED='/usr/bin/sed' BIN_SORT='/usr/bin/sort' BIN_WC='/usr/bin/wc' -CONFIGFILE=dhcp-status.conf +CONFIGFILE=/etc/snmp/dhcp-status.conf if [ -f $CONFIGFILE ] ; then . dhcp-status.conf fi From 2eb26bf5a02095a3fc36d0d95049203942431791 Mon Sep 17 00:00:00 2001 From: SourceDoctor Date: Tue, 14 May 2019 00:55:02 +0200 Subject: [PATCH 203/497] mdadm config file fix (#233) --- snmp/mdadm | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/mdadm b/snmp/mdadm index fdd40b983..f6340d487 100755 --- a/snmp/mdadm +++ b/snmp/mdadm @@ -3,7 +3,7 @@ CAT=/bin/cat LS=/bin/ls -CONFIGFILE=$0.conf +CONFIGFILE=/etc/snmp/mdadm.conf if [ -f $CONFIGFILE ] ; then . 
$CONFIGFILE fi From 97a93f0d18d8ba9fc2ce3536063c3acbd018a3e6 Mon Sep 17 00:00:00 2001 From: SourceDoctor Date: Thu, 23 May 2019 23:57:56 +0200 Subject: [PATCH 204/497] enhance smart to show power_on_hours also --- snmp/smart | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/snmp/smart b/snmp/smart index e31fd8838..309d8f81f 100755 --- a/snmp/smart +++ b/snmp/smart @@ -267,7 +267,6 @@ foreach my $line ( @disks ){ }else{ $output=`$smartctl -A /dev/$disk`; } - my %IDs=( '5'=>'null', '10'=>'null', '173'=>'null', @@ -284,6 +283,7 @@ foreach my $line ( @disks ){ '199'=>'null', '231'=>'null', '233'=>'null', + '9'=>'null', ); my @outputA=split( /\n/, $output ); @@ -317,6 +317,12 @@ foreach my $line ( @disks ){ $IDs{$id}=$raw; } + # 9, power on hours + if ( $id == 9 ) { + my @runtime=split(/\ /, $raw); + $IDs{$id}=$runtime[0]; + } + # 188, Command_Timeout if ( $id == 188 ) { my $total=0; @@ -331,14 +337,13 @@ foreach my $line ( @disks ){ # 190, airflow temp # 194, temp - if ( + if ( ( $id == 190 ) || ( $id == 194 ) ) { my ( $temp )=split(/\ /, $raw); $IDs{$id}=$temp; } - } # SAS Wrapping @@ -398,7 +403,7 @@ foreach my $line ( @disks ){ $toReturn=$toReturn.$disk_id.','.$IDs{'5'}.','.$IDs{'10'}.','.$IDs{'173'}.','.$IDs{'177'}.','.$IDs{'183'}.','.$IDs{'184'}.','.$IDs{'187'}.','.$IDs{'188'} .','.$IDs{'190'} .','.$IDs{'194'}.','.$IDs{'196'}.','.$IDs{'197'}.','.$IDs{'198'}.','.$IDs{'199'}.','.$IDs{'231'}.','.$IDs{'233'}.','. 
- $completed.','.$interrupted.','.$read_failure.','.$unknown_failure.','.$extended.','.$short.','.$conveyance.','.$selective."\n"; + $completed.','.$interrupted.','.$read_failure.','.$unknown_failure.','.$extended.','.$short.','.$conveyance.','.$selective.','.$IDs{'9'}."\n"; } From 173953a207e7974d6c8746da98aaac8bcf60939a Mon Sep 17 00:00:00 2001 From: Shao Yu Lung Date: Mon, 10 Jun 2019 10:58:35 +0800 Subject: [PATCH 205/497] add nginx agent use python3 --- snmp/nginx-python3.py | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) create mode 100755 snmp/nginx-python3.py diff --git a/snmp/nginx-python3.py b/snmp/nginx-python3.py new file mode 100755 index 000000000..4bfb3a402 --- /dev/null +++ b/snmp/nginx-python3.py @@ -0,0 +1,28 @@ +#!/usr/bin/env python3 +from urllib.request import urlopen +import re + +data = urlopen('http://localhost/nginx-status').read() + +params = {} + +for line in data.decode().split("\n"): + smallstat = re.match(r"\s?Reading:\s(.*)\sWriting:\s(.*)\sWaiting:\s(.*)$", line) + req = re.match(r"\s+(\d+)\s+(\d+)\s+(\d+)", line) + if smallstat: + params["Reading"] = smallstat.group(1) + params["Writing"] = smallstat.group(2) + params["Waiting"] = smallstat.group(3) + elif req: + params["Requests"] = req.group(3) + else: + pass + +dataorder = ["Active", "Reading", "Writing", "Waiting", "Requests"] + +for param in dataorder: + if param == "Active": + Active = int(params["Reading"]) + int(params["Writing"]) + int(params["Waiting"]) + print(Active) + else: + print(params[param]) From 8a37ed581c004ea279c863e0ebc5183cf84a3f71 Mon Sep 17 00:00:00 2001 From: Shao Yu Lung Date: Mon, 10 Jun 2019 12:27:53 +0800 Subject: [PATCH 206/497] reformat --- snmp/nginx-python3.py | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/snmp/nginx-python3.py b/snmp/nginx-python3.py index 4bfb3a402..e2a64118d 100755 --- a/snmp/nginx-python3.py +++ b/snmp/nginx-python3.py @@ -7,22 +7,22 @@ params = {} for line 
in data.decode().split("\n"): - smallstat = re.match(r"\s?Reading:\s(.*)\sWriting:\s(.*)\sWaiting:\s(.*)$", line) - req = re.match(r"\s+(\d+)\s+(\d+)\s+(\d+)", line) - if smallstat: - params["Reading"] = smallstat.group(1) - params["Writing"] = smallstat.group(2) - params["Waiting"] = smallstat.group(3) - elif req: - params["Requests"] = req.group(3) - else: - pass + smallstat = re.match(r"\s?Reading:\s(.*)\sWriting:\s(.*)\sWaiting:\s(.*)$", line) + req = re.match(r"\s+(\d+)\s+(\d+)\s+(\d+)", line) + if smallstat: + params["Reading"] = smallstat.group(1) + params["Writing"] = smallstat.group(2) + params["Waiting"] = smallstat.group(3) + elif req: + params["Requests"] = req.group(3) + else: + pass dataorder = ["Active", "Reading", "Writing", "Waiting", "Requests"] for param in dataorder: - if param == "Active": - Active = int(params["Reading"]) + int(params["Writing"]) + int(params["Waiting"]) - print(Active) - else: - print(params[param]) + if param == "Active": + Active = int(params["Reading"]) + int(params["Writing"]) + int(params["Waiting"]) + print(Active) + else: + print(params[param]) From 9f6927383dade45cc9c289de1a616a91e649945a Mon Sep 17 00:00:00 2001 From: Shao Yu Lung Date: Mon, 10 Jun 2019 12:28:13 +0800 Subject: [PATCH 207/497] add nginx agent use python3 --- agent-local/nginx-python3.py | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) create mode 100755 agent-local/nginx-python3.py diff --git a/agent-local/nginx-python3.py b/agent-local/nginx-python3.py new file mode 100755 index 000000000..2464f89d1 --- /dev/null +++ b/agent-local/nginx-python3.py @@ -0,0 +1,30 @@ +#!/usr/bin/env python3 +from urllib.request import urlopen +import re + +data = urlopen('http://127.0.0.1/nginx-status').read() + +params = {} + +for line in data.decode().split("\n"): + smallstat = re.match(r"\s?Reading:\s(.*)\sWriting:\s(.*)\sWaiting:\s(.*)$", line) + req = re.match(r"\s+(\d+)\s+(\d+)\s+(\d+)", line) + if smallstat: + params["Reading"] = 
smallstat.group(1) + params["Writing"] = smallstat.group(2) + params["Waiting"] = smallstat.group(3) + elif req: + params["Requests"] = req.group(3) + else: + pass + +dataorder = ["Active", "Reading", "Writing", "Waiting", "Requests"] + +print("<<>>\n") + +for param in dataorder: + if param == "Active": + Active = int(params["Reading"]) + int(params["Writing"]) + int(params["Waiting"]) + print(Active) + else: + print(params[param]) From dc1084daeb8a0946be2728c5c4ecdcec16996139 Mon Sep 17 00:00:00 2001 From: SourceDoctor Date: Sat, 15 Jun 2019 14:40:55 +0200 Subject: [PATCH 208/497] fixing configfile call (#235) --- snmp/dhcp-status.sh | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/snmp/dhcp-status.sh b/snmp/dhcp-status.sh index 458cd02f5..a629d0a32 100755 --- a/snmp/dhcp-status.sh +++ b/snmp/dhcp-status.sh @@ -4,7 +4,6 @@ # edit your snmpd.conf add the below line and restart snmpd # # extend dhcpstats /opt/dhcp-status.sh # ################################################################ - FILE_DHCP='/var/lib/dhcp/db/dhcpd.leases' BIN_CAT='/usr/bin/cat' BIN_GREP='/usr/bin/grep' @@ -15,7 +14,7 @@ BIN_WC='/usr/bin/wc' CONFIGFILE=/etc/snmp/dhcp-status.conf if [ -f $CONFIGFILE ] ; then - . dhcp-status.conf + . $CONFIGFILE fi DHCP_LEASES='^lease' From 147187d9a76311ca7a671266d8fa9e9acf2c97f4 Mon Sep 17 00:00:00 2001 From: "Zane C. 
Bowers-Hadley" Date: Tue, 25 Jun 2019 02:13:05 -0500 Subject: [PATCH 209/497] fix occasionally random ordering --- snmp/ups-apcups | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/snmp/ups-apcups b/snmp/ups-apcups index f3f45d7df..dd377048b 100755 --- a/snmp/ups-apcups +++ b/snmp/ups-apcups @@ -103,9 +103,14 @@ if ( $toReturn{error} == 0 ){ # pulls apart the output my @lines=split(/\n/, $apcaccess_output); foreach my $line ( @lines ){ - my ( $var, $val )=split(/\ *\:\ */, $line, 2); - $val=~s/\ .*//; - $status{$var}=$val; + my ( $var, $val )=split(/\:\ */, $line, 2); + if ( + defined( $var ) && defined( $val ) + ){ + $var=~s/\ .*//; + $val=~s/\ .*//; + $status{$var}=$val; + } } #pull the desired variables from the output @@ -123,6 +128,7 @@ $toReturn{data}=\%data; # convert $toReturn to JSON and pretty print if asked to my $j=JSON->new; +$j->canonical(1); if ( $opts{p} ){ $j->pretty(1); } From bbb2e795114498e990c3a08c2a84da7fd59ae3b2 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Tue, 25 Jun 2019 02:17:47 -0500 Subject: [PATCH 210/497] indent fix --- snmp/ups-apcups | 27 +++++++++++++-------------- 1 file changed, 13 insertions(+), 14 deletions(-) diff --git a/snmp/ups-apcups b/snmp/ups-apcups index dd377048b..5cd682c04 100755 --- a/snmp/ups-apcups +++ b/snmp/ups-apcups @@ -84,29 +84,28 @@ my $apcaccess_output=`$apcaccess`; $toReturn{error}=$?; # check for bad exit codes -if ( $? == -1){ +if ( $? == -1) { $toReturn{errorString}='failed to run apcaccess'; -} -elsif ($? & 127) { +} elsif ($? & 127) { $toReturn{errorString}= sprintf "apcaccess died with signal %d, %s coredump\n", - ($? & 127), ($? & 128) ? 'with' : 'without'; + ($? & 127), ($? & 128) ? 'with' : 'without'; } else { - $toReturn{error}=$? >> 8; - $toReturn{errorString}="apcaccess exited with ".$toReturn{error}; + $toReturn{error}=$? 
>> 8; + $toReturn{errorString}="apcaccess exited with ".$toReturn{error}; } # if no bad exit codes, we can process $apcaccess_output -if ( $toReturn{error} == 0 ){ +if ( $toReturn{error} == 0 ) { # holds the found data for the apcupsd status my %status; # pulls apart the output my @lines=split(/\n/, $apcaccess_output); - foreach my $line ( @lines ){ + foreach my $line ( @lines ) { my ( $var, $val )=split(/\:\ */, $line, 2); if ( - defined( $var ) && defined( $val ) - ){ + defined( $var ) && defined( $val ) + ) { $var=~s/\ .*//; $val=~s/\ .*//; $status{$var}=$val; @@ -129,11 +128,11 @@ $toReturn{data}=\%data; # convert $toReturn to JSON and pretty print if asked to my $j=JSON->new; $j->canonical(1); -if ( $opts{p} ){ - $j->pretty(1); +if ( $opts{p} ) { + $j->pretty(1); } print $j->encode( \%toReturn ); -if (! $opts{p} ){ - print "\n"; +if (! $opts{p} ) { + print "\n"; } exit 0; From 50481feaf23bc7d6fecc0de732506d4cf8e015c1 Mon Sep 17 00:00:00 2001 From: SourceDoctor Date: Sun, 30 Jun 2019 22:50:29 +0200 Subject: [PATCH 211/497] mdadm name independent mdadm array detection --- snmp/mdadm | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/snmp/mdadm b/snmp/mdadm index f6340d487..ed291bccd 100755 --- a/snmp/mdadm +++ b/snmp/mdadm @@ -2,6 +2,8 @@ CAT=/bin/cat LS=/bin/ls +BASENAME=/usr/bin/basename +REALPATH=/usr/bin/realpath CONFIGFILE=/etc/snmp/mdadm.conf if [ -f $CONFIGFILE ] ; then @@ -15,7 +17,8 @@ ERROR_STRING="" OUTPUT_DATA='['\ if [ -d /dev/md ] ; then - for RAID in /sys/block/md* ; do + for ARRAY_BLOCKDEVICE in $(ls -1 /dev/md/*) ; do + RAID="/sys/block/"$($BASENAME $($REALPATH $ARRAY_BLOCKDEVICE)) # ignore arrays with no slaves if [ -z "$($LS -1 $RAID/slaves)" ] ; then @@ -26,7 +29,11 @@ if [ -d /dev/md ] ; then continue fi - RAID_NAME=$(basename $RAID) + if [[ $($BASENAME $ARRAY_BLOCKDEVICE) = [[:digit:]] ]]; then + RAID_NAME=$($BASENAME $RAID) + else + RAID_NAME=$($BASENAME $ARRAY_BLOCKDEVICE) + fi RAID_DEV_LIST=$($LS $RAID/slaves/) 
RAID_LEVEL=$($CAT $RAID/md/level) RAID_DISC_COUNT=$($CAT $RAID/md/raid_disks) From 0e46840a4a20a00f40b15aa4a5575ceec153bfd7 Mon Sep 17 00:00:00 2001 From: Zmegolaz Date: Mon, 1 Jul 2019 22:00:41 +0200 Subject: [PATCH 212/497] Split SMART power on hours on h too, not only space --- snmp/smart | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/smart b/snmp/smart index 309d8f81f..75a02ff0e 100755 --- a/snmp/smart +++ b/snmp/smart @@ -319,7 +319,7 @@ foreach my $line ( @disks ){ # 9, power on hours if ( $id == 9 ) { - my @runtime=split(/\ /, $raw); + my @runtime=split(/[\ h]/, $raw); $IDs{$id}=$runtime[0]; } From 59abe10656866f4207d245e0fd5cfd5e8584bec8 Mon Sep 17 00:00:00 2001 From: Mark Schouten Date: Wed, 3 Jul 2019 11:10:29 +0200 Subject: [PATCH 213/497] Detect current Ceph version and change statistics commands based on those versions. Fixes #186 --- agent-local/ceph | 23 +++++++++++++++++++---- 1 file changed, 19 insertions(+), 4 deletions(-) diff --git a/agent-local/ceph b/agent-local/ceph index 29298510a..9a83d3989 100755 --- a/agent-local/ceph +++ b/agent-local/ceph @@ -17,6 +17,11 @@ from subprocess import check_output import json +def cephversion(): + cephv = check_output(["/usr/bin/ceph", "version"]).replace('ceph version ', '') + major, minor = cephv.split('.')[0:2] + return [int(major), int(minor)] + def cephdf(): cephdf = check_output(["/usr/bin/ceph", "-f", "json", "df"]).replace('-inf', '0') @@ -44,12 +49,18 @@ def cephdf(): def osdperf(): + global major osdperf = check_output(["/usr/bin/ceph", "-f", "json", "osd", "perf"]).replace('-inf', '0') - for o in json.loads(osdperf)['osd_perf_infos']: - print("osd.%s:%i:%i" % (o['id'], o['perf_stats']['apply_latency_ms'], o['perf_stats']['commit_latency_ms'])) + if major > 13: + for o in json.loads(osdperf)['osdstats']['osd_perf_infos']: + print("osd.%s:%i:%i" % (o['id'], o['perf_stats']['apply_latency_ms'], o['perf_stats']['commit_latency_ms'])) + else: + for o in 
json.loads(osdperf)['osd_perf_infos']: + print("osd.%s:%i:%i" % (o['id'], o['perf_stats']['apply_latency_ms'], o['perf_stats']['commit_latency_ms'])) def poolstats(): + global major poolstats = check_output(["/usr/bin/ceph", "-f", "json", "osd", "pool", "stats"]).replace('-inf', '0') for p in json.loads(poolstats): @@ -62,12 +73,17 @@ def poolstats(): except: w = 0 try: - o = p['client_io_rate']['op_per_sec'] + if major > 11: + o = p['client_io_rate']['read_op_per_sec'] + p['client_io_rate']['write_op_per_sec'] + else: + o = p['client_io_rate']['op_per_sec'] except: o = 0 print("%s:%i:%i:%i" % (p['pool_name'], o, w, r)) +major, minor = cephversion() + print "<<>>" print "" poolstats() @@ -75,4 +91,3 @@ print "" osdperf() print "" cephdf() - From c8afadd43a69d2b04296d8556c6d91482537a330 Mon Sep 17 00:00:00 2001 From: Mark Schouten Date: Wed, 3 Jul 2019 12:05:14 +0200 Subject: [PATCH 214/497] Copy snmp script inside the package as well --- Makefile | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 31d16d2df..ac4690ce5 100644 --- a/Makefile +++ b/Makefile @@ -4,7 +4,9 @@ install: mkdir -p $(PREFIX)/usr/lib/check_mk_agent/plugins mkdir -p $(PREFIX)/usr/lib/check_mk_agent/repo mkdir -p $(PREFIX)/usr/lib/check_mk_agent/local - cp -r agent-local/* $(PREFIX)/usr/lib/check_mk_agent/repo/ + mkdir -p $(PREFIX)/usr/share/librenms-agent/snmp + cp -rL agent-local/* $(PREFIX)/usr/lib/check_mk_agent/repo/ + cp -rL snmp/* $(PREFIX)/usr/share/librenms-agent/snmp rm $(PREFIX)/usr/lib/check_mk_agent/repo/README mkdir -p $(PREFIX)/usr/bin install -m 0750 check_mk_agent $(PREFIX)/usr/bin/check_mk_agent From 271e59034fe55f752156b7ba027ddd6cd75664cb Mon Sep 17 00:00:00 2001 From: Mark Schouten Date: Wed, 3 Jul 2019 12:06:15 +0200 Subject: [PATCH 215/497] New debian changelog --- debian/changelog | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/debian/changelog b/debian/changelog index 222de759c..8fcd34695 100644 --- a/debian/changelog 
+++ b/debian/changelog @@ -1,3 +1,12 @@ +librenms-agent (1.1.0) stable; urgency=low + + - New upstream versions + - Include SNMP scripts + - Fix Ceph scripts + - Fix nginx scripts + + -- Mark Schouten Wed, 03 Jul 2019 12:06:00 +0200 + librenms-agent (1.0.7) stable; urgency=low - New upstream versions From 7fc9d85edc43832e648fddfc616b09ee1afee01c Mon Sep 17 00:00:00 2001 From: "Shao Yu-Lung (Allen)" Date: Wed, 10 Jul 2019 11:48:49 +0800 Subject: [PATCH 216/497] add zfs support for freebsd use python 3 copy from #166. --- snmp/zfs-freebsd.py | 116 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 116 insertions(+) create mode 100644 snmp/zfs-freebsd.py diff --git a/snmp/zfs-freebsd.py b/snmp/zfs-freebsd.py new file mode 100644 index 000000000..3ef92c889 --- /dev/null +++ b/snmp/zfs-freebsd.py @@ -0,0 +1,116 @@ +#!/usr/local/bin/python3 + +# FreeNAS 11.1 not support #!/usr/bin/env python3 + +import json +import subprocess + +def percent(numerator, denominator, default=0): + try: + return numerator / denominator * 100 + except ZeroDivisionError: + return default + +def main(args): + p = subprocess.run(['/sbin/sysctl', '-q', 'kstat.zfs', 'vfs.zfs'], stdout=subprocess.PIPE, universal_newlines=True) + + if p.returncode != 0: + return p.returncode + + def chomp(line): + bits = [b.strip() for b in line.split(':')] + return bits[0], int(bits[1]) + stats = dict(chomp(l) for l in p.stdout.splitlines()) + if 'kstat.zfs.misc.arcstats.recycle_miss' not in stats: + stats['kstat.zfs.misc.arcstats.recycle_miss'] = 0 + + output = dict() + + # ARC misc + output['deleted'] = stats['kstat.zfs.misc.arcstats.deleted'] + output['evict_skip'] = stats['kstat.zfs.misc.arcstats.evict_skip'] + output['mutex_skip'] = stats['kstat.zfs.misc.arcstats.mutex_miss'] + output['recycle_miss'] = stats['kstat.zfs.misc.arcstats.recycle_miss'] + + # ARC size + output['target_size_per'] = stats['kstat.zfs.misc.arcstats.c'] / stats['kstat.zfs.misc.arcstats.c_max'] * 100 + output['arc_size_per'] = 
stats['kstat.zfs.misc.arcstats.size'] / stats['kstat.zfs.misc.arcstats.c_max'] * 100 + output['target_size_arat'] = stats['kstat.zfs.misc.arcstats.c'] / stats['kstat.zfs.misc.arcstats.c_max'] + output['min_size_per'] = stats['kstat.zfs.misc.arcstats.c_min'] / stats['kstat.zfs.misc.arcstats.c_max'] * 100 + + output['arc_size'] = stats['kstat.zfs.misc.arcstats.size'] + output['target_size_max'] = stats['kstat.zfs.misc.arcstats.c_max'] + output['target_size_min'] = stats['kstat.zfs.misc.arcstats.c_min'] + output['target_size'] = stats['kstat.zfs.misc.arcstats.c'] + + # ARC size breakdown + output['mfu_size'] = stats['kstat.zfs.misc.arcstats.size'] - stats['kstat.zfs.misc.arcstats.p'] + output['p'] = stats['kstat.zfs.misc.arcstats.p'] + output['rec_used_per'] = stats['kstat.zfs.misc.arcstats.p'] / stats['kstat.zfs.misc.arcstats.size'] * 100 + output['freq_used_per'] = output['mfu_size'] / stats['kstat.zfs.misc.arcstats.size'] * 100 + + # ARC misc efficiency stats + output['arc_hits'] = stats['kstat.zfs.misc.arcstats.hits'] + output['arc_misses'] = stats['kstat.zfs.misc.arcstats.misses'] + output['demand_data_hits'] = stats['kstat.zfs.misc.arcstats.demand_data_hits'] + output['demand_data_misses'] = stats['kstat.zfs.misc.arcstats.demand_data_misses'] + output['demand_meta_hits'] = stats['kstat.zfs.misc.arcstats.demand_metadata_hits'] + output['demand_meta_misses'] = stats['kstat.zfs.misc.arcstats.demand_metadata_misses'] + output['mfu_ghost_hits'] = stats['kstat.zfs.misc.arcstats.mfu_ghost_hits'] + output['mfu_hits'] = stats['kstat.zfs.misc.arcstats.mfu_hits'] + output['mru_ghost_hits'] = stats['kstat.zfs.misc.arcstats.mru_ghost_hits'] + output['mru_hits'] = stats['kstat.zfs.misc.arcstats.mru_hits'] + output['pre_data_hits'] = stats['kstat.zfs.misc.arcstats.prefetch_data_hits'] + output['pre_data_misses'] = stats['kstat.zfs.misc.arcstats.prefetch_data_misses'] + output['pre_meta_hits'] = stats['kstat.zfs.misc.arcstats.prefetch_metadata_hits'] + output['pre_meta_misses'] 
= stats['kstat.zfs.misc.arcstats.prefetch_metadata_misses'] + + output['anon_hits'] = output['arc_hits'] - (output['mfu_hits'] + output['mru_hits'] + output['mfu_ghost_hits'] + output['mru_ghost_hits']) + output['arc_accesses_total'] = output['arc_hits'] + output['arc_misses'] + output['demand_data_total'] = output['demand_data_hits'] + output['demand_data_misses'] + output['pre_data_total'] = output['pre_data_hits'] + output['pre_data_misses'] + output['real_hits'] = output['mfu_hits'] + output['mru_hits'] + + # ARC efficiency percents + output['cache_hits_per'] = percent(output['arc_hits'], output['arc_accesses_total']) + output['cache_miss_per'] = percent(output['arc_misses'], output['arc_accesses_total']) + output['actual_hit_per'] = percent(output['real_hits'], output['arc_accesses_total']) + output['data_demand_per'] = percent(output['demand_data_hits'], output['demand_data_total']) + output['data_pre_per'] = percent(output['pre_data_hits'], output['pre_data_total']) + output['anon_hits_per'] = percent(output['anon_hits'], output['arc_hits']) + output['mru_per'] = percent(output['mru_hits'], output['arc_hits']) + output['mfu_per'] = percent(output['mfu_hits'], output['arc_hits']) + output['mru_ghost_per'] = percent(output['mru_ghost_hits'], output['arc_hits']) + output['mfu_ghost_per'] = percent(output['mfu_ghost_hits'], output['arc_hits']) + output['demand_hits_per'] = percent(output['demand_data_hits'], output['arc_hits']) + output['pre_hits_per'] = percent(output['pre_data_hits'], output['arc_hits']) + output['meta_hits_per'] = percent(output['demand_meta_hits'], output['arc_hits']) + output['pre_meta_hits_per'] = percent(output['pre_meta_hits'], output['arc_hits']) + output['demand_misses_per'] = percent(output['demand_data_misses'], output['arc_misses']) + output['pre_misses_per'] = percent(output['pre_data_misses'], output['arc_misses']) + output['meta_misses_per'] = percent(output['demand_meta_misses'], output['arc_misses']) + 
output['pre_meta_misses_per'] = percent(output['pre_meta_misses'], output['arc_misses']) + + # pools + p = subprocess.run(['/sbin/zpool', 'list', '-pH'], stdout=subprocess.PIPE, universal_newlines=True) + if p.returncode != 0: + return p.returncode + output['pools'] = [] + fields = ['name', 'size', 'alloc', 'free', 'expandsz', 'frag', 'cap', 'dedup'] + for l in p.stdout.splitlines(): + p = dict(zip(fields, l.split('\t'))) + if p['expandsz'] == '-': + p['expandsz'] = 0 + p['frag'] = p['frag'].rstrip('%') + if p['frag'] == '-': + p['frag'] = 0 + p['dedup'] = p['dedup'].rstrip('x') + output['pools'].append(p) + + print(json.dumps(output)) + + return 0 + +if __name__ == '__main__': + import sys + sys.exit(main(sys.argv[1:])) From 5d442f2b33a8a02e229e4b45b3d836a6f81e7b50 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Thu, 8 Aug 2019 21:18:06 -0500 Subject: [PATCH 217/497] /tmp fix (#250) * convert the snmp scripts using tmp files over to use mktemp * reverse this... joy... not a temp file but cache file ;( * moved cache file from under /tmp to /var/cache/librenms * fix mysql tmp usage --- agent-local/apache | 2 +- agent-local/dpkg | 2 +- agent-local/mysql | 8 ++++---- agent-local/nfsstats | 4 ++-- agent-local/rpm | 2 +- snmp/apache-stats | 2 +- snmp/apache-stats.py | 2 +- snmp/mysql | 8 ++++---- snmp/nfs-stats.sh | 6 +++--- snmp/powerdns-dnsdist | 2 +- 10 files changed, 19 insertions(+), 19 deletions(-) diff --git a/agent-local/apache b/agent-local/apache index c0d9a795e..a5f0927cd 100755 --- a/agent-local/apache +++ b/agent-local/apache @@ -14,7 +14,7 @@ use LWP::Simple; $CACHETIME = 30; -$CACHEFILE = '/tmp/agent-local-apache'; +$CACHEFILE = '/var/cache/librenms/agent-local-apache'; # check for cache file newer CACHETIME seconds ago if ( -f $CACHEFILE && time - (stat( $CACHEFILE ))[9] < $CACHETIME) { diff --git a/agent-local/dpkg b/agent-local/dpkg index 1c3d08011..70917ba72 100755 --- a/agent-local/dpkg +++ b/agent-local/dpkg @@ -4,7 +4,7 @@ # We cache 
because it is a 1sec delay, which is painful for the poller if [ -x /usr/bin/dpkg-query ]; then DATE=$(date +%s) - FILE=/tmp/agent-local-dpkg + FILE=/var/cache/librenms/agent-local-dpkg if [ ! -e $FILE ]; then dpkg-query -W --showformat='${Status} ${Package} ${Version} ${Architecture} ${Installed-Size}\n'|grep " installed "|cut -d\ -f4- > $FILE diff --git a/agent-local/mysql b/agent-local/mysql index 9277efc91..0b9419fd0 100755 --- a/agent-local/mysql +++ b/agent-local/mysql @@ -27,7 +27,7 @@ if (!array_key_exists('SCRIPT_FILENAME', $_SERVER) # ============================================================================ # CONFIGURATION # ============================================================================ -# Define MySQL connection constants in config.php. Instead of defining +# Define MySQL connection constants in config.php. Instead of defining # parameters here, you can define them in another file named the same as this # file, with a .cnf extension. # ============================================================================ @@ -48,7 +48,7 @@ $heartbeat_server_id = 0; # Server id to associate with a heartbeat. Leave 0 if $heartbeat_table = 'percona.heartbeat'; # db.tbl. -$cache_dir = '/tmp'; # If set, this uses caching to avoid multiple calls. +$cache_dir = '/var/cache/librenms'; # If set, this uses caching to avoid multiple calls. $timezone = null; # If not set, uses the system default. Example: "UTC" $cache_time = 30; # How long to cache data. @@ -286,7 +286,7 @@ function ss_get_mysql_stats( $options ) { $flags = isset($options['flags']) ? $options['flags'] : $mysql_flags; $connection_timeout = isset($options['connection-timeout']) ? $options['connection-timeout'] : $mysql_connection_timeout; $heartbeat_server_id = isset($options['server-id']) ? $options['server-id'] : $heartbeat_server_id; - + # If there is a port, or if it's a non-standard port, we add ":$port" to the # hostname. $host_str = $host.($port != 3306 ? 
":$port" : ''); @@ -471,7 +471,7 @@ function ss_get_mysql_stats( $options ) { debug("Got nothing from SHOW SLAVE STATUS"); } } - + # Get SHOW MASTER STATUS, and add it to the $status array. if ($chk_options['master'] && array_key_exists('log_bin', $status) diff --git a/agent-local/nfsstats b/agent-local/nfsstats index 02a834cdb..404e2fd06 100755 --- a/agent-local/nfsstats +++ b/agent-local/nfsstats @@ -15,8 +15,8 @@ BIN_GREP='/usr/bin/grep' BIN_PASTE='/usr/bin/paste' BIN_RM='/usr/bin/rm' BIN_MV='/usr/bin/mv' -LOG_OLD='/tmp/nfsstats_old' -LOG_NEW='/tmp/nfsstats_new' +LOG_OLD='/var/cache/librenms/nfsstats_old' +LOG_NEW='/var/cache/librenms/nfsstats_new' $BIN_NFSSTAT -$CFG_NFSVER -n -l | $BIN_TR -s " " | $BIN_CUT -d ' ' -f 5 | $BIN_GREP -v '^$' > $LOG_NEW 2>&1 diff --git a/agent-local/rpm b/agent-local/rpm index 88483be1d..b27c0c44e 100755 --- a/agent-local/rpm +++ b/agent-local/rpm @@ -4,7 +4,7 @@ # We cache because it is a 1sec delay, which is painful for the poller if [ -x /bin/rpm ]; then DATE=$(date +%s) - FILE=/tmp/agent-local-rpm + FILE=/var/cache/librenms/agent-local-rpm if [ ! 
-e $FILE ]; then /bin/rpm -q --all --queryformat '%{N} %{V} %{R} %{ARCH} %{SIZE}\n' > $FILE fi diff --git a/snmp/apache-stats b/snmp/apache-stats index 863514aea..ef6574639 100755 --- a/snmp/apache-stats +++ b/snmp/apache-stats @@ -14,7 +14,7 @@ use LWP::Simple; $CACHETIME = 30; -$CACHEFILE = '/tmp/snmp-cache-apache'; +$CACHEFILE = '/var/cache/librenms/snmp-cache-apache'; # check for cache file newer CACHETIME seconds ago if ( -f $CACHEFILE && time - (stat( $CACHEFILE ))[9] < $CACHETIME) { diff --git a/snmp/apache-stats.py b/snmp/apache-stats.py index 378d858e8..f098a8c55 100755 --- a/snmp/apache-stats.py +++ b/snmp/apache-stats.py @@ -19,7 +19,7 @@ # CACHETIME = 30 -CACHEFILE = '/tmp/apache-snmp' +CACHEFILE = '/var/cache/librenms/apache-snmp' # check for cache file newer CACHETIME seconds ago import os diff --git a/snmp/mysql b/snmp/mysql index 27833e016..44e31e289 100755 --- a/snmp/mysql +++ b/snmp/mysql @@ -27,7 +27,7 @@ if (!array_key_exists('SCRIPT_FILENAME', $_SERVER) # ============================================================================ # CONFIGURATION # ============================================================================ -# Define MySQL connection constants in config.php. Instead of defining +# Define MySQL connection constants in config.php. Instead of defining # parameters here, you can define them in another file named the same as this # file, with a .cnf extension. # ============================================================================ @@ -49,7 +49,7 @@ $heartbeat_utc = FALSE; # Whether pt-heartbeat is run with --utc option. $heartbeat_server_id = 0; # Server id to associate with a heartbeat. Leave 0 if no preference. $heartbeat_table = 'percona.heartbeat'; # db.tbl. -$cache_dir = '/tmp'; # If set, this uses caching to avoid multiple calls. +$cache_dir = '/var/cache/librenms'; # If set, this uses caching to avoid multiple calls. $timezone = null; # If not set, uses the system default. 
Example: "UTC" $cache_time = 30; # How long to cache data. @@ -289,7 +289,7 @@ function ss_get_mysql_stats( $options ) { $flags = isset($options['flags']) ? $options['flags'] : $mysql_flags; $connection_timeout = isset($options['connection-timeout']) ? $options['connection-timeout'] : $mysql_connection_timeout; $heartbeat_server_id = isset($options['server-id']) ? $options['server-id'] : $heartbeat_server_id; - + # If there is a port, or if it's a non-standard port, we add ":$port" to the # hostname. $host_str = $host.($port != 3306 ? ":$port" : ''); @@ -474,7 +474,7 @@ function ss_get_mysql_stats( $options ) { debug("Got nothing from SHOW SLAVE STATUS"); } } - + # Get SHOW MASTER STATUS, and add it to the $status array. if ($chk_options['master'] && array_key_exists('log_bin', $status) diff --git a/snmp/nfs-stats.sh b/snmp/nfs-stats.sh index 25bbb6b1e..22d8d3f8f 100755 --- a/snmp/nfs-stats.sh +++ b/snmp/nfs-stats.sh @@ -12,9 +12,9 @@ BIN_TR='/usr/bin/tr' BIN_PASTE='/usr/bin/paste' BIN_RM='/usr/bin/rm' BIN_MV='/usr/bin/mv' -LOG_OLD='/tmp/nfsio_old' -LOG_NEW='/tmp/nfsio_new' -LOG_FIX='/tmp/nfsio_fix' +LOG_OLD='/var/cache/librenms/nfsio_old' +LOG_NEW='/var/cache/librenms/nfsio_new' +LOG_FIX='/var/cache/librenms/nfsio_fix' #get reply cache (rc - values: hits, misses, nocache) $BIN_CAT $CFG_NFSFILE | $BIN_SED -n 1p | $BIN_AWK '{print $2,$3,$4}' | $BIN_TR " " "\n" > $LOG_NEW diff --git a/snmp/powerdns-dnsdist b/snmp/powerdns-dnsdist index 87eda58bd..0572fb5cd 100644 --- a/snmp/powerdns-dnsdist +++ b/snmp/powerdns-dnsdist @@ -6,7 +6,7 @@ API_AUTH_USER="admin" API_AUTH_PASS="" API_URL="" API_STATS="jsonstat?command=stats" -TMP_FILE="/tmp/dnsdist_current.stats" +TMP_FILE=`/usr/bin/mktemp` #/ Description: BASH script to get PowerDNS dnsdist stats #/ Examples: ./powerdns-dnsdist From 1e1cb1df4947e644cb8286a593df531498e90139 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fr=C3=A9d=C3=A9ric=20Bourqui?= Date: Sun, 11 Aug 2019 09:59:26 +0200 Subject: [PATCH 218/497] Add support for 
BSD and Illumos kstat --- snmp/zfs-linux | 47 ++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 40 insertions(+), 7 deletions(-) diff --git a/snmp/zfs-linux b/snmp/zfs-linux index c5f36256c..3d1ab28cb 100644 --- a/snmp/zfs-linux +++ b/snmp/zfs-linux @@ -3,17 +3,45 @@ import json import subprocess def main(args): + LINUX = '/proc/spl/kstat/zfs/arcstats' + BSD1 = 'sysctl' + BSD2 = 'kstat.zfs.misc.arcstats' + ILLUMOS = 'kstat -n arcstats' + COLUMN = 1 + SPLIT = None res = {} - ARCSTATS = open('/proc/spl/kstat/zfs/arcstats', 'r') - LINES = ARCSTATS.readlines() + try: + LINES = open(LINUX, 'r').readlines() + COLUMN = 2 + + except IOError as e1: + try: + proc = subprocess.run([BSD1, BSD2], stdout=subprocess.PIPE, universal_newlines=True) + LINES = proc.stdout.splitlines() + LINES = [x[len(BSD2)+1:] for x in LINES] + SPLIT = ':' + except FileNotFoundError as e2: + try: + proc = subprocess.run(ILLUMOS.split(), stdout=subprocess.PIPE, universal_newlines=True) + LINES = proc.stdout.splitlines() + except FileNotFoundError as e3: + print('Linux :', e1) + print('BSD :', e2) + print('Illumos:', e3) + return 1 + LINES = [x.strip() for x in LINES] - + STATS = {} for line in LINES[2:]: - splitline = line.split() - STATS[splitline[0]] = int(splitline[2]) - + splitline = line.split(SPLIT) + try: + STATS[splitline[0]] = int(splitline[COLUMN]) + # Skip non int value like Illumos crtime, empty line at the end + except: + continue + # ARC misc DELETED = STATS['deleted'] EVICT_SKIP = STATS['evict_skip'] @@ -100,7 +128,10 @@ def main(args): return proc.returncode pools = [] - FIELDS = ['name', 'size', 'alloc', 'free', 'expandsz', 'frag', 'cap', 'dedup'] + FIELDS = ['name', 'size', 'alloc', 'free', 'ckpoint', 'expandsz', 'frag', 'cap', 'dedup', 'health', 'altroot'] + if len(proc.stdout.splitlines()[0].split('\t')) == 10: + FIELDS.remove('ckpoint') + for line in proc.stdout.splitlines(): info = dict(zip(FIELDS, line.split('\t'))) @@ -109,6 +140,8 @@ def main(args): info['frag'] 
= 0 if info['frag'] == '-' else info['frag'] info['dedup'] = info['dedup'].rstrip('x') info['cap'] = info['cap'].rstrip('%') + if 'ckpoint' in info: + info['ckpoint'] = 0 if info['ckpoint'] == '-' else info['ckpoint'] pools.append(info) From bbdbdf2760a99e8d94c26ccd0f4fbd3fc1c3f58b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fr=C3=A9d=C3=A9ric=20Bourqui?= Date: Sun, 11 Aug 2019 10:08:20 +0200 Subject: [PATCH 219/497] DEMAND_ used instead of PREFETCH_METADATA_MISSES --- snmp/zfs-linux | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/zfs-linux b/snmp/zfs-linux index 3d1ab28cb..e193c5ea2 100644 --- a/snmp/zfs-linux +++ b/snmp/zfs-linux @@ -120,7 +120,7 @@ def main(args): DEMAND_MISSES_PERCENT = DEMAND_DATA_MISSES / ARC_MISSES * 100 PREFETCH_MISSES_PERCENT = PREFETCH_DATA_MISSES / ARC_MISSES * 100 METADATA_MISSES_PERCENT = DEMAND_METADATA_MISSES / ARC_MISSES * 100 - PREFETCH_METADATA_MISSES_PERCENT = DEMAND_METADATA_MISSES / ARC_MISSES * 100 + PREFETCH_METADATA_MISSES_PERCENT = PREFETCH_METADATA_MISSES / ARC_MISSES * 100 # pools proc = subprocess.run(['/sbin/zpool', 'list', '-pH'], stdout=subprocess.PIPE, universal_newlines=True) From 5c3b1c1cb56784ee3eb6cc686f98eb528195c432 Mon Sep 17 00:00:00 2001 From: SourceDoctor Date: Mon, 21 Oct 2019 04:03:13 +0200 Subject: [PATCH 220/497] Seafile Monitoring (#249) --- snmp/seafile.py | 211 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 211 insertions(+) create mode 100755 snmp/seafile.py diff --git a/snmp/seafile.py b/snmp/seafile.py new file mode 100755 index 000000000..c34cf6e6e --- /dev/null +++ b/snmp/seafile.py @@ -0,0 +1,211 @@ +#!/usr/bin/env python3 +# -*- coding: utf-8 -*- + +# https://download.seafile.com/published/web-api/v2.1-admin + +# user -> libraries (count) +# user -> trash-libraries (count) +# user -> space consumption (count) +# user -> is activated (bool) + +# connected_devices (count) +# groups (count) + +# Clients -> plattform (count) +# Clients -> version (count) + 
+import requests +import json + +# Configfile content example: +# {"url": "https://seafile.mydomain.org", +# "username": "some_admin_login@mail.address", +# "password": "password", +# "account_identifier": "name", +# "hide_monitoring_account": true +# } + +CONFIGFILE='/etc/snmp/seafile.json' +error = 0 +error_string = '' +version = 1 + + +def get_data(url_path, data=None, token=None): + complete_url = "%s/%s" % (url, url_path) + headers = {'Accept': 'application/json'} + if token: + headers['Authorization'] = "Token %s" % token + + try: + if token: + r = requests.get(complete_url, data=data, headers=headers) + else: + r = requests.post(complete_url, data=data, headers=headers) + try: + return r.json() + except json.decoder.JSONDecodeError: + return 'no valid json returned - url correct?' + except requests.exceptions.RequestException as err: + return str(err) + + +def get_devices(): + # get all devices + url_path = 'api/v2.1/admin/devices/' + return get_data(url_path, token=token) + + +def get_groups(): + # get all groups + url_path = 'api/v2.1/admin/groups/' + return get_data(url_path, token=token) + + +def get_sysinfo(): + # get all groups + url_path = 'api/v2.1/admin/sysinfo/' + return get_data(url_path, token=token) + + +def get_account_information(): + # get all accounts withs details + account_list = [] + for account in get_data('api2/accounts/', token=token): + + # get account details + url_path = 'api2/accounts/%s/' % account['email'] + account_data = get_data(url_path, token=token) + + # get libraries by owner + url_path = 'api/v2.1/admin/libraries/?owner=%s' % account['email'] + account_data['repos'] = get_data(url_path, token=token)['repos'] + + # get deleted libraries by owner + url_path = 'api/v2.1/admin/trash-libraries/?owner=%s' % account['email'] + account_data['trash_repos'] = get_data(url_path, token=token)['repos'] + + account_list.append(account_data) + return account_list + + +def resort_devices(device_list): + data = {} + platform = {} + 
client_version = {} + for device in device_list: + # don't list information assigned to monitor account + if hide_monitoring_account: + if device['user'] == configfile['username']: + continue + + if device['platform'] not in platform.keys(): + platform[device['platform']] = 1 + else: + platform[device['platform']] += 1 + + if device['client_version'] not in client_version.keys(): + client_version[device['client_version']] = 1 + else: + client_version[device['client_version']] += 1 + + data['platform'] = [] + for k, v in platform.items(): + data['platform'].append({'os_name': k, + 'clients':v}) + data['client_version'] = [] + for k, v in client_version.items(): + data['client_version'].append({'client_version': k, + 'clients':v}) + + return data + + +def resort_groups(group_list): + data = {'count': len(group_list)} + return data + + +def resort_accounts(account_list): + if account_identifier in ['name', 'email']: + identifier = account_identifier + else: + identifier = 'name' + + accepted_key_list = ['is_active', 'usage'] + + data = [] + for user_account in account_list: + # don't list information assigned to monitor account + if hide_monitoring_account: + if user_account['email'] == configfile['username']: + continue + + new_account = {} + new_account['owner'] = user_account[identifier] + new_account['repos'] = len(user_account['repos']) + new_account['trash_repos'] = len(user_account['trash_repos']) + + for k in user_account.keys(): + if k not in accepted_key_list: + continue + new_account[k] = user_account[k] + data.append(new_account) + + return sorted(data, key=lambda k: k['owner'].lower()) + + +# ------------------------ MAIN -------------------------------------------------------- +with open(CONFIGFILE, 'r') as json_file: + try: + configfile = json.load(json_file) + except json.decoder.JSONDecodeError as e: + error = 1 + error_string = "Configfile Error: '%s'" % e + +if not error: + url = configfile['url'] + username = configfile['username'] + password = 
configfile['password'] + try: + account_identifier = configfile['account_identifier'] + except KeyError: + account_identifier = None + try: + hide_monitoring_account = configfile['hide_monitoring_account'] + except KeyError: + hide_monitoring_account = False + + # get token + login_data = {'username': username, 'password': password} + ret = get_data('api2/auth-token/', data=login_data) + if type(ret) != str: + if 'token' in ret.keys(): + token = ret['token'] + else: + error = 1 + try: + error_string = json.dumps(ret) + except: + error_string = ret + else: + error = 1 + error_string = ret + +data = {} +if not error: + ret= get_account_information() +if not error: + data['accounts'] = resort_accounts(ret) + data['devices'] = resort_devices(get_devices()['devices']) + data['groups'] = resort_groups(get_groups()['groups']) + data['sysinfo'] = get_sysinfo() + +output = {'error': error, + 'errorString': error_string, + 'version': version, + 'data': data + } + +print(json.dumps(output)) + From 5d1960a83f95cd931fcc0ef5693c6019e897051e Mon Sep 17 00:00:00 2001 From: sparknsh <38894705+sparknsh@users.noreply.github.com> Date: Mon, 21 Oct 2019 11:18:08 -0400 Subject: [PATCH 221/497] Verion fix for NTP SNMP (#256) The version variable was not pulled right on centos based distros. This fixes the issue. 
--- snmp/ntp-client | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/ntp-client b/snmp/ntp-client index 04db80655..925155abe 100755 --- a/snmp/ntp-client +++ b/snmp/ntp-client @@ -28,7 +28,7 @@ NTP_FREQUENCY=`$BIN_NTPQ -c rv | $BIN_GREP "frequency" | $BIN_AWK -Ffrequency= ' NTP_SYS_JITTER=`$BIN_NTPQ -c rv | $BIN_GREP "sys_jitter" | $BIN_AWK -Fsys_jitter= '{print $2}' | $BIN_AWK -F, '{print $1}'` NTP_CLK_JITTER=`$BIN_NTPQ -c rv | $BIN_GREP "clk_jitter" | $BIN_AWK -Fclk_jitter= '{print $2}' | $BIN_AWK -F, '{print $1}'` NTP_WANDER=`$BIN_NTPQ -c rv | $BIN_GREP "clk_wander" | $BIN_AWK -Fclk_wander= '{print $2}' | $BIN_AWK -F, '{print $1}'` -NTP_VERSION=`$BIN_NTPD --version | $BIN_AWK -F'ntpd ' '{print $2}' | $BIN_HEAD -c 1` +NTP_VERSION=`$BIN_NTPQ -c rv | $BIN_GREP "version" | $BIN_AWK -F'ntpd ' '{print $2}' | $BIN_AWK -F. '{print $1}'` echo '{"data":{"offset":"'$NTP_OFFSET'","frequency":"'$NTP_FREQUENCY'","sys_jitter":"'$NTP_SYS_JITTER'","clk_jitter":"'$NTP_CLK_JITTER'","clk_wander":"'$NTP_WANDER'"},"version":"'$NTP_VERSION'","error":"0","errorString":""}' From a2db073368ccefab26940306243c11932a23c410 Mon Sep 17 00:00:00 2001 From: SourceDoctor Date: Tue, 5 Nov 2019 22:55:41 +0100 Subject: [PATCH 222/497] snmp extend for monitoring certificate file validity (#239) --- snmp/certificate.py | 79 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 79 insertions(+) create mode 100755 snmp/certificate.py diff --git a/snmp/certificate.py b/snmp/certificate.py new file mode 100755 index 000000000..8957b7c9f --- /dev/null +++ b/snmp/certificate.py @@ -0,0 +1,79 @@ +#!/usr/bin/env python3 + +import socket +import ssl +import datetime +import json + + +CONFIGFILE='/etc/snmp/certificate.json' +# {"domains": [ +# {"fqdn": "www.mydomain.com"}, +# {"fqdn": "www2.mydomain.com"} +# ] +# } + + +def get_certificate_data(domain, port=443): + + context = ssl.create_default_context() + conn = context.wrap_socket( + socket.socket(socket.AF_INET), + 
server_hostname=domain, + ) + # 3 second timeout because Lambda has runtime limitations + conn.settimeout(3.0) + + try: + conn.connect((domain, port)) + error_msg = None + except ConnectionRefusedError as e: + error_msg = e + ssl_info = conn.getpeercert() + return ssl_info, error_msg + + +output = {} +output['error'] = 0 +output['errorString'] = "" +output['version'] = 1 + +with open(CONFIGFILE, 'r') as json_file: + try: + configfile = json.load(json_file) + except json.decoder.JSONDecodeError as e: + output['error'] = 1 + output['errorString'] = "Configfile Error: '%s'" % e + +if not output['error']: + output_data_list = [] + for domain in configfile['domains']: + output_data = {} + + if 'port' not in domain.keys(): + domain['port'] = 443 + certificate_data, error_msg = get_certificate_data(domain['fqdn'], domain['port']) + + output_data['cert_name'] = domain['fqdn'] + + if not error_msg: + ssl_date_format = r'%b %d %H:%M:%S %Y %Z' + validity_end = datetime.datetime.strptime(certificate_data['notAfter'], ssl_date_format) + validity_start = datetime.datetime.strptime(certificate_data['notBefore'], ssl_date_format) + cert_age = datetime.datetime.now() - validity_start + cert_still_valid = validity_end - datetime.datetime.now() + + output_data['age'] = cert_age.days + output_data['remaining_days'] = cert_still_valid.days + + else: + output_data['age'] = None + output_data['remaining_days'] = None + output['error'] = 1 + output['errorString'] = "%s: %s" % (domain['fqdn'], error_msg) + + output_data_list.append(output_data) + + output['data'] = output_data_list + +print(json.dumps(output)) From 259ef9affdfcd1e2a58f8898336ab15b34b5c761 Mon Sep 17 00:00:00 2001 From: Svennd Date: Mon, 25 Nov 2019 11:48:03 +0100 Subject: [PATCH 223/497] Update zfs-linux Traceback (most recent call last): File "./zfs-linux", line 178, in sys.exit(main(sys.argv[1:])) File "./zfs-linux", line 92, in main DEMAND_MISSES_PERCENT = DEMAND_DATA_MISSES / ARC_MISSES * 100 ZeroDivisionError: 
division by zero --- snmp/zfs-linux | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/snmp/zfs-linux b/snmp/zfs-linux index c5f36256c..87543f576 100644 --- a/snmp/zfs-linux +++ b/snmp/zfs-linux @@ -79,20 +79,20 @@ def main(args): ANON_HITS_PERCENT = ANON_HITS / ARC_HITS * 100 if ANON_HITS != 0 else 0 - MRU_PERCENT = MRU_HITS / ARC_HITS * 100 - MFU_PERCENT = MFU_HITS / ARC_HITS * 100 - MRU_GHOST_PERCENT = MRU_GHOST_HITS / ARC_HITS * 100 - MFU_GHOST_PERCENT = MFU_GHOST_HITS / ARC_HITS * 100 - - DEMAND_HITS_PERCENT = DEMAND_DATA_HITS / ARC_HITS * 100 - PREFETCH_HITS_PERCENT = PREFETCH_DATA_HITS / ARC_HITS * 100 - METADATA_HITS_PERCENT = DEMAND_METADATA_HITS / ARC_HITS * 100 - PREFETCH_METADATA_HITS_PERCENT = DEMAND_METADATA_HITS / ARC_HITS * 100 - - DEMAND_MISSES_PERCENT = DEMAND_DATA_MISSES / ARC_MISSES * 100 - PREFETCH_MISSES_PERCENT = PREFETCH_DATA_MISSES / ARC_MISSES * 100 - METADATA_MISSES_PERCENT = DEMAND_METADATA_MISSES / ARC_MISSES * 100 - PREFETCH_METADATA_MISSES_PERCENT = DEMAND_METADATA_MISSES / ARC_MISSES * 100 + MRU_PERCENT = MRU_HITS / ARC_HITS * 100 if ARC_HITS != 0 else 0 + MFU_PERCENT = MFU_HITS / ARC_HITS * 100 if ARC_HITS != 0 else 0 + MRU_GHOST_PERCENT = MRU_GHOST_HITS / ARC_HITS * 100 if ARC_HITS != 0 else 0 + MFU_GHOST_PERCENT = MFU_GHOST_HITS / ARC_HITS * 100 if ARC_HITS != 0 else 0 + + DEMAND_HITS_PERCENT = DEMAND_DATA_HITS / ARC_HITS * 100 if ARC_HITS != 0 else 0 + PREFETCH_HITS_PERCENT = PREFETCH_DATA_HITS / ARC_HITS * 100 if ARC_HITS != 0 else 0 + METADATA_HITS_PERCENT = DEMAND_METADATA_HITS / ARC_HITS * 100 if ARC_HITS != 0 else 0 + PREFETCH_METADATA_HITS_PERCENT = DEMAND_METADATA_HITS / ARC_HITS * 100 if ARC_HITS != 0 else 0 + + DEMAND_MISSES_PERCENT = DEMAND_DATA_MISSES / ARC_MISSES * 100 if ARC_MISSES != 0 else 0 + PREFETCH_MISSES_PERCENT = PREFETCH_DATA_MISSES / ARC_MISSES * 100 if ARC_MISSES != 0 else 0 + METADATA_MISSES_PERCENT = DEMAND_METADATA_MISSES / ARC_MISSES * 100 if ARC_MISSES != 
0 else 0 + PREFETCH_METADATA_MISSES_PERCENT = DEMAND_METADATA_MISSES / ARC_MISSES * 100 if ARC_MISSES != 0 else 0 # pools proc = subprocess.run(['/sbin/zpool', 'list', '-pH'], stdout=subprocess.PIPE, universal_newlines=True) From fe30eec369cd2814923036e0bb1eb8d2c78b5250 Mon Sep 17 00:00:00 2001 From: Svennd Date: Tue, 10 Dec 2019 14:39:07 +0100 Subject: [PATCH 224/497] Update zfs-linux --- snmp/zfs-linux | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/snmp/zfs-linux b/snmp/zfs-linux index 37d747593..09b24cb11 100644 --- a/snmp/zfs-linux +++ b/snmp/zfs-linux @@ -117,10 +117,10 @@ def main(args): METADATA_HITS_PERCENT = DEMAND_METADATA_HITS / ARC_HITS * 100 if ARC_HITS != 0 else 0 PREFETCH_METADATA_HITS_PERCENT = DEMAND_METADATA_HITS / ARC_HITS * 100 if ARC_HITS != 0 else 0 - DEMAND_MISSES_PERCENT = DEMAND_DATA_MISSES / ARC_MISSES * 100 if ARC_HITS != 0 else 0 - PREFETCH_MISSES_PERCENT = PREFETCH_DATA_MISSES / ARC_MISSES * 100 if ARC_HITS != 0 else 0 - METADATA_MISSES_PERCENT = DEMAND_METADATA_MISSES / ARC_MISSES * 100 if ARC_HITS != 0 else 0 - PREFETCH_METADATA_MISSES_PERCENT = PREFETCH_METADATA_MISSES / ARC_MISSES * 100 if ARC_HITS != 0 else 0 + DEMAND_MISSES_PERCENT = DEMAND_DATA_MISSES / ARC_MISSES * 100 if ARC_MISSES != 0 else 0 + PREFETCH_MISSES_PERCENT = PREFETCH_DATA_MISSES / ARC_MISSES * 100 if ARC_MISSES != 0 else 0 + METADATA_MISSES_PERCENT = DEMAND_METADATA_MISSES / ARC_MISSES * 100 if ARC_MISSES != 0 else 0 + PREFETCH_METADATA_MISSES_PERCENT = PREFETCH_METADATA_MISSES / ARC_MISSES * 100 if ARC_MISSES != 0 else 0 # pools proc = subprocess.run(['/sbin/zpool', 'list', '-pH'], stdout=subprocess.PIPE, universal_newlines=True) From cd63bb5db327973b6aef3f79ace10f67643eb902 Mon Sep 17 00:00:00 2001 From: Dan Langille Date: Tue, 24 Dec 2019 14:56:52 -0500 Subject: [PATCH 225/497] Repair error When running this script, I get: Useless use of multiplication (*) in void context at ./zfs-freebsd line 163 --- snmp/zfs-freebsd | 2 +- 1 file 
changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/zfs-freebsd b/snmp/zfs-freebsd index 93e162476..f503bef13 100644 --- a/snmp/zfs-freebsd +++ b/snmp/zfs-freebsd @@ -160,7 +160,7 @@ my $actual_hit_percent = $real_hits / $arc_accesses_total * 100; my $data_demand_percent = 0; if ( $demand_data_total != 0 ){ - $demand_data_hits / $demand_data_total * 100; + $demand_data_hits /= $demand_data_total * 100; } my $data_prefetch_percent=0; From 1f2e74b1812132e6ac51d119d71c36fb1899331f Mon Sep 17 00:00:00 2001 From: Dmkaz Date: Mon, 13 Jan 2020 12:00:09 -0500 Subject: [PATCH 226/497] Fix zfs-freebsd.py Capacity Output 'zpool-list -pH' returns capacity as a percentage which needs to be stripped as well as account for null (-) values. Additionally, the output now also includes 'CKPOINT' which needs to be added to the fields array so it splits correctly. --- snmp/zfs-freebsd.py | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/snmp/zfs-freebsd.py b/snmp/zfs-freebsd.py index 3ef92c889..276717aa6 100644 --- a/snmp/zfs-freebsd.py +++ b/snmp/zfs-freebsd.py @@ -96,16 +96,21 @@ def chomp(line): if p.returncode != 0: return p.returncode output['pools'] = [] - fields = ['name', 'size', 'alloc', 'free', 'expandsz', 'frag', 'cap', 'dedup'] - for l in p.stdout.splitlines(): - p = dict(zip(fields, l.split('\t'))) - if p['expandsz'] == '-': - p['expandsz'] = 0 - p['frag'] = p['frag'].rstrip('%') - if p['frag'] == '-': - p['frag'] = 0 - p['dedup'] = p['dedup'].rstrip('x') - output['pools'].append(p) + fields = ['name', 'size', 'alloc', 'free', 'ckpoint', 'expandsz', 'frag', 'cap', 'dedup'] + for l in p.stdout.splitlines(): + p = dict(zip(fields, l.split('\t'))) + if p['ckpoint'] == '-': + p['ckpoint'] = 0 + if p['expandsz'] == '-': + p['expandsz'] = 0 + p['frag'] = p['frag'].rstrip('%') + if p['frag'] == '-': + p['frag'] = 0 + p['cap'] = p['cap'].rstrip('%') + if p['cap'] == '-': + p['cap'] = 0 + p['dedup'] = p['dedup'].rstrip('x') + 
output['pools'].append(p) print(json.dumps(output)) From 99919222e98553105a0843c5199e10fad00758e1 Mon Sep 17 00:00:00 2001 From: SourceDoctor Date: Sat, 18 Jan 2020 19:37:09 +0100 Subject: [PATCH 227/497] Hotspare Count should not go below zero --- snmp/mdadm | 3 +++ 1 file changed, 3 insertions(+) diff --git a/snmp/mdadm b/snmp/mdadm index ed291bccd..8f8cffd5d 100755 --- a/snmp/mdadm +++ b/snmp/mdadm @@ -80,6 +80,9 @@ if [ -d /dev/md ] ; then RAID_MISSING_DEVICES=$RAID_MISSING_DEVICES']' let "RAID_HOTSPARE_COUNT=ALL_DEVICE_COUNT-RAID_DISC_COUNT" + if [ $RAID_HOTSPARE_COUNT -lt 0 ]; then + RAID_HOTSPARE_COUNT=0 + fi ARRAY_DATA='{'\ '"name":"'$RAID_NAME\ From d62375e1cf534317b16519a5af5d121d8f4985b3 Mon Sep 17 00:00:00 2001 From: SourceDoctor Date: Sat, 18 Jan 2020 21:50:12 +0100 Subject: [PATCH 228/497] adding space --- snmp/mdadm | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/mdadm b/snmp/mdadm index 8f8cffd5d..bc1d28291 100755 --- a/snmp/mdadm +++ b/snmp/mdadm @@ -80,7 +80,7 @@ if [ -d /dev/md ] ; then RAID_MISSING_DEVICES=$RAID_MISSING_DEVICES']' let "RAID_HOTSPARE_COUNT=ALL_DEVICE_COUNT-RAID_DISC_COUNT" - if [ $RAID_HOTSPARE_COUNT -lt 0 ]; then + if [ $RAID_HOTSPARE_COUNT -lt 0 ] ; then RAID_HOTSPARE_COUNT=0 fi From 6484ad2a533bbf00b71ebf8a19a40c6e9eb7fd7f Mon Sep 17 00:00:00 2001 From: PipoCanaja <38363551+PipoCanaja@users.noreply.github.com> Date: Sun, 19 Jan 2020 10:40:39 +0100 Subject: [PATCH 229/497] fix $data_demand_percent calculation --- snmp/zfs-freebsd | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/zfs-freebsd b/snmp/zfs-freebsd index f503bef13..e4d27cf80 100644 --- a/snmp/zfs-freebsd +++ b/snmp/zfs-freebsd @@ -160,7 +160,7 @@ my $actual_hit_percent = $real_hits / $arc_accesses_total * 100; my $data_demand_percent = 0; if ( $demand_data_total != 0 ){ - $demand_data_hits /= $demand_data_total * 100; + $data_demand_percent = $demand_data_hits / $demand_data_total * 100; } my $data_prefetch_percent=0; From 
87cdaaad84f5ad66a3a212d2a236008ca6bf46d9 Mon Sep 17 00:00:00 2001 From: PipoCanaja <38363551+PipoCanaja@users.noreply.github.com> Date: Sun, 19 Jan 2020 10:47:30 +0100 Subject: [PATCH 230/497] Update zfs-freebsd.py --- snmp/zfs-freebsd.py | 28 ++++++++++++++-------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/snmp/zfs-freebsd.py b/snmp/zfs-freebsd.py index 276717aa6..2227598df 100644 --- a/snmp/zfs-freebsd.py +++ b/snmp/zfs-freebsd.py @@ -97,20 +97,20 @@ def chomp(line): return p.returncode output['pools'] = [] fields = ['name', 'size', 'alloc', 'free', 'ckpoint', 'expandsz', 'frag', 'cap', 'dedup'] - for l in p.stdout.splitlines(): - p = dict(zip(fields, l.split('\t'))) - if p['ckpoint'] == '-': - p['ckpoint'] = 0 - if p['expandsz'] == '-': - p['expandsz'] = 0 - p['frag'] = p['frag'].rstrip('%') - if p['frag'] == '-': - p['frag'] = 0 - p['cap'] = p['cap'].rstrip('%') - if p['cap'] == '-': - p['cap'] = 0 - p['dedup'] = p['dedup'].rstrip('x') - output['pools'].append(p) + for l in p.stdout.splitlines(): + p = dict(zip(fields, l.split('\t'))) + if p['ckpoint'] == '-': + p['ckpoint'] = 0 + if p['expandsz'] == '-': + p['expandsz'] = 0 + p['frag'] = p['frag'].rstrip('%') + if p['frag'] == '-': + p['frag'] = 0 + p['cap'] = p['cap'].rstrip('%') + if p['cap'] == '-': + p['cap'] = 0 + p['dedup'] = p['dedup'].rstrip('x') + output['pools'].append(p) print(json.dumps(output)) From e098a71e21d9f39d30b3e79bf100882704275fcb Mon Sep 17 00:00:00 2001 From: SourceDoctor Date: Wed, 22 Jan 2020 01:52:20 +0100 Subject: [PATCH 231/497] pureftp snmpd extension (#269) --- snmp/pureftpd.py | 66 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 66 insertions(+) create mode 100755 snmp/pureftpd.py diff --git a/snmp/pureftpd.py b/snmp/pureftpd.py new file mode 100755 index 000000000..6f10770b0 --- /dev/null +++ b/snmp/pureftpd.py @@ -0,0 +1,66 @@ +#!/usr/bin/env python3 + +import os +import json + +CONFIGFILE = '/etc/snmp/pureftpd.json' + 
+pureftpwho_cmd = '/usr/sbin/pure-ftpwho' +pureftpwho_args = '-v -s -n' + + +output_data = {} +output_data['version'] = 1 +output_data['errorString'] = "" +output_data['error'] = 0 + + +if os.path.isfile(CONFIGFILE): + with open(CONFIGFILE, 'r') as json_file: + try: + configfile = json.load(json_file) + except json.decoder.JSONDecodeError as e: + output_data['error'] = 1 + output_data['errorString'] = "Configfile Error: '%s'" % e +else: + configfile = None + +if not output_data['error'] and configfile: + try: + if 'pureftpwho_cmd' in configfile.keys(): + pureftpwho_cmd = configfile['pureftpwho_cmd'] + except KeyError: + output_data['error'] = 1 + output_data['errorString'] = "Configfile Error: '%s'" % e + + +output = os.popen('sudo ' + pureftpwho_cmd + ' ' + pureftpwho_args).read() + +data = {} + +for line in output.split('\n'): + if not len(line): + continue + + pid, acct, time, state, file, peer, local, port, transfered, total, percent, bandwidth = line.split('|') + + if "IDLE" in state: + state = "IDLE" + elif "DL" in state: + state = "DL" + elif "UL" in state: + state = "UL" + + if acct not in data.keys(): + data[acct] = {} + if state not in data[acct]: + data[acct][state] = {'bitrate': 0, + 'connections': 0 + } + bandwidth_bit = int(bandwidth) * 1024 + data[acct][state]['bitrate'] += bandwidth_bit + data[acct][state]['connections'] += 1 + +output_data['data'] = data + +print (json.dumps(output_data)) From 7c11c82ba4bc2b1ea61e145daf22257760019a68 Mon Sep 17 00:00:00 2001 From: fbourqui Date: Wed, 22 Jan 2020 11:35:58 +0100 Subject: [PATCH 232/497] fix useSN, selftest with disk defined as argument was resulting in /dev/dev.... 
parameter to smartctl -l and -i --- snmp/smart | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/snmp/smart b/snmp/smart index 75a02ff0e..3d8befdfe 100755 --- a/snmp/smart +++ b/snmp/smart @@ -260,13 +260,11 @@ foreach my $line ( @disks ){ $disk=$line; $name=$line; } - my $disk_sn=$disk; my $output; - if ( $disk =~ /\// ){ - $output=`$smartctl -A $disk`; - }else{ - $output=`$smartctl -A /dev/$disk`; + if ( $disk !~ /\// ){ + $disk = '/dev/'.$disk; } + $output=`$smartctl -A $disk`; my %IDs=( '5'=>'null', '10'=>'null', '173'=>'null', @@ -381,7 +379,7 @@ foreach my $line ( @disks ){ } #get the selftest logs - $output=`$smartctl -l selftest /dev/$disk`; + $output=`$smartctl -l selftest $disk`; @outputA=split( /\n/, $output ); my $completed=scalar grep(/Completed without error/, @outputA); my $interrupted=scalar grep(/Interrupted/, @outputA); @@ -395,7 +393,7 @@ foreach my $line ( @disks ){ # get the drive serial number, if needed my $disk_id=$name; if ( $useSN ){ - while (`$smartctl -i /dev/$disk` =~ /Serial Number:(.*)/g) { + while (`$smartctl -i $disk` =~ /Serial Number:(.*)/g) { $disk_id = $1; $disk_id =~ s/^\s+|\s+$//g; } From 4204f01ab711faf0d64e81df1a42f1236c56cbee Mon Sep 17 00:00:00 2001 From: fbourqui Date: Thu, 30 Jan 2020 13:03:04 +0100 Subject: [PATCH 233/497] Store Crucial SSD #202 in generic #231 (#272) --- snmp/smart | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/snmp/smart b/snmp/smart index 75a02ff0e..652d55758 100755 --- a/snmp/smart +++ b/snmp/smart @@ -298,6 +298,12 @@ foreach my $line ( @disks ){ my $raw=$lineA[9]; my $id=$lineA[0]; + # Crucial SSD + # 202, Percent_Lifetime_Remain, same as 231, SSD Life Left + if ( $id == 202 ) { + $IDs{231}=$raw; + } + # single int raw values if ( ( $id == 5 ) || From 18a989651821e45a588e1c94e9d1e5988c74415e Mon Sep 17 00:00:00 2001 From: Joseph Tingiris Date: Mon, 3 Feb 2020 12:46:36 -0500 Subject: [PATCH 234/497] asterisk add iax2 stats (#274) * asterisk add iax2 stats * fix 
iax2 unmonitored variable; removed unnecessary newline --- snmp/asterisk | 1 + 1 file changed, 1 insertion(+) diff --git a/snmp/asterisk b/snmp/asterisk index 109aec4a4..7c6b7a19a 100644 --- a/snmp/asterisk +++ b/snmp/asterisk @@ -13,6 +13,7 @@ then echo "<<>>" $ASCLI -rx "core show channels" | awk '/active calls/ { print "Calls=" $1 } /active channels/ { print "Channels=" $1}' $ASCLI -rx 'sip show peers' | awk '/sip peers/ { print "SipPeers=" $1 "\nSipMonOnline=" $5 "\nSipMonOffline=" $7 "\nSipUnMonOnline=" $10 "\nSipUnMonOffline=" $12}' + $ASCLI -rx 'iax2 show peers' | awk '/iax2 peers/ { gsub("\\[",""); gsub("\\]",""); print "Iax2Peers=" $1 "\nIax2Online=" $4 "\nIax2Offline=" $6 "\nIax2Unmonitored=" $8}' else exit 0 From 877e449950100719f8c1670b7170f33aa58ea2fc Mon Sep 17 00:00:00 2001 From: SourceDoctor Date: Wed, 5 Feb 2020 23:34:42 +0100 Subject: [PATCH 235/497] set MDADM sync complete to 0 if on degraded array --- snmp/mdadm | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/snmp/mdadm b/snmp/mdadm index bc1d28291..5265fe69f 100755 --- a/snmp/mdadm +++ b/snmp/mdadm @@ -47,10 +47,12 @@ if [ -d /dev/md ] ; then let "RAID_SYNC_SPEED=$($CAT $RAID/md/sync_speed)*1024" fi - if [ "$($CAT $RAID/md/sync_completed)" = "none" ] ; then - RAID_SYNC_COMPLETED=100 - else + if [ "$($CAT $RAID/md/sync_completed)" != "none" ] ; then let "RAID_SYNC_COMPLETED=100*$($CAT $RAID/md/sync_completed)" + elif [ $RAID_DEGRADED -eq 1 ] ; then + RAID_SYNC_COMPLETED=0 + else + RAID_SYNC_COMPLETED=100 fi # divide with 2 to size like in /proc/mdstat From 90d0bab67a8800d583c9f2309c552a0f2550009b Mon Sep 17 00:00:00 2001 From: SourceDoctor Date: Wed, 12 Feb 2020 08:26:12 +0100 Subject: [PATCH 236/497] puppet agent monitoring script (#258) --- snmp/puppet_agent.py | 106 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 106 insertions(+) create mode 100755 snmp/puppet_agent.py diff --git a/snmp/puppet_agent.py b/snmp/puppet_agent.py new file mode 100755 index 
000000000..9d0f343cb --- /dev/null +++ b/snmp/puppet_agent.py @@ -0,0 +1,106 @@ +#!/usr/bin/env python3 + +import json +import yaml +from os.path import isfile +from time import time + + +output = {} +output['error'] = 0 +output['errorString'] = "" +output['version'] = 1 + +CONFIGFILE = '/etc/snmp/puppet.json' +# optional config file +# { +# "agent": { +# "summary_file": "/my/custom/path/to/summary_file" +# } +# } + + +summary_files = ['/var/cache/puppet/state/last_run_summary.yaml', + '/opt/puppetlabs/puppet/cache/state/last_run_summary.yaml'] + + +def parse_yaml_file(filename): + try: + yaml_data = yaml.load(open(filename, 'r')) + msg = None + except yaml.scanner.ScannerError as e: + yaml_data = [] + msg = str(e) + except yaml.parser.ParserError as e: + yaml_data = [] + msg = str(e) + + return msg, yaml_data + + +def time_processing(data): + new_data = {} + + for k in data.keys(): + if k == 'last_run': + # generate difference to last run (seconds) + new_data[k] = round(time() - data[k]) + continue + new_data[k] = round(data[k], 2) + + return new_data + + +def processing(data): + new_data = {} + for k in ['changes', 'events', 'resources', 'version']: + new_data[k] = data[k] + + new_data['time'] = time_processing(data['time']) + + return new_data + + +# extend last_run_summary_file list with optional custom file +if isfile(CONFIGFILE): + with open(CONFIGFILE, 'r') as json_file: + try: + configfile = json.load(json_file) + except json.decoder.JSONDecodeError as e: + output['error'] = 1 + output['errorString'] = "Configfile Error: '%s'" % e +else: + configfile = None + +if not output['error'] and configfile: + try: + if 'agent' in configfile.keys(): + custom_summary_file = configfile['agent']['summary_file'] + summary_files.insert(0, custom_summary_file) + except KeyError: + output['error'] = 1 + output['errorString'] = "Configfile Error: '%s'" % e + +# search existing summary file from list +if not output['error']: + summary_file = None + for sum_file in 
summary_files: + if isfile(sum_file): + summary_file = sum_file + break + + if not summary_file: + output['error'] = 1 + output['errorString'] = "no puppet agent run summary file found" + +# open summary file +if not output['error']: + msg, data = parse_yaml_file(summary_file) + + if msg: + output['error'] = 1 + output['errorString'] = msg + +output['data'] = processing(data) + +print (json.dumps(output)) From 03bf1d52e2b2a4138511c7b6b4428430e9e892ef Mon Sep 17 00:00:00 2001 From: Alex R Date: Thu, 13 Feb 2020 13:27:12 +0100 Subject: [PATCH 237/497] Added check if sudo is required and alter the $picmd to become "sudo $picmd" (#273) This will enable the script to run on libeelec images on raspberry pi for example. --- snmp/raspberry.sh | 54 ++++++++++++++++++++++++----------------------- 1 file changed, 28 insertions(+), 26 deletions(-) diff --git a/snmp/raspberry.sh b/snmp/raspberry.sh index f5c57f827..72ec0616f 100755 --- a/snmp/raspberry.sh +++ b/snmp/raspberry.sh @@ -19,29 +19,31 @@ getStatusMPG4='codec_enabled MPG4' getStatusMJPG='codec_enabled MJPG' getStatusWMV9='codec_enabled WMV9' -sudo $picmd $getTemp | $pised 's|[^0-9.]||g' -sudo $picmd $getVoltsCore | $pised 's|[^0-9.]||g' -sudo $picmd $getVoltsRamC | $pised 's|[^0-9.]||g' -sudo $picmd $getVoltsRamI | $pised 's|[^0-9.]||g' -sudo $picmd $getVoltsRamP | $pised 's|[^0-9.]||g' -sudo $picmd $getFreqArm | $pised 's/frequency(45)=//g' -sudo $picmd $getFreqCore | $pised 's/frequency(1)=//g' -sudo $picmd $getStatusH264 | $pised 's/H264=//g' -sudo $picmd $getStatusMPG2 | $pised 's/MPG2=//g' -sudo $picmd $getStatusWVC1 | $pised 's/WVC1=//g' -sudo $picmd $getStatusMPG4 | $pised 's/MPG4=//g' -sudo $picmd $getStatusMJPG | $pised 's/MJPG=//g' -sudo $picmd $getStatusWMV9 | $pised 's/WMV9=//g' -sudo $picmd $getStatusH264 | $pised 's/enabled/2/g' -sudo $picmd $getStatusMPG2 | $pised 's/enabled/2/g' -sudo $picmd $getStatusWVC1 | $pised 's/enabled/2/g' -sudo $picmd $getStatusMPG4 | $pised 's/enabled/2/g' -sudo $picmd 
$getStatusMJPG | $pised 's/enabled/2/g' -sudo $picmd $getStatusWMV9 | $pised 's/enabled/2/g' -sudo $picmd $getStatusWMV9 | $pised 's/enabled/2/g' -sudo $picmd $getStatusH264 | $pised 's/disabled/1/g' -sudo $picmd $getStatusMPG2 | $pised 's/disabled/1/g' -sudo $picmd $getStatusWVC1 | $pised 's/disabled/1/g' -sudo $picmd $getStatusMPG4 | $pised 's/disabled/1/g' -sudo $picmd $getStatusMJPG | $pised 's/disabled/1/g' -sudo $picmd $getStatusWMV9 | $pised 's/disabled/1/g' +[ $(id -u) -eq 0 ] || picmd="sudo $picmd" + +$picmd $getTemp | $pised 's|[^0-9.]||g' +$picmd $getVoltsCore | $pised 's|[^0-9.]||g' +$picmd $getVoltsRamC | $pised 's|[^0-9.]||g' +$picmd $getVoltsRamI | $pised 's|[^0-9.]||g' +$picmd $getVoltsRamP | $pised 's|[^0-9.]||g' +$picmd $getFreqArm | $pised 's/frequency(45)=//g' +$picmd $getFreqCore | $pised 's/frequency(1)=//g' +$picmd $getStatusH264 | $pised 's/H264=//g' +$picmd $getStatusMPG2 | $pised 's/MPG2=//g' +$picmd $getStatusWVC1 | $pised 's/WVC1=//g' +$picmd $getStatusMPG4 | $pised 's/MPG4=//g' +$picmd $getStatusMJPG | $pised 's/MJPG=//g' +$picmd $getStatusWMV9 | $pised 's/WMV9=//g' +$picmd $getStatusH264 | $pised 's/enabled/2/g' +$picmd $getStatusMPG2 | $pised 's/enabled/2/g' +$picmd $getStatusWVC1 | $pised 's/enabled/2/g' +$picmd $getStatusMPG4 | $pised 's/enabled/2/g' +$picmd $getStatusMJPG | $pised 's/enabled/2/g' +$picmd $getStatusWMV9 | $pised 's/enabled/2/g' +$picmd $getStatusWMV9 | $pised 's/enabled/2/g' +$picmd $getStatusH264 | $pised 's/disabled/1/g' +$picmd $getStatusMPG2 | $pised 's/disabled/1/g' +$picmd $getStatusWVC1 | $pised 's/disabled/1/g' +$picmd $getStatusMPG4 | $pised 's/disabled/1/g' +$picmd $getStatusMJPG | $pised 's/disabled/1/g' +$picmd $getStatusWMV9 | $pised 's/disabled/1/g' From cdc07b61db3a44fd5f94d94605fb364b0fb1817f Mon Sep 17 00:00:00 2001 From: crcro Date: Thu, 13 Feb 2020 14:27:36 +0200 Subject: [PATCH 238/497] mailcow-dockerized postfix stats (#270) * initial release * added requirements info --- 
snmp/mailcow-dockerized-postfix | 90 +++++++++++++++++++++++++++++++++ 1 file changed, 90 insertions(+) create mode 100644 snmp/mailcow-dockerized-postfix diff --git a/snmp/mailcow-dockerized-postfix b/snmp/mailcow-dockerized-postfix new file mode 100644 index 000000000..354f14fbd --- /dev/null +++ b/snmp/mailcow-dockerized-postfix @@ -0,0 +1,90 @@ +#!/usr/bin/env python3 +# +# Copyright (C) 2020 Cercel Valentin-Adrian +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# as published by the Free Software Foundation; either version 2 +# of the License, or (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, write to the Free Software +# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. 
+# +# mailcow-dockerized postfix stats +# please adjust librenms_poller_interval according to your LibreNMS setup - default to 5 minutes +# requirements: mailcow-dockerized and pflogsumm +# + +import subprocess +import re +import json + +# LibreNMS poller interval +librenms_poller_interval = 300 + + +def libre_to_mcd_postfix(libre_seconds): + return str(int(libre_seconds / 60)) + + +def cli_get_docker_container(): + return subprocess.check_output("docker ps -qf name=postfix-mailcow", shell=True).decode('utf8').strip() + + +def cli_command(): + cli_part = "docker logs --since " + libre_to_mcd_postfix(librenms_poller_interval) \ + + "m " + cli_get_docker_container() + "| pflogsumm --smtpd-stats" + return cli_part + + +def get_output(): + return subprocess.check_output(cli_command(), shell=True).decode('utf8') + + +def output_cleaning(input): + output = re.split('\n', input) + return list(filter(None, output)) + + +def entry_generator(input): + entry = re.sub(' +', ':', input.strip().lstrip()) + return entry.split(':') + + +# limit our needed output +mcd_postfix_data = get_output().split('messages') +data = mcd_postfix_data[1].split('smtpd') + +# postfix stats only +mcd_postfix_info = data[0] +# smtpd stats only +mcd_smtpd_info = data[1].split('Per-Hour Traffic Summary')[0] + +# postfix stats export +mcd_postfix = output_cleaning(mcd_postfix_info) + +points_data = [] +points_label = [] +for entry in mcd_postfix: + data_labels = entry_generator(entry) + + if data_labels[0].find('k') == -1: + points_data.append(data_labels[0]) + else: + data_point = data_labels[0].replace('k', '', 1) + data_point = int(data_point) * 1024 + points_data.append(data_point) + + points_label.append(re.sub('[^a-zA-Z]+', '', data_labels[1])) + +entries = dict(zip(points_label, points_data)) +export = {"data": entries, "error": "0", "errorString": "", "version": "1"} +data = re.sub(' ', '', json.dumps(export)) +print(data) + From ff99d99e4415bcf9bf3c259343e085f2984b26a6 Mon Sep 17 00:00:00 2001 
From: Joseph Tingiris Date: Thu, 13 Feb 2020 07:29:10 -0500 Subject: [PATCH 239/497] apache-stats.sh: shell script that produces LibreNMS apache application output. The only dependency is curl. (#263) --- snmp/apache-stats.sh | 213 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 213 insertions(+) create mode 100755 snmp/apache-stats.sh diff --git a/snmp/apache-stats.sh b/snmp/apache-stats.sh new file mode 100755 index 000000000..b1a9a06db --- /dev/null +++ b/snmp/apache-stats.sh @@ -0,0 +1,213 @@ +#!/usr/bin/env sh + +# This script produces LibreNMS apache-stats output. The only dependency is curl. + +# 20200102, joseph.tingiris@gmail.com + +PATH=/sbin:/bin:/usr/sbin:/usr/bin + +# +# Functions +# + +function debugecho() { + if [ ${#Debug} -gt 0 ]; then + echo debug: $@ + fi +} + +# +# Globals +# + +Tmp_File=/tmp/apache_status + +# Debug=on; use environment, i.e. Debug=on apache-stats.sh +if [ "${DEBUG}" != "" ]; then + Debug=${DEBUG} +else + if [ "${Debug}" != "" ]; then + Debug=${Debug} + fi +fi + +# set default values to U; not all apache's have all stats +Total_Accesses="U" +Total_kBytes="U" +CPULoad="U" +Uptime="U" +ReqPerSec="U" +BytesPerSec="U" +BytesPerReq="U" +BusyWorkers="U" +IdleWorkers="U" +Scoreboard="U" + +# set default scoreboard counters to 0 +let Scoreboard_=0 +let ScoreboardDot=0 +let ScoreboardC=0 +let ScoreboardD=0 +let ScoreboardG=0 +let ScoreboardI=0 +let ScoreboardK=0 +let ScoreboardL=0 +let ScoreboardR=0 +let ScoreboardS=0 +let ScoreboardW=0 + +# +# Main +# + +curl --silent --fail "http://localhost/server-status?auto" -o ${Tmp_File} &> /dev/null +if [ $? -ne 0 ]; then + # curl failed + exit 1 +fi + +if [ ! 
-s ${Tmp_File} ]; then + # empty output + exit 1 +fi + +while read Line; do + Field=${Line%:*} + Value=${Line#*: } + + debugecho "Line: ${Line}" + debugecho "Field: ${Field}" + debugecho "Value: ${Value}" + debugecho + + if [ "${Field}" == "Total Accesses" ]; then + Total_Accesses=${Value} + fi + + if [ "${Field}" == "Total kBytes" ]; then + Total_kBytes=${Value} + fi + + if [ "${Field}" == "CPULoad" ]; then + CPULoad=${Value} + fi + + if [ "${Field}" == "Uptime" ]; then + Uptime=${Value} + fi + + if [ "${Field}" == "ReqPerSec" ]; then + ReqPerSec=${Value} + fi + + if [ "${Field}" == "BytesPerSec" ]; then + BytesPerSec=${Value} + fi + + if [ "${Field}" == "BytesPerReq" ]; then + BytesPerReq=${Value} + fi + + if [ "${Field}" == "BusyWorkers" ]; then + BusyWorkers=${Value} + fi + + if [ "${Field}" == "IdleWorkers" ]; then + IdleWorkers=${Value} + fi + + if [ "${Field}" == "Scoreboard" ]; then + Scoreboard=${Value} + fi + +done < ${Tmp_File} + +# value output order must be this ... +echo "${Total_Accesses}" +echo "${Total_kBytes}" +echo "${CPULoad}" +echo "${Uptime}" +echo "${ReqPerSec}" +echo "${BytesPerSec}" +echo "${BytesPerReq}" +echo "${BusyWorkers}" +echo "${IdleWorkers}" + +debugecho "Scoreboard = ${Scoreboard}" +for (( c=0; c<${#Scoreboard}; c++ )); do + + if [ "${Scoreboard:$c:1}" == "_" ]; then + let Scoreboard_=${Scoreboard_}+1 + continue + fi + + if [ "${Scoreboard:$c:1}" == "." 
]; then + let ScoreboardDot=${ScoreboardDot}+1 + continue + fi + + if [ "${Scoreboard:$c:1}" == "C" ]; then + let ScoreboardC=${ScoreboardC}+1 + continue + fi + + if [ "${Scoreboard:$c:1}" == "D" ]; then + let ScoreboardD=${ScoreboardD}+1 + continue + fi + + if [ "${Scoreboard:$c:1}" == "G" ]; then + let ScoreboardG=${ScoreboardG}+1 + continue + fi + + if [ "${Scoreboard:$c:1}" == "I" ]; then + let ScoreboardI=${ScoreboardI}+1 + continue + fi + + if [ "${Scoreboard:$c:1}" == "K" ]; then + let ScoreboardK=${ScoreboardK}+1 + continue + fi + + if [ "${Scoreboard:$c:1}" == "L" ]; then + let ScoreboardL=${ScoreboardL}+1 + continue + fi + + if [ "${Scoreboard:$c:1}" == "R" ]; then + let ScoreboardR=${ScoreboardR}+1 + continue + fi + + if [ "${Scoreboard:$c:1}" == "S" ]; then + let ScoreboardS=${ScoreboardS}+1 + continue + fi + + if [ "${Scoreboard:$c:1}" == "W" ]; then + let ScoreboardW=${ScoreboardW}+1 + continue + fi + + debugecho "${Scoreboard:$c:1}" +done + +# scoreboard output order must be this ... +echo ${Scoreboard_} +echo ${ScoreboardS} +echo ${ScoreboardR} +echo ${ScoreboardW} +echo ${ScoreboardK} +echo ${ScoreboardD} +echo ${ScoreboardC} +echo ${ScoreboardL} +echo ${ScoreboardG} +echo ${ScoreboardI} +echo ${ScoreboardDot} + +# clean up +if [ -f ${Tmp_File} ]; then + rm -f ${Tmp_File} &> /dev/null +fi From 30ead0366bac220e9a0e0c031536b618f2afc1ba Mon Sep 17 00:00:00 2001 From: "David P. Discher" Date: Wed, 19 Feb 2020 00:08:57 -0800 Subject: [PATCH 240/497] Completely breaks libremns metric collections, lets actually do the assigment. (#276) Perl error "Useless use of multiplication (*) in void context at /etc/snmp.d/zfs-freebsd line 163." 
--- snmp/zfs-freebsd | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/zfs-freebsd b/snmp/zfs-freebsd index 93e162476..e4d27cf80 100644 --- a/snmp/zfs-freebsd +++ b/snmp/zfs-freebsd @@ -160,7 +160,7 @@ my $actual_hit_percent = $real_hits / $arc_accesses_total * 100; my $data_demand_percent = 0; if ( $demand_data_total != 0 ){ - $demand_data_hits / $demand_data_total * 100; + $data_demand_percent = $demand_data_hits / $demand_data_total * 100; } my $data_prefetch_percent=0; From 31ce375c01c4775be1ff3b5a26bb4d9b8d0ecb0f Mon Sep 17 00:00:00 2001 From: SourceDoctor Date: Fri, 28 Feb 2020 21:33:27 +0100 Subject: [PATCH 241/497] remove unneeded Squid parsing Script --- snmp/squid | 74 ------------------------------------------------------ 1 file changed, 74 deletions(-) delete mode 100644 snmp/squid diff --git a/snmp/squid b/snmp/squid deleted file mode 100644 index 633db4170..000000000 --- a/snmp/squid +++ /dev/null @@ -1,74 +0,0 @@ -#!/bin/sh - -# Add this to snmpd.conf as below. -# extend squid /etc/snmp/squid - -# To get this working smoothly and securely, you can add the items below to your squid.conf. 
-# acl snmppublic snmp_community public -# snmp_port 3401 -# snmp_access allow snmppublic localhost -# snmp_access deny all - - -# set this as being equal to the value of 'acl snmppublic snmp_community' in squid.conf -community='public' - -# set this as being equal to the value of 'snmp_port' in squid.conf -port='3401' - -# the full path to snmpwalk -snmpwalk='/usr/bin/env snmpwalk' - -## -## Nothing Should Need Changed Below Here -## - -# cacheMemMaxSize Integer32 -# cacheSwapMaxSize Integer32 -# cacheSwapHighWM Integer32 -# cacheSwapLowWM Integer32 -$snmpwalk -O qv -c $community -v 2c localhost:$port 1.3.6.1.4.1.3495.1.2.5 - -# cacheSysPageFaults Counter32 -# cacheSysNumReads Counter32 -# cacheMemUsage Integer32 -# cacheCpuTime Integer32 -# cacheCpuUsage Integer32 -# cacheMaxResSize Integer32 -# cacheNumObjCount Gauge32 -# cacheCurrentLRUExpiration Timeticks -# Storage LRU Expiration Age -# cacheCurrentUnlinkRequests Gauge32 -# cacheCurrentUnusedFDescrCnt Gauge32 -# cacheCurrentResFileDescrCnt Gauge32 -# cacheCurrentFileDescrCnt Gauge32 -# cacheCurrentFileDescrMax Gauge32 -$snmpwalk -O qv -c $community -v 2c localhost:$port 1.3.6.1.4.1.3495.1.3.1 - -# cacheProtoClientHttpRequests Counter32 -# cacheHttpHits Counter32 -# cacheHttpErrors Counter32 -# cacheHttpInKb Counter32 -# cacheHttpOutKb Counter32 -# cacheIcpPktsSent Counter32 -# cacheIcpPktsRecv Counter32 -# cacheIcpKbSent Counter32 -# cacheIcpKbRecv Counter32 -# cacheServerRequests Integer32 -# cacheServerErrors Integer32 -# cacheServerInKb Counter32 -# cacheServerOutKb Counter32 -# cacheCurrentSwapSize Gauge32 -# cacheClients Gauge32 -$snmpwalk -O qv -c $community -v 2c localhost:$port 1.3.6.1.4.1.3495.1.3.2.1 - -# cacheRequestHitRatio.1 Integer32 -# cacheRequestHitRatio.5 Integer32 -# cacheRequestHitRatio.60 Integer32 -$snmpwalk -O qv -c $community -v 2c localhost:$port 1.3.6.1.4.1.3495.1.3.2.2.1.9 - -# cacheRequestByteRatio.1 Integer32 -# cacheRequestByteRatio.5 Integer32 -# cacheRequestByteRatio.60 
Integer32 -$snmpwalk -O qv -c $community -v 2c localhost:$port 1.3.6.1.4.1.3495.1.3.2.2.1.10 - From 3118fa1a8bd4e2b31c7a59bed8ff1e239d6c907c Mon Sep 17 00:00:00 2001 From: SourceDoctor Date: Sat, 21 Mar 2020 22:13:22 +0100 Subject: [PATCH 242/497] Pureftpd Byte to bit --- snmp/pureftpd.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/pureftpd.py b/snmp/pureftpd.py index 6f10770b0..1c768e1ba 100755 --- a/snmp/pureftpd.py +++ b/snmp/pureftpd.py @@ -57,7 +57,7 @@ data[acct][state] = {'bitrate': 0, 'connections': 0 } - bandwidth_bit = int(bandwidth) * 1024 + bandwidth_bit = int(bandwidth) * 1024 * 8 data[acct][state]['bitrate'] += bandwidth_bit data[acct][state]['connections'] += 1 From 9f53bb00fcd36537ca55aaec498bfba9a5a1e7e0 Mon Sep 17 00:00:00 2001 From: Anael Mobilia Date: Wed, 8 Apr 2020 11:40:41 +0200 Subject: [PATCH 243/497] Fix ssl.SSLCertVerificationError If the certificate is already expired, the script crash. Fix #286 --- snmp/certificate.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/snmp/certificate.py b/snmp/certificate.py index 8957b7c9f..e81502bdf 100755 --- a/snmp/certificate.py +++ b/snmp/certificate.py @@ -24,12 +24,14 @@ def get_certificate_data(domain, port=443): # 3 second timeout because Lambda has runtime limitations conn.settimeout(3.0) + ssl_info = None try: conn.connect((domain, port)) error_msg = None - except ConnectionRefusedError as e: + ssl_info = conn.getpeercert() + except (ConnectionRefusedError, ssl.SSLCertVerificationError) as e: error_msg = e - ssl_info = conn.getpeercert() + return ssl_info, error_msg From d242c8d096976f94b1101a204c20358b2441d2a1 Mon Sep 17 00:00:00 2001 From: Anael Mobilia Date: Wed, 8 Apr 2020 17:59:00 +0200 Subject: [PATCH 244/497] Return values to librenms application in order to log the error --- snmp/certificate.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/snmp/certificate.py b/snmp/certificate.py index 
e81502bdf..e5ed5ea71 100755 --- a/snmp/certificate.py +++ b/snmp/certificate.py @@ -24,13 +24,21 @@ def get_certificate_data(domain, port=443): # 3 second timeout because Lambda has runtime limitations conn.settimeout(3.0) - ssl_info = None + error_msg = None + ssl_info = {} try: conn.connect((domain, port)) error_msg = None ssl_info = conn.getpeercert() - except (ConnectionRefusedError, ssl.SSLCertVerificationError) as e: + except ConnectionRefusedError as e: error_msg = e + # Manage expired certificates + except ssl.SSLCertVerificationError as e: + # Arbitrary start date + ssl_info['notBefore'] = "Jan 1 00:00:00 2020 GMT" + # End date is now (we don't have the real one but the certificate is expired) + one_minute_further = datetime.datetime.now() + datetime.timedelta(minutes=1) + ssl_info['notAfter'] = one_minute_further.strftime('%b %d %H:%M:%S %Y GMT') return ssl_info, error_msg From d1df902e85a62fcfe307b80b84d3f419d81282be Mon Sep 17 00:00:00 2001 From: Anael Mobilia Date: Wed, 8 Apr 2020 17:59:44 +0200 Subject: [PATCH 245/497] Fix redundant line --- snmp/certificate.py | 1 - 1 file changed, 1 deletion(-) diff --git a/snmp/certificate.py b/snmp/certificate.py index e5ed5ea71..d97f66f12 100755 --- a/snmp/certificate.py +++ b/snmp/certificate.py @@ -28,7 +28,6 @@ def get_certificate_data(domain, port=443): ssl_info = {} try: conn.connect((domain, port)) - error_msg = None ssl_info = conn.getpeercert() except ConnectionRefusedError as e: error_msg = e From 45af98da4e23cc77d49413cbf085121993ef1020 Mon Sep 17 00:00:00 2001 From: FingerlessGloves Date: Sun, 12 Apr 2020 17:17:16 +0100 Subject: [PATCH 246/497] Make python3 version of Nginx extend the default (#284) * Rename nginx to nginx-python2, as python2 is EOL Rename Nginx to Nginx-python2, as python2 is EOL. Shall make python3 version the default version now. Keeping python2 version for legacy. 
* Rename nginx-python3.py to nginx --- snmp/nginx | 38 +++++++++++++++++++------------------- snmp/nginx-python2 | 28 ++++++++++++++++++++++++++++ snmp/nginx-python3.py | 28 ---------------------------- 3 files changed, 47 insertions(+), 47 deletions(-) create mode 100755 snmp/nginx-python2 delete mode 100755 snmp/nginx-python3.py diff --git a/snmp/nginx b/snmp/nginx index 06efab6e6..e2a64118d 100755 --- a/snmp/nginx +++ b/snmp/nginx @@ -1,28 +1,28 @@ -#!/usr/bin/env python2 -import urllib2 +#!/usr/bin/env python3 +from urllib.request import urlopen import re -data = urllib2.urlopen('http://localhost/nginx-status').read() +data = urlopen('http://localhost/nginx-status').read() params = {} -for line in data.split("\n"): - smallstat = re.match(r"\s?Reading:\s(.*)\sWriting:\s(.*)\sWaiting:\s(.*)$", line) - req = re.match(r"\s+(\d+)\s+(\d+)\s+(\d+)", line) - if smallstat: - params["Reading"] = smallstat.group(1) - params["Writing"] = smallstat.group(2) - params["Waiting"] = smallstat.group(3) - elif req: - params["Requests"] = req.group(3) - else: - pass +for line in data.decode().split("\n"): + smallstat = re.match(r"\s?Reading:\s(.*)\sWriting:\s(.*)\sWaiting:\s(.*)$", line) + req = re.match(r"\s+(\d+)\s+(\d+)\s+(\d+)", line) + if smallstat: + params["Reading"] = smallstat.group(1) + params["Writing"] = smallstat.group(2) + params["Waiting"] = smallstat.group(3) + elif req: + params["Requests"] = req.group(3) + else: + pass dataorder = ["Active", "Reading", "Writing", "Waiting", "Requests"] for param in dataorder: - if param == "Active": - Active = int(params["Reading"]) + int(params["Writing"]) + int(params["Waiting"]) - print Active - else: - print params[param] + if param == "Active": + Active = int(params["Reading"]) + int(params["Writing"]) + int(params["Waiting"]) + print(Active) + else: + print(params[param]) diff --git a/snmp/nginx-python2 b/snmp/nginx-python2 new file mode 100755 index 000000000..06efab6e6 --- /dev/null +++ b/snmp/nginx-python2 @@ -0,0 +1,28 
@@ +#!/usr/bin/env python2 +import urllib2 +import re + +data = urllib2.urlopen('http://localhost/nginx-status').read() + +params = {} + +for line in data.split("\n"): + smallstat = re.match(r"\s?Reading:\s(.*)\sWriting:\s(.*)\sWaiting:\s(.*)$", line) + req = re.match(r"\s+(\d+)\s+(\d+)\s+(\d+)", line) + if smallstat: + params["Reading"] = smallstat.group(1) + params["Writing"] = smallstat.group(2) + params["Waiting"] = smallstat.group(3) + elif req: + params["Requests"] = req.group(3) + else: + pass + +dataorder = ["Active", "Reading", "Writing", "Waiting", "Requests"] + +for param in dataorder: + if param == "Active": + Active = int(params["Reading"]) + int(params["Writing"]) + int(params["Waiting"]) + print Active + else: + print params[param] diff --git a/snmp/nginx-python3.py b/snmp/nginx-python3.py deleted file mode 100755 index e2a64118d..000000000 --- a/snmp/nginx-python3.py +++ /dev/null @@ -1,28 +0,0 @@ -#!/usr/bin/env python3 -from urllib.request import urlopen -import re - -data = urlopen('http://localhost/nginx-status').read() - -params = {} - -for line in data.decode().split("\n"): - smallstat = re.match(r"\s?Reading:\s(.*)\sWriting:\s(.*)\sWaiting:\s(.*)$", line) - req = re.match(r"\s+(\d+)\s+(\d+)\s+(\d+)", line) - if smallstat: - params["Reading"] = smallstat.group(1) - params["Writing"] = smallstat.group(2) - params["Waiting"] = smallstat.group(3) - elif req: - params["Requests"] = req.group(3) - else: - pass - -dataorder = ["Active", "Reading", "Writing", "Waiting", "Requests"] - -for param in dataorder: - if param == "Active": - Active = int(params["Reading"]) + int(params["Writing"]) + int(params["Waiting"]) - print(Active) - else: - print(params[param]) From 60d89f21459c65ce90cf63082bdefef89662637b Mon Sep 17 00:00:00 2001 From: Kees van Veen Date: Tue, 14 Apr 2020 16:42:33 +0200 Subject: [PATCH 247/497] added option to use portno in arguments --- snmp/portactivity | 83 ++++++++++++++++++++++++++++++++--------------- 1 file changed, 57 
insertions(+), 26 deletions(-) diff --git a/snmp/portactivity b/snmp/portactivity index 9965e0d69..c44142de9 100755 --- a/snmp/portactivity +++ b/snmp/portactivity @@ -96,7 +96,7 @@ sub main::VERSION_MESSAGE { sub main::HELP_MESSAGE { print "\n". - "-p A comma seperated list of TCP protocols to check for in netstat.\n". + "-p A comma seperated list of TCP protocols (names or numbers) to check for in netstat.\n". "-P Print the output in a human readable manner.\n"; } @@ -196,11 +196,11 @@ my %valid_states=( 'UNKNOWN'=>1, ); -#gets the options +# gets the options my %opts=(); getopts('p:P', \%opts); -#what will be returned +# what will be returned my %to_return; $to_return{error}='0'; $to_return{errorString}=''; @@ -213,34 +213,42 @@ if (! defined( $opts{p} ) ){ exit 1; } -#the list of protocols to check for -my @protos_array=split(/\,/, $opts{p}); +# the list of arguments to check for +my @argument_array=split(/\,/, $opts{p}); +# the discovered protocols +my @protos_array=(); -#holds the various protocol hashes +# holds the various protocol hashes my %protos; my %proto_lookup; -#make sure each one specificied is defined and build the hash that will be returned +# the final data to present +my %data_collection; + +# make sure each one specificied is defined and build the hash that will be returned my $protos_array_int=0; -while ( defined( $protos_array[$protos_array_int] ) ){ - $protos{ $protos_array[$protos_array_int] }=newProto; - - #check if it exists - my $port=getservbyname( $protos_array[$protos_array_int] , 'tcp' ); - - # if it is not defined, then we error - if ( !defined( $port ) ){ - $to_return{errorString}='"'.$protos_array[$protos_array_int].'" is not a known service either add it or double check your spelling'; - $to_return{error}=4; - return_json(\%to_return, $opts{P}); - exit 4; +while ( defined( $argument_array[$protos_array_int] ) ){ + + # collect the argument + my $arg = $argument_array[$protos_array_int]; + + # Check if it is a port number .. 
+ if ( $arg !~ /^-?\d+$/) { + # no it is a service name + $arg = getservbyname( $arg , 'tcp' ); } - $proto_lookup{ $port } = $protos_array [$protos_array_int ]; + # get a new protos online, by portno + $protos{ $arg } = newProto; + # add it to the "protos_array" && "proto_lookup" + push(@protos_array,$arg); + $proto_lookup{ $arg } = $arg; $protos_array_int++; } + + my $netstat='netstat -n'; my $os=$^O; @@ -269,15 +277,16 @@ while ( defined( $res->[2]{'active_conns'}[$active_conns_int] ) ){ ){ $protos_array_int=0; my $service; + while( ( defined( $protos_array[ $protos_array_int ] ) ) && ( !defined( $service ) ) #stop once we find it ){ + #check if this matches either ports - if ( - ( defined($proto_lookup{ $conn->{'local_port'} }) ) || - ( defined($proto_lookup{ $conn->{'foreign_port'} }) ) - ){ + if ( $conn->{'local_port'} eq int ( $protos_array[ $protos_array_int ] ) ) { + $service=$protos_array[ $protos_array_int ]; + } elsif ( $conn->{'foreign_port'} eq int ( $protos_array[ $protos_array_int ] ) ) { $service=$protos_array[ $protos_array_int ]; } @@ -291,6 +300,7 @@ while ( defined( $res->[2]{'active_conns'}[$active_conns_int] ) ){ my $state=$conn->{'state'}; #translate the state names if ( $os eq 'linux' ){ + if ( $state eq 'SYN_RECV' ){ $state='SYN_RECEIVED'; }elsif( $state eq 'FIN_WAIT1' ){ @@ -324,7 +334,7 @@ while ( defined( $res->[2]{'active_conns'}[$active_conns_int] ) ){ } if ( - ( $conn->{'local_port'} eq $service ) && + ( $conn->{'local_port'} eq $service ) && ( $state ne 'LISTEN' ) && ( ! 
$processed ) ){ @@ -339,7 +349,28 @@ while ( defined( $res->[2]{'active_conns'}[$active_conns_int] ) ){ $active_conns_int++; } +# try to lookup a name for the portnumber used in the data +my($portno,$dataset); +while ( ($portno,$dataset) = each(%protos) ) { + + my $servicename = $portno; + + if ( $portno =~ /^-?\d+$/) { + my $servicename_test = getservbyport($portno,"tcp"); + + if ( ( not defined $servicename_test ) || ( $servicename_test eq '' ) ){ + $servicename = $portno; + } else { + $servicename = $servicename_test; + } + } + + # add to "data_collection" + $data_collection{$servicename} = $dataset; +} + + #return the finished product -$to_return{data}=\%protos; +$to_return{data}=\%data_collection; return_json(\%to_return, $opts{P}); exit 0; From 74e0dfc89fa8b71b20b482598eff44b550d163e9 Mon Sep 17 00:00:00 2001 From: AltiUP <44464440+AltiUP@users.noreply.github.com> Date: Sun, 19 Apr 2020 00:02:53 +0200 Subject: [PATCH 248/497] Change stats file location The stats file should not be placed in /var/run/named but in /var/cache/bind. In run there will be permissions problems. --- snmp/bind | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/snmp/bind b/snmp/bind index 1d66d2d18..390738132 100644 --- a/snmp/bind +++ b/snmp/bind @@ -39,7 +39,7 @@ The variables are as below. rndc = The path to rndc. Default: /usr/bin/env rndc call_rndc = A 0/1 boolean on weather to call rndc stats. Suggest to set to 0 if using netdata. Default: 1 - stats_file = The path to the named stats file. Default: /var/run/named/stats + stats_file = The path to the named stats file. Default: /var/cache/bind/stats agent = A 0/1 boolean for if this is being used as a LibreNMS agent or not. Default: 0 zero_stats = A 0/1 boolean for if the stats file should be zeroed first. Default: 0 (1 if guessed) @@ -53,7 +53,7 @@ it should be. 
## my $call_rndc=1; my $rndc='/usr/bin/env rndc'; -my $stats_file='/var/run/named/stats'; +my $stats_file='/var/cache/bind/stats'; my $zero_stats=0; my $agent=0; my $missing=0; From c8f46f40a2a13d3f8f29a46f649d550d119d5949 Mon Sep 17 00:00:00 2001 From: Konstantin V Bekreyev Date: Tue, 21 Apr 2020 21:30:37 +0400 Subject: [PATCH 249/497] add FreeBSD to osupdate (#288) * Update osupdate osupdate for FreeBSD * Update osupdate --- snmp/osupdate | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/snmp/osupdate b/snmp/osupdate index f45493dc4..8a391fa2b 100755 --- a/snmp/osupdate +++ b/snmp/osupdate @@ -24,6 +24,8 @@ BIN_APT='/usr/bin/env apt-get' CMD_APT='-qq -s upgrade' BIN_PACMAN='/usr/bin/env pacman' CMD_PACMAN='-Sup' +BIN_PKG='/usr/sbin/pkg' +CMD_PKG=' audit -q -F' ################################################################ # Don't change anything unless you know what are you doing # @@ -68,6 +70,14 @@ elif command -v apt-get &>/dev/null ; then else echo "0"; fi +elif command -v pkg &>/dev/null ; then + # FreeBSD + UPDATES=`$BIN_PKG $CMD_PKG | $BIN_WC $CMD_WC` + if [ $UPDATES -ge 1 ]; then + echo $UPDATES; + else + echo "0"; + fi else echo "0"; fi From 29995e6ec9763a7560462754e42955bceb5ad411 Mon Sep 17 00:00:00 2001 From: priiduonu Date: Thu, 23 Apr 2020 05:51:17 +0300 Subject: [PATCH 250/497] fix: removed double entry for WMV9 codec (#292) --- snmp/raspberry.sh | 1 - 1 file changed, 1 deletion(-) diff --git a/snmp/raspberry.sh b/snmp/raspberry.sh index 72ec0616f..a2f924102 100755 --- a/snmp/raspberry.sh +++ b/snmp/raspberry.sh @@ -40,7 +40,6 @@ $picmd $getStatusWVC1 | $pised 's/enabled/2/g' $picmd $getStatusMPG4 | $pised 's/enabled/2/g' $picmd $getStatusMJPG | $pised 's/enabled/2/g' $picmd $getStatusWMV9 | $pised 's/enabled/2/g' -$picmd $getStatusWMV9 | $pised 's/enabled/2/g' $picmd $getStatusH264 | $pised 's/disabled/1/g' $picmd $getStatusMPG2 | $pised 's/disabled/1/g' $picmd $getStatusWVC1 | $pised 's/disabled/1/g' From 
fa0b8f05583a483a4644368166c6c5217e382198 Mon Sep 17 00:00:00 2001 From: Tobias Frederick Date: Thu, 30 Apr 2020 21:00:25 +0200 Subject: [PATCH 251/497] Fix apache-stats.sh --- snmp/apache-stats.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/apache-stats.sh b/snmp/apache-stats.sh index b1a9a06db..275d81586 100755 --- a/snmp/apache-stats.sh +++ b/snmp/apache-stats.sh @@ -1,4 +1,4 @@ -#!/usr/bin/env sh +#!/usr/bin/env bash # This script produces LibreNMS apache-stats output. The only dependency is curl. From 90b55e527dab46564082336084b8dc06fd5e2f48 Mon Sep 17 00:00:00 2001 From: SourceDoctor Date: Thu, 14 May 2020 23:27:47 +0200 Subject: [PATCH 252/497] Redis Application Agent --- snmp/redis.py | 58 +++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 58 insertions(+) create mode 100755 snmp/redis.py diff --git a/snmp/redis.py b/snmp/redis.py new file mode 100755 index 000000000..59905c63c --- /dev/null +++ b/snmp/redis.py @@ -0,0 +1,58 @@ +#!/usr/bin/env python3 + +import subprocess +import json + +shell_cmd = "redis-cli info" +all_data = subprocess.Popen(shell_cmd, shell=True, stdout=subprocess.PIPE).stdout.read().split(b'\n') + +version = 1 +error = 0 +error_string = "" +redis_data = {} + +# stdout list to json +try: + category = '' + for d in all_data: + d = d.replace(b'\r', b'') + + if d in [b'']: + continue + + if d.startswith(b'#'): + category = d.replace(b'# ', b'').decode("utf-8") + redis_data[category] = {} + continue + + if not len(category): + error = 2 + error_string = 'category not defined' + break + + k, v = d.split(b':') + k = k.decode("utf-8") + v = v.decode("utf-8") + + # convert string to int/float, if possible +# try: +# if '.' 
in v: +# v = float(v) +# else: +# v = int(v) +# except ValueError: +# pass + + redis_data[category][k] = v + +except: + error = 1 + error_string = 'data extracting error' + +output = {'version': version, + 'error': error, + 'errorString': error_string, + 'data': redis_data} + +#print (json.dumps(output, indent=4, sort_keys=True)) +print (json.dumps(output)) From 20d3830fb67a3848e7fc03a769a5624088234498 Mon Sep 17 00:00:00 2001 From: SourceDoctor Date: Fri, 15 May 2020 05:18:33 +0200 Subject: [PATCH 253/497] Remove comments --- snmp/redis.py | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/snmp/redis.py b/snmp/redis.py index 59905c63c..097dda78c 100755 --- a/snmp/redis.py +++ b/snmp/redis.py @@ -34,15 +34,6 @@ k = k.decode("utf-8") v = v.decode("utf-8") - # convert string to int/float, if possible -# try: -# if '.' in v: -# v = float(v) -# else: -# v = int(v) -# except ValueError: -# pass - redis_data[category][k] = v except: @@ -54,5 +45,4 @@ 'errorString': error_string, 'data': redis_data} -#print (json.dumps(output, indent=4, sort_keys=True)) print (json.dumps(output)) From f579947096e931367e8633ae6b392786bd199e1d Mon Sep 17 00:00:00 2001 From: Karl Shea Date: Fri, 15 May 2020 01:15:20 -0500 Subject: [PATCH 254/497] Update gpsd 4s max time limit was causing some timeouts, especially given the two 1s sleeps. Especially with a lot of sentences coming back from the GPS chip it was probably not enough to always catch the right variables. 
--- agent-local/gpsd | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/agent-local/gpsd b/agent-local/gpsd index cfb0b0b97..a774c794f 100755 --- a/agent-local/gpsd +++ b/agent-local/gpsd @@ -4,8 +4,8 @@ $server = 'localhost'; $port = 2947; -set_time_limit(4); -ini_set('max_execution_time', 4); +set_time_limit(6); +ini_set('max_execution_time', 6); $sock = @fsockopen($server, $port, $errno, $errstr, 2); @@ -50,4 +50,4 @@ if ($resp) { function satellite_used($sat) { return $sat->used; -} \ No newline at end of file +} From fd43b7ca27da081f2da097c006ce6684d639cf5e Mon Sep 17 00:00:00 2001 From: "Chris (Someguy123)" Date: Fri, 22 May 2020 07:48:00 +0100 Subject: [PATCH 255/497] Remove absolute exe for powerdns + use python3 - `agent-local/powerdns` - Replaced the absolute path `/usr/bin/pdns_control` with `pdns_control`, since pdns_control can also be in `/usr/sbin` or `/usr/local/bin`. Python can find the executable using PATH just fine. - Changed `vars` to `kvars` to avoid conflicting with the reserved `vars` Python symbol - Changed shebang to use `python3` instead of `python` - as Python 2 is EOL. - `agent-local/powerdns-recursor` - Changed shebang to use `/usr/bin/env` instead of a hardcoded path to Python - Changed shebang to use `python3` instead of `python` - as Python 2 is EOL. **NOTE:** As per https://pythonclock.org/ - Python 2 is end-of-life, and is no longer included by default on modern Linux distros, along with macOS (OS X). I would recommend adjusting all Python-based agents to use Python 3 by default, instead of Python 2. 
--- agent-local/powerdns | 22 ++++++++++++---------- agent-local/powerdns-recursor | 2 +- 2 files changed, 13 insertions(+), 11 deletions(-) diff --git a/agent-local/powerdns b/agent-local/powerdns index 913abcf90..d4fc6bb30 100755 --- a/agent-local/powerdns +++ b/agent-local/powerdns @@ -1,16 +1,17 @@ -#!/usr/bin/env python - +#!/usr/bin/env python3 from subprocess import Popen, PIPE -vars = [ 'corrupt-packets', 'deferred-cache-inserts', 'deferred-cache-lookup', -'latency', 'packetcache-hit', 'packetcache-miss', 'packetcache-size', -'qsize-q', 'query-cache-hit', 'query-cache-miss', 'recursing-answers', -'recursing-questions', 'servfail-packets', 'tcp-answers', 'tcp-queries', -'timedout-packets', 'udp-answers', 'udp-queries', 'udp4-answers', -'udp4-queries', 'udp6-answers', 'udp6-queries' ] +kvars = [ + 'corrupt-packets', 'deferred-cache-inserts', 'deferred-cache-lookup', + 'latency', 'packetcache-hit', 'packetcache-miss', 'packetcache-size', + 'qsize-q', 'query-cache-hit', 'query-cache-miss', 'recursing-answers', + 'recursing-questions', 'servfail-packets', 'tcp-answers', 'tcp-queries', + 'timedout-packets', 'udp-answers', 'udp-queries', 'udp4-answers', + 'udp4-queries', 'udp6-answers', 'udp6-queries' +] rvars = {} -cmd = ['/usr/bin/pdns_control', 'show', '*'] +cmd = ['pdns_control', 'show', '*'] for l in Popen(cmd, stdout=PIPE).communicate()[0].decode().rstrip().split(','): v = l.split('=') @@ -19,5 +20,6 @@ for l in Popen(cmd, stdout=PIPE).communicate()[0].decode().rstrip().split(','): print("<<>>") -for k in vars: +for k in kvars: print(rvars[k]) + diff --git a/agent-local/powerdns-recursor b/agent-local/powerdns-recursor index 0ac290489..6949c7f44 100755 --- a/agent-local/powerdns-recursor +++ b/agent-local/powerdns-recursor @@ -1,4 +1,4 @@ -#!/usr/bin/python +#!/usr/bin/env python3 import json, subprocess from subprocess import Popen, PIPE From 06dcc6fada68f2c8ff428593fed365b058b5fd5b Mon Sep 17 00:00:00 2001 From: SourceDoctor Date: Sat, 30 May 2020 18:10:16 
+0200 Subject: [PATCH 256/497] remove unneeded file --- qq | 2069 ------------------------------------------------------------ 1 file changed, 2069 deletions(-) delete mode 100644 qq diff --git a/qq b/qq deleted file mode 100644 index 58a21ca28..000000000 --- a/qq +++ /dev/null @@ -1,2069 +0,0 @@ -commit 3361bf4c3ae5868b00d09215e10359f58a36ac12 -Author: SourceDoctor -Date: Wed May 1 14:55:54 2019 +0200 - - add mdadm support - -commit d822c899a78bdfd1e7d9f4df2bd5cd512b1696bd -Merge: 8fbfbd5 544fd8b -Author: VVelox -Date: Sun Mar 24 03:56:16 2019 -0500 - - Merge pull request #226 from VVelox/smart-update - - SMART monitoring update adding RAID support - -commit 544fd8bd6e525b3c29d9965c2b405b39ba49a98d -Author: Zane C. Bowers-Hadley -Date: Tue Mar 19 02:58:30 2019 -0500 - - update the date - -commit 8fbfbd5b39bbc22ca606327813c4fe54b38e4d30 -Merge: cb04f8c 38acc2b -Author: VVelox -Date: Tue Mar 19 02:53:30 2019 -0500 - - Merge pull request #225 from VVelox/pa-fix - - portactivity fixes - -commit 503fb9f7389d8307074ed856f96a870a0d26dd72 -Author: Zane C. Bowers-Hadley -Date: Tue Mar 19 02:25:17 2019 -0500 - - tested and it appears to work properly... documentation updated - -commit bdfd0ceea948382684a2bd96659731f9ac5f15b1 -Author: Zane C. Bowers-Hadley -Date: Tue Mar 19 00:40:06 2019 -0500 - - update the guessing to only use smartctl --scan-open and generate with more complex options - -commit 38acc2bd3d8e81414b4bfc2cb2bb3e955877fbc1 -Author: Zane C. Bowers-Hadley -Date: Mon Mar 18 03:39:17 2019 -0500 - - actually make this work on system not FreeBSD and deal with the bug where a connection may not have a protocol - -commit cb04f8c0ac148cb2b250d0a408f672db22e99ed5 -Merge: 147cb67 af32f56 -Author: VVelox -Date: Sun Mar 17 23:27:46 2019 -0500 - - Merge pull request #224 from VVelox/zfs-fix - - ZFS-FreeBSD divide by zero fix - -commit af32f56a74e0d9915b4beb419a28814e9bf058d8 -Author: Zane C. Bowers-Hadley -Date: Sun Mar 17 06:07:59 2019 -0500 - - merge... 
and update version - -commit 658c3c6ead712837bbb763c6b9ecdd782b043629 -Merge: 6564128 147cb67 -Author: Zane C. Bowers-Hadley -Date: Sun Mar 17 06:06:57 2019 -0500 - - Merge branch 'zfs-fix' of https://github.com/VVelox/librenms-agent into zfs-fix - -commit 656412830564593cfefeee5dceeae89bfa371000 -Author: Zane C. Bowers-Hadley -Date: Sun Mar 17 06:02:43 2019 -0500 - - remove unneeded else statement and re-apply patch - -commit 3ce06d6defc63f200f2bbfec7718748c8ec9e832 -Author: Zane C. Bowers-Hadley -Date: Sun Mar 17 05:55:33 2019 -0500 - - freshly initilized ZFS pulls that are not in use don't have a $data_demand_total - -commit 147cb67824b213045826677946166c8ee807f23c -Author: Tony Murray -Date: Tue Feb 12 20:33:05 2019 -0600 - - Use os-release whenever possible for the distro script (#220) - - Except centos... https://bugs.centos.org/view.php?id=8359 - -commit c9a0d2893e44f89f7c8c9450a9d42438eff1404d -Author: Felicián Hoppál -Date: Mon Feb 11 23:06:57 2019 +0100 - - Fix: zpool list output changed, incorrect values (#219) - - * fix zpool data, output of zpool list -pH changed in freebsd 11 - - * fix zpool data, output of zpool list -pH changed in freebsd 11 - - * bump version - - * version dump to 2 - -commit 3a407e3f721b7677fb2724af736ea87838d4dcc5 -Author: Tony Murray -Date: Thu Jan 17 11:44:02 2019 -0600 - - Update powerdns script to json (#218) - -commit ad300c035a2be4a55553c2994d5ce7ba69d57432 -Author: VVelox -Date: Wed Jan 9 23:41:39 2019 -0600 - - various misc fixes for the postfix poller (#112) - - * update postfix - - * move a few things to reduce the number of changed lines - - * move mself to the end - - * white space cleanup and another small cleanup of $chr - - * use $chrNew instead of $chrC when writing the current values - - * more white space cleanup - - * replace one more missed instance of iuoscp - -commit c40606140114b9059409f17a21b06fe8655b760e -Author: Slashdoom <5092581+slashdoom@users.noreply.github.com> -Date: Thu Jan 10 18:40:40 2019 +1300 
- - Fix: InnoDB stat support for MariaDB v10+ (#211) - - * mariadb innodb support for v10+ - - * fix newer innodb insert buffers - - * agent mysql to snmp extend - -commit 6fdaffa1b2ba8c49ed8bd38fb6445335b3146329 -Author: Mike Centola -Date: Thu Jan 10 00:35:28 2019 -0500 - - Added gpsd script for SNMP Extend (#217) - - Fixed Typos - - Fixed another typo - -commit f54c442d06abd7d2112dc4dc5db315524030308c -Merge: 1b90904 107d72e -Author: CrazyMax -Date: Sat Dec 29 22:17:13 2018 +0100 - - Merge pull request #216 from jasoncheng7115/patch-2 - - Added Proxmox VE Versoin support - -commit 1b90904f61c6d4078f2b427e17c82cf1f8b926ba -Author: VVelox -Date: Fri Dec 28 20:10:13 2018 -0600 - - convert the FreeBSD NFS stuff over to JSON and add in lots of sanity (#190) - - * convert fbsdnfsclient over to JSON - - * Convert the server stuff to JSON and fix the output of the client extend. - - * misc. stuff - - * lots of cleanup and sanity added to the FreeBSD NFS scripts - - * fix the #! line - - * update the docs at the top - -commit 5be1b168ba4e03ba3a58b3833a26587474ff7b29 -Author: VVelox -Date: Fri Dec 28 20:08:46 2018 -0600 - - JSON SNMP extend for UPS-APC app. 
(#189) - - * add snmp/ups-apcups, a Perl rewrite of snmp/ups-apcups.sh to support JSON - - * finish documenting it - - * add version and remove units from the returned values - -commit 107d72e862c2e2a53870272859252a5d39bf8c72 -Author: Jason Cheng <30381035+jasoncheng7115@users.noreply.github.com> -Date: Tue Dec 25 09:15:22 2018 +0800 - - Added Proxmox VE Versoin support - -commit 433d744953fa800ce49fa060b141c10663c0b952 -Author: Jason Cheng <30381035+jasoncheng7115@users.noreply.github.com> -Date: Sun Dec 16 22:21:00 2018 +0800 - - Added FreeNAS Version support (#215) - - Hi, - - I added FreeNAS version information support, as shown in the figure: - - ![2018-12-15 11 53 31](https://user-images.githubusercontent.com/30381035/50044886-2329a580-00c5-11e9-817c-b89a8374270d.png) - ![2018-12-15 11 53 49](https://user-images.githubusercontent.com/30381035/50044887-2329a580-00c5-11e9-93b4-b140809f84a3.png) - -commit 3c4511d987c2058bd6e8605bb0e87c6fc1d36861 -Merge: ff124a1 dc3d267 -Author: CrazyMax -Date: Fri Dec 14 19:03:01 2018 +0100 - - Merge pull request #214 from dsgagi/patch-1 - - Fix Debian detection on Proxmox - lsb_release binary doesn't exist - -commit dc3d2673ddc86d02ca2cd8d93bbf2fd53ca43c55 -Author: dsgagi -Date: Fri Dec 14 18:49:58 2018 +0100 - - Update distro - - Remove extra white spaces. - -commit 456d2e7672d8532af4df7f6da2b5c18b02778bf7 -Author: dsgagi -Date: Fri Dec 14 18:47:54 2018 +0100 - - Update distro - - Minor changes to the code, for better output. 
- -commit 5b53ab54c8a6d9f3b81abf42725b5da2b3ebec3d -Author: dsgagi -Date: Wed Dec 12 16:09:25 2018 +0100 - - Update distro - -commit ff124a1358755ceddc0ae6a4187d358da0d54d06 -Author: VVelox -Date: Thu Nov 22 09:04:58 2018 -0600 - - add portactivity SNMP extend (#159) - - * add portactivity SNMP extend in its initial form - - * update for the current json_app_get - - * add version to the returned JSON - - * add basic POD documentation - -commit a827734c0ec0e0cdf5e2a04730ec68dbad3fd477 -Author: gardar -Date: Thu Oct 25 19:19:20 2018 +0000 - - CloudLinux distro detection (#208) - - Added CloudLinux distro detection, previously CloudLinux got identified as RedHat - -commit 8d66211adc47d3bad5dd042e3ddbc59a23a28819 -Author: sparknsh <38894705+sparknsh@users.noreply.github.com> -Date: Thu Oct 25 07:17:42 2018 -0400 - - Fix package manager detection (#204) - - * Fix package manager detection - - * use release file for os detection - - * Use command to to validate package manager type - - * check if exists and the execute permission is granted - - * make script more portable - -commit d49fe954dfdeffbeee091051f1f0c515d020f281 -Author: Félim Whiteley -Date: Tue Oct 23 17:46:54 2018 +0100 - - Add divide by zero check (#191) - - On several servers (Ubuntu 18.04) DEMAND_DATA_TOTAL is 0 currently and is causing an error - - Traceback (most recent call last): - File "/usr/local/bin/zfs-linux", line 178, in - sys.exit(main(sys.argv[1:])) - File "/usr/local/bin/zfs-linux", line 76, in main - DATA_DEMAND_PERCENT = DEMAND_DATA_HITS / DEMAND_DATA_TOTAL * 100 - ZeroDivisionError: division by zero - -commit 381cc2466af521772607c682a9a707471a38ff4b -Author: sparknsh <38894705+sparknsh@users.noreply.github.com> -Date: Tue Oct 23 08:51:12 2018 -0400 - - fix nginx script indentation (#205) - -commit 3dada041e433318592e137678d24c32dd1a134b4 -Author: sparknsh <38894705+sparknsh@users.noreply.github.com> -Date: Thu Oct 18 10:37:10 2018 -0400 - - Fix binary operator expected error (#203) - 
-commit ccb244aa09de36e4e4dd85120702580144e86383 -Author: sparknsh <38894705+sparknsh@users.noreply.github.com> -Date: Wed Oct 17 12:28:39 2018 -0400 - - osupdate script clean up (#199) - - - Change script name for simplify of configuration management orchestration scripts. - - Update code syntax. - -commit f0f34b4a2d1a36836f6bffe4307d5d51524009b4 -Author: sparknsh <38894705+sparknsh@users.noreply.github.com> -Date: Wed Oct 17 12:28:07 2018 -0400 - - phpfpmsf script clean up (#198) - - - Change script name for simplify of configuration management orchestration scripts. - - Update code syntax. - -commit e0dcd4a064cedb09241e4af17198bf61e8fd1bf3 -Author: sparknsh <38894705+sparknsh@users.noreply.github.com> -Date: Wed Oct 17 12:27:21 2018 -0400 - - nginx script clean up (#197) - - - Change script name for simplify of configuration management orchestration scripts. - - Change 172.0.0.1 to localhost for better nginx handling. - -commit 1c61a96344317c13fce90811c11c0fa4cb7efb36 -Author: sparknsh <38894705+sparknsh@users.noreply.github.com> -Date: Wed Oct 17 12:26:45 2018 -0400 - - ntp-client data correction (#196) - - NTP was not displaying data right for linux servers. It was putting the frequency data into the offset data. This was giving bad graphs in the UI. Tested the correction on both RHEL and Debian based operating systems and all passes. - - Remove the .sh to simplify for configuration management orchestration scripts. - -commit 28a2f8ae55db7ca773f881560017b4890bc4bbce -Author: voxnil <14983067+voxnil@users.noreply.github.com> -Date: Mon Oct 15 13:00:16 2018 -0700 - - Update zfs-linux to use env for python - -commit ca7a5cdafe6dd603538aad8f63bc624143f98377 -Author: Brock Alberry -Date: Wed Sep 19 09:09:04 2018 -0400 - - PhotonOS distro detection (#193) - - * PhotonOS distro detection - - Detection before `/etc/os-release` since that is present yet missing the build number. 
- - * awk detection - - combining https://github.com/librenms/librenms-agent/pull/193 and https://github.com/librenms/librenms-agent/pull/194 - -commit 7542bd26f4c883c7e622056a1a34909d1dc9aa2c -Author: Allison -Date: Tue Sep 18 20:20:23 2018 -0700 - - Update distro (#194) - - Adding full detection for ASUSWRT-Merlin - -commit 7c173b160c5be401fa36d85edf15add61a3146d7 -Author: VVelox -Date: Mon Aug 27 04:03:01 2018 -0500 - - convert all the NTP stuff to JSON (#174) - - This requires https://github.com/librenms/librenms/pull/8571 and is for https://github.com/librenms/librenms/pull/8608 . - - Also converted this to regular sh instead of bash, so it will work on more systems with less dependencies. - - Has been tested as working on DD-WRT and FreeBSD. - -commit 99ad80740cb2fcea1c33e59caf1c05af5a53a14f -Author: VVelox -Date: Sun Aug 19 17:47:07 2018 -0500 - - update for the new json_app_get stuff (#179) - -commit c772ac97d3f5b805c311fd13d924513b4561d10b -Author: crcro -Date: Fri Aug 10 00:44:02 2018 +0300 - - added rockstor nas distro detection (#187) - -commit c535b1286c7701a2cefcd10ffd799fba65e56dd2 -Author: TheGreatDoc <32565115+TheGreatDoc@users.noreply.github.com> -Date: Thu Jul 19 22:39:08 2018 +0200 - - Asterisk Script (#183) - - Asterisk App support. - - Channels - - Calls - - Total SIP Peers - - Monitored Online - - Monitored Offline - - Unmonitored Online - - Unmonitored Offline - -commit 7e55d1cd5db04019de09aff7b134a85df71e901a -Author: István Sárándi -Date: Mon Jun 25 16:10:00 2018 +0200 - - Update fail2ban extend script to new JSON format (#181) - - As seen at [this location](https://github.com/librenms/librenms/blob/7fab99cfc13b80a543fb779d68c659b52fc074b1/includes/polling/functions.inc.php#L768) the JSON output needs to contain a `data` field. 
The poller php script actually also extracts this `data` field as one of the first steps, see at [this line](https://github.com/librenms/librenms/blob/c3007b483a12758042e5d0c6009a8ef48e3e1a39/includes/polling/applications/fail2ban.inc.php#L36). - Before I changed these parts the graph didn't show up because the RRD files simply weren't generated as an exception occurred in the poller. This fixes this problem. - -commit b5d77f1a999c5e0f08bc02550fd24e7c37b759c7 -Author: VVelox -Date: Mon May 28 07:22:09 2018 -0500 - - convert fail2ban-client to JSON (#172) - - * convert to JSON - - * add version return - - * change the version number of the returned data to 1 - -commit 41d36dc97f6886bae4ae6e8ba928892ef9d3c8c3 -Author: VVelox -Date: Fri Apr 27 16:46:57 2018 -0500 - - make using SN or device name selectable for SMART reporting (#168) - - * make using SN or device name selectable - - * change the default to SN - -commit 385d466eee1adc06eecd4a84cfd6615f2e4ba2ec -Author: Sander Steffann -Date: Fri Apr 13 17:42:27 2018 +0100 - - Add random entropy monitoring (#173) - -commit a56adb467a1cdf9785f977420dd07a48335f41b3 -Author: Serphentas -Date: Wed Apr 11 10:39:32 2018 +0200 - - add zfs support for linux (#170) - - * add zfs support for linux - - * fix pools and anon_hits_per - - * strip percent sign for pool cap - - * fix anon_hits json key typo - - * fix demand_data_hits json key typo - - * fix comparison as in #169 - - * fix min_size_percent - -commit 8ec6017246edc9784e670d84bd8b52ec094dbb82 -Author: VVelox -Date: Wed Apr 11 02:34:39 2018 -0500 - - correct arc size breakdown (#171) - -commit 3ddb1d6be6b4a4a0cd006251b497bb1ccf8170e8 -Author: VVelox -Date: Tue Apr 10 22:04:07 2018 -0500 - - correct arc size breakdown - -commit 90fd6f60f3aed5f71140d23a8d022ae9909e7473 -Author: Dylan Underwood -Date: Fri Mar 23 11:24:02 2018 -0500 - - Should be greater than or equal to (#167) - -commit 3a8462461595535a53554b0ad66bc922118e83d1 -Author: endofline -Date: Tue Feb 27 23:10:35 2018 
+0200 - - Replace disk identifier with disk serial in S.M.A.R.T snmp script (#164) - -commit bbd3b1309aaa3ecaf6f502e92718719539715c58 -Author: endofline -Date: Sun Feb 18 22:33:42 2018 +0200 - - Fix Command_Timeout missing from SMART output (#163) - -commit fd9fd178a4b43feafb414822167b3033693c8efc -Author: crcro -Date: Sat Jan 6 22:06:45 2018 +0200 - - extend: powerdns-dnsdist (#158) - - * powerdns-dnsdist app - - * fix script in help - - * removed local data manipulation - - * again name of file in script help - - * removed personal api info - -commit bacaca0be4104cc003222b941e433d5470cae76d -Author: VVelox -Date: Sat Dec 30 05:42:37 2017 -0600 - - ZFS SNMP agent :3 <3 (#156) - - * Add it as it currently is. Needs to be moved over to JSON - - * rename it to zfs-freebsd as it is FreeBSD specific - - now uses JSON - - * misc. updates and document it all - - * minor spelling correction - -commit c7cae0765e0f5072fdf3dd224f357290e2697fb5 -Author: VVelox -Date: Sat Dec 30 05:39:36 2017 -0600 - - update the fail2ban stuff (#155) - - Dropping firewall checking as the new fail2ban uses pf and anchors on - FreeBSD, which while esoteric as fuck works nicely and is reliable. 
- -commit 8920cd3f290e8c13a3bb7db96ceb8db05845869d -Author: Slashdoom <5092581+slashdoom@users.noreply.github.com> -Date: Wed Dec 13 16:13:10 2017 +1300 - - freeradius.sh: new agent for incoming main PR (#151) - - * Update os-updates.sh - - * Update os-updates.sh - - * Update os-updates.sh - - * Create freeradius.sh - - * Update freeradius.sh - - * Update freeradius.sh - -commit 3b9d632a8d6dbd6ac3f42f75ba36faa235ef4440 -Author: arrmo -Date: Mon Dec 4 14:11:17 2017 -0600 - - hddtemp, ignore devices not supporting SMART (#153) - -commit 7fb48df8579a8e113153c1439a4fa92829847d9f -Author: Daniel Bull -Date: Fri Oct 27 06:41:05 2017 +0100 - - Fix: Apache SNMP extend IndexError (#116) - - See issue for more information: - https://github.com/librenms/librenms-agent/issues/95 - -commit 2996ad88b00f24777c0e5629cb931b8b448dd515 -Author: dragans -Date: Fri Oct 27 07:39:09 2017 +0200 - - fix: Update mysql (#127) - - Update mysql agent script based on updated changes in newest version of Percona Monitoring Plugins (Cacti template). - - Changes enable correct parsing of status data for newer versions of MySQL/MariaDB database servers and should be backward compatible with older versions. - -commit d0762871b4cfb0a7cbfcc5ba99bc1fe0b0c51cf3 -Author: Slashdoom <5092581+slashdoom@users.noreply.github.com> -Date: Tue Oct 10 08:02:05 2017 +1300 - - os-update.sh: back to package management based and count fixes (#149) - - * Update os-updates.sh - - * Update os-updates.sh - - * Update os-updates.sh - -commit 6a40ca1e9cc4319e6b7363541feb9681dcf5bc5f -Author: tomarch -Date: Wed Sep 20 21:47:11 2017 +0200 - - fix munin agent (#148) - - Without the full munin-scripts path, this script won't find munin file and return nothing. 
- -commit 1b03d2f9f74ca29b177e596c0ff2ba13a0e1292d -Author: Uwe Arzt -Date: Wed Sep 6 20:42:58 2017 +0200 - - Add Oracle Linux Distribution to distro script (#146) - - * Add Oracle Linux to distro script - - * Revert local change - -commit 45478555df856af51e707c3cd6ace716c709e0fb -Author: arrmo -Date: Sun Aug 27 14:59:15 2017 -0500 - - Update Distro, for Raspbian Support (#144) - -commit 3380a85ff13f0dad706690b71b2bd8e9d9452926 -Author: Zucht -Date: Sat Aug 12 17:30:02 2017 +0200 - - Update raspberry.sh (#143) - - Fix state WMV9 - -commit a50e1dffb89738814a1183e2e0560ab86daaf3f0 -Author: Neil Lathwood -Date: Thu Aug 3 17:11:26 2017 +0100 - - Update raspberry.sh (#140) - -commit 584fd645d470e85e30607b8be3102292b4a7b54e -Author: drid -Date: Wed Jul 12 22:55:02 2017 +0300 - - C.H.I.P. power values (#134) - - * C.H.I.P. power values - - * Added attribution - - * Fix ACIN current calculation - - * Battery current fix - -commit 3f9dc0f5f02c1590d6e84ac10c6f7c973d54f771 -Author: RedChops -Date: Thu Jun 29 16:11:26 2017 -0400 - - Fix for bug https://github.com/librenms/librenms/issues/6821 (#138) - -commit a4efb62466c58ee05b3c078283a2a9fecb7cd3ce -Author: Stefan Funke -Date: Wed Jun 28 22:36:26 2017 +0200 - - unnecessary use of wc while already calling grep (#137) - - * useless call of wc while already calling grep - - * move grep count call to CMD_GREP to stay in project style - -commit cc6d7882dba89bce0a1f3a27d9fd3b399a2430b9 -Author: einarjh -Date: Sat Jun 10 11:20:48 2017 +0200 - - Strip all non-ASCII characters from hddtemp output (#136) - -commit 3903f431f7f56ef4f48bd50d28c05aec8e795bc0 -Author: crcro -Date: Tue Jun 6 01:00:29 2017 +0300 - - bash script for pi-hole app (#135) - -commit 84630dfb84539936efa47bfe3b13638f809a82c5 -Author: Félim Whiteley -Date: Wed May 31 22:23:38 2017 +0100 - - Fix for first line as localhost (#130) - - An example output like below where the first line of output is just "localhost" so it causes the splitting to cause an out of index 
error. - - Example: - cat /tmp/apache-snmp - localhost - ServerVersion: Apache/2.4.25 (Ubuntu) PHP/5.6.30-5+deb.sury.org~trusty+2 - ServerMPM: prefork - Server Built: 2016-12-21T00:00:00 - CurrentTime: Thursday, 18-May-2017 19:26:43 EDT - RestartTime: Thursday, 18-May-2017 11:35:48 EDT - ParentServerConfigGeneration: 2 - ParentServerMPMGeneration: 1 - ServerUptimeSeconds: 28255 - ServerUptime: 7 hours 50 minutes 55 seconds - Load1: 0.04 - Load5: 0.05 - Load15: 0.10 - Total Accesses: 5367 - Total kBytes: 61432 - CPUUser: 19.69 - CPUSystem: 1.05 - CPUChildrenUser: 0 - CPUChildrenSystem: 0 - CPULoad: .0734029 - Uptime: 28255 - ReqPerSec: .189949 - BytesPerSec: 2226.38 - BytesPerReq: 11721 - BusyWorkers: 1 - IdleWorkers: 6 - Scoreboard: ___....._.__.W........................................................................................................................................ - -commit 16178c6ac31ed2511243ccfab5b25b69b031d3fa -Author: Aldemir Akpinar -Date: Thu Jun 1 00:23:07 2017 +0300 - - Added devuan support for os-updates.sh and removed code repitition (#131) - -commit f473c5e30ca0649baa590dd5a7f041ce91f57e73 -Author: BlackDex -Date: Tue May 23 14:44:05 2017 +0200 - - Added try-except checks for global values. (#107) - - Fixed an error which prevented output. - It seems some ceph version probably use different values or something. This is a quick fix to have the script output the correct values. - -commit 6fdcc91f7041ad49cbb906b814a1b5ecf8fd2e4c -Author: Karl Shea -Date: Thu May 4 02:06:32 2017 -0500 - - Fix bind config file read (#125) - -commit e3dad6cfc9c6549e1f5cfef41ef2cf20a9827352 -Author: VVelox -Date: Wed May 3 09:23:40 2017 -0500 - - BIND cleanup and expansion (#108) - - * add BIND named SNMP extend - - * nolonger piss the entire stats across the wire, but crunch them and return them - - * more work on bind - - * more misc. 
updates - - * add proper agent support as well as optional zeroing - - * add -m - -commit 69eee9fb898bd521e3f4ab5d2d93cf5b34949e1d -Author: Aldemir Akpinar -Date: Tue May 2 12:22:19 2017 +0300 - - Added Devuan GNU/Linux support (#124) - -commit eaa6af235978405418d8e6d6e0beb04f761a578b -Author: crcro -Date: Thu Apr 27 22:54:55 2017 +0300 - - snmp-extend: sdfsinfo (#122) - - * sdfsinfo app snmp extend - - * rewrite script to bash - - * more vars - -commit 69e1ace889cfee6963cc6506a5e96fb30cabac1b -Author: RedChops -Date: Sat Apr 22 19:29:00 2017 -0400 - - Include missing SMART ids in the output (#120) - -commit 705cc0f3fe62e4837ecf4be86dec95558ca07ff3 -Author: Svennd -Date: Tue Apr 18 22:34:05 2017 +0200 - - add support for SGE/rocks job tracker (#118) - -commit d7085e001cebf0bf086b84ac0c65cad54f90ee38 -Author: Chris Putnam -Date: Tue Apr 18 13:32:41 2017 -0700 - - hddtemp: parallelize calls to hddtemp for performance (#117) - - This poll script runs hddtemp with a list of all drives as arguments and reads the output. hddtemp scans each drive's SMART status serially, which scales poorly with a large number of drives. - - In lieu of a patch to the actual hddtemp project, optionally use GNU parallel when available to parallelize the call to hddtemp. - - In testing a machine with 58 drives I went from a runtime of about 5 seconds per run to 0.5s, a performance improvement of 10x. - -commit 5f47aad492a679a81da0a19f2649f60d6637e199 -Author: Chris Putnam -Date: Fri Apr 7 01:45:56 2017 -0500 - - hddtemp: improve detection of drives (#115) - - Previously, this script was only able to find 26 drives (sda-sdz) due to the use of globbing. - - A better strategy for detecting drives would be to use lsblk on systems that support it, failing over to globbing. - - This patch adds support both for lsblk and a more comprehensive glob solution with find that will at least catch 26^2 drives. 
- -commit 67bae5a86cfe47c90ade541c1e613f7e5e788cfd -Author: Philip Rosenberg-Watt -Date: Thu Apr 6 03:24:36 2017 -0600 - - fix: Update proxmox agent to use new Perl module (#88) - - PVE::API2Client is deprecated in Proxmox 4.4-6. Agent now requires - installation of libpve-apiclient-perl via apt. - - This commit fixes #81. - -commit a7fe1f8e6f98640463a93f934ac2580311db09ca -Author: Tony Murray -Date: Wed Mar 29 19:11:23 2017 -0500 - - Copy powerdns-recursor to snmp and remove <<>> header (#111) - -commit 74faec281c13928e60c140d85bb3138e7297fa79 -Author: Florian Beer -Date: Thu Mar 30 00:00:26 2017 +0200 - - Postfix app bug fixes (#105) - - * Postfix app bug fixes - - - add missing DS - - fix some totals - - * Move new variable to end of output - -commit 1e7762fb4eb832ed9d7530994804a284028c9c7c -Author: VVelox -Date: Wed Mar 22 09:28:57 2017 -0500 - - add SMART SNMP extend script (#101) - - * add SMART SNMP extend - - * cleanup default disk examples - - * correct a small typo - - * add option caching support - - * add checking selftest log and nolonger zeros non-existent IDs - - * now uses a config file - - * add the ability to guess at the config - - * properly remove device entries with partitions now and avoid adding dupes in a better manner - - * now have smartctl scan as well to see if it missed anything - - * note why ses and pass are ignored - - * properly use the cache file in the config now - - * actually use the cache now - -commit 94aa0feacdfc71b6d8044c66992069538071ca39 -Author: VVelox -Date: Sun Mar 19 13:03:59 2017 -0500 - - add unbound SNMP extend script (#102) - -commit 495f46afb431a0ef29fe58c40a01c7f9d352c3d5 -Author: Tony Murray -Date: Fri Mar 10 06:29:19 2017 -0600 - - Update mysql script to php7 version... (#104) - - * Update mysql script to php7 version... 
- - * Update mysql - -commit 61579bf0ace0a963f6ffbf9ca263910c5f6614fe -Author: Tuxis Internet Engineering V.O.F -Date: Wed Mar 8 09:51:04 2017 +0100 - - Enable ipv6 in Xinetd (#100) - - * Fix indenting and enable IPv6 in Xinetd - - * Fix changelog - - * Typo - -commit 7f79fc4167adac967d89d0ee6277f78886a5c7b9 -Author: Tony Murray -Date: Tue Mar 7 23:48:15 2017 -0600 - - Update mysql - -commit 1b1d8b491f842edc3e04c5405ae13de4f60a6751 -Author: VVelox -Date: Tue Mar 7 23:40:09 2017 -0600 - - clean up snmp/mysql_stats.php and make it a proper snmpd extend script now (#99) - - * cleanup and make it something that can properly be invoked via cli - - * blank the user/pass/host bits increasing the chances it will work out of the box - - * Update mysql_stats.php - - * Update mysql_stats.php - - * Update mysql_stats.php - - * Update mysql_stats.php - - * Rename mysql_stats.php to mysql - -commit e7c331070100290b3780ba6907add81be82165c6 -Author: VVelox -Date: Fri Mar 3 14:41:38 2017 -0600 - - add Nvidia SNMP extend poller (#94) - - * add Nvidia SNMP extend - - * update the extend path - - * now support more than 4 GPUs - - this will now support how ever many GPUs are installed on a system... - - Just double checked and it appears nvidia-smi dmon only reports up to 4 GPUs at a time... 
so if we have more than 4, begin checking they exist and if so print them - -commit 2308481188f72bbad12d7d94ebd941a73fc97655 -Author: VVelox -Date: Fri Mar 3 12:55:55 2017 -0600 - - add squid snmp extend (#93) - -commit 2700598925c8481641def507a4bf902a27cb01af -Author: VVelox -Date: Fri Mar 3 08:49:15 2017 -0600 - - FreeBSD NFS extends (#90) - - * add the FreeBSD NFS client and server extends - - * white space cleanup - - * white space cleanup - -commit db3b5c7cec8fa35832739e742c84fa61e465bd9f -Author: VVelox -Date: Wed Mar 1 17:46:13 2017 -0600 - - add Postgres SNMP extend (#91) - - * add Postgres SNMP extend - - * minor comment cleanups - - * use env for check_postgres.pl - - * quote the string - -commit 42e488743917fd39019ac9300caf391a5a8120c8 -Author: VVelox -Date: Wed Mar 1 12:35:06 2017 -0600 - - add detailed Postfix poller (#92) - - * add detailed postfix poller - - * env perl - -commit c4101c9ef2a8e8dffbfaee55f067c7c89fe18e27 -Merge: bb4c67b 8343e7f -Author: Tony Murray -Date: Fri Feb 24 11:10:43 2017 -0600 - - Merge pull request #84 from VVelox/master - - add a snmpd extend script for fail2ban - -commit 8343e7f34e1c382051f65bb9d7cf5bad454b934e -Author: Tony Murray -Date: Fri Feb 24 11:09:21 2017 -0600 - - Update fail2ban - -commit 4fcce9f01dd5b0c7979a2ebc95298ff40239bfd9 -Author: Tony Murray -Date: Fri Feb 24 11:02:19 2017 -0600 - - Redefining $firewalled - -commit 8bfbce68e503b2ddcdcc9619307d168b1c332df3 -Author: VVelox -Date: Thu Feb 23 09:54:38 2017 -0600 - - if cache older than 360 seconds, don't use it - -commit 0a78888889d1e67e5696bb59e2c8fff4fd76f9ff -Author: VVelox -Date: Thu Feb 23 09:13:59 2017 -0600 - - use $f2bc for getting jail status now and not just only overall status - -commit 1e160b86e46ff7023ea13d8de13fe98e52e3b270 -Author: VVelox -Date: Thu Feb 23 08:46:18 2017 -0600 - - don't reuse the variable $iptables - -commit 4b53918a7d09dc705c761c6eba3d0b68caca7159 -Author: VVelox -Date: Thu Feb 23 08:45:04 2017 -0600 - - poke the user about 
setting a iptables path as well - -commit 90620a8558e0b164fb2a714c007b14b1ba1b1567 -Author: VVelox -Date: Thu Feb 23 08:40:59 2017 -0600 - - misc. path cleanups - -commit 5ee0faa2c38e887b61b34fd4140ae23a8583d350 -Author: VVelox -Date: Wed Feb 22 21:58:03 2017 -0600 - - make caching optional - -commit 4ffd86f8bdbe8825ac0793c1cf0b86a886656f34 -Author: VVelox -Date: Wed Feb 22 21:42:53 2017 -0600 - - Update fail2ban - -commit 0227094c6fc9cf31d7d5f9a45a63e093b6e38aa5 -Author: VVelox -Date: Mon Feb 20 13:18:50 2017 -0600 - - track both firewall and fail2ban-client - -commit 3932875ce04c1b51b8bf4c43c9934f2b29800acb -Author: VVelox -Date: Mon Feb 20 03:50:59 2017 -0600 - - correct a comment - -commit c367e9ff9d61f9cee619c19278a2bdc6d8fc7637 -Author: VVelox -Date: Mon Feb 20 03:49:50 2017 -0600 - - now requires cron usage - -commit d90f3e879200108794beb7a2a4cc047f2938899e -Author: VVelox -Date: Sun Feb 19 23:41:51 2017 -0600 - - use fail2ban-client instead - -commit 710f38e8ff7cee520f9c7cc8ada421b6f32684c5 -Author: VVelox -Date: Sat Feb 18 00:09:12 2017 -0600 - - move this over to perl and properly check iptables - -commit 6f76427952194ca6036181c31402887e72317308 -Author: VVelox -Date: Fri Feb 17 23:08:53 2017 -0600 - - remove pointless exit - -commit 4b600ad2b41be4f338f1745320b3dbd64c5f5ba9 -Author: VVelox -Date: Fri Feb 17 23:08:25 2017 -0600 - - whoops, forgot to remove \ as well - -commit bb4c67b217fc6f553c36861d4da0c5edfd61913c -Merge: ff6ee0e e3f3bd3 -Author: Tony Murray -Date: Fri Feb 17 22:42:57 2017 -0600 - - Merge pull request #86 from florianbeer/patch-1 - - Update shebang - -commit 6955e5d410f87be4423ac86111841721292911fd -Author: VVelox -Date: Fri Feb 17 10:33:02 2017 -0600 - - don't assume it appends the jail name - -commit 8b78f863d34f24858ca3d061df02efa6213d3b3b -Author: VVelox -Date: Fri Feb 17 10:32:27 2017 -0600 - - update to check fail2ban and f2b - -commit e3f3bd3efb36ee391430d61d363afa1e8d322ae3 -Author: Florian Beer -Date: Fri Feb 17 16:37:00 2017 
+0100 - - Update shebang - - With the original shebang this script didn't work on Debian and Ubuntu machines. Using `/usr/bin/env bash` makes the script more portable. - -commit ff6ee0e2bc0e84ce1b0c4276713f8cb70d3154a2 -Author: Robert Verspuy -Date: Fri Feb 17 01:46:13 2017 +0100 - - Fixed correct amount of pipeline symbols when degrees symbol is missing. (#85) - - When the script is called through xinetd/check_mk (on my system), there is no degree symbol, but a space. - Changed the script to handle both correctly - -commit 21c953d11af41e1256ecf92070fc36b999b1e084 -Merge: 1ec8f20 58d1726 -Author: kitsune -Date: Thu Feb 16 11:30:03 2017 -0600 - - Merge branch 'master' of https://github.com/librenms/librenms-agent - -commit 1ec8f204ee0c96ca0a9cf77dff7bdb0f79402462 -Author: VVelox -Date: Thu Feb 16 10:50:46 2017 -0600 - - add fail2ban snmpd extend script - -commit 58d17264c7d57978a408b800084f153857d3b3f9 -Author: rockyluke -Date: Thu Feb 16 16:12:01 2017 +0100 - - Use command -v instead binary path (#80) - -commit 60becc9b3a9429a42faae18440821b90ac6586fc -Author: VVelox -Date: Thu Feb 16 09:10:53 2017 -0600 - - add a single pool php-fpm net-snmpd extend poller (#83) - -commit 677bd4187e90211a70419e01fe97a809c6cabfd0 -Author: VVelox -Date: Wed Feb 15 11:37:18 2017 -0600 - - add a single pool php-fpm net-snmpd extend poller - -commit 575956cae3ea5fcb014db3777a83e52026f95baa -Author: crcro -Date: Fri Jan 20 10:47:30 2017 +0200 - - snmp: exim-stats (#79) - - * exim-stats frozen mails - - * added total queue info - -commit d090686b722a1b0d8ded3ebfedec5c3b0f8a46a3 -Merge: ae43e5f dc60463 -Author: Tony Murray -Date: Wed Dec 14 16:39:59 2016 -0600 - - Merge pull request #75 from bungojungo/master - - Added fedora support to distro/os-updates - -commit ae43e5f493941aab81c96e3dc9378da434b55ce6 -Merge: 6c130ea de1f177 -Author: Tony Murray -Date: Wed Dec 14 16:39:47 2016 -0600 - - Merge pull request #73 from paulgear/master - - Make ups-nut work on Debian Jessie - -commit 
6c130ea65e191d76a12b7d6d31d4726937b0f3e4 -Merge: e527768 3d061d2 -Author: Tony Murray -Date: Wed Dec 14 16:34:10 2016 -0600 - - Merge pull request #76 from murrant/powerdns-python26 - - Support python2.6 for powerdns scripts - -commit 3d061d24079d0dcb7458a75b3d83d5aaba43acc9 -Author: Tony Murray -Date: Wed Dec 14 16:27:15 2016 -0600 - - Support python2.6 for powerdns scripts - fixes #67 - -commit dc604636bccd8779bd261b013af4872cad14e1f0 -Author: Jason Scalia -Date: Wed Dec 7 22:11:48 2016 -0500 - - added fedora/dnf support - -commit 8b3ca2dac293ef132f1e48afa871b7158d692d90 -Author: Jason Scalia -Date: Wed Dec 7 21:48:22 2016 -0500 - - Added fedora support - -commit de1f1775cc26aacb931141182c212de706b80b5f -Author: Paul Gear -Date: Sat Dec 3 14:16:54 2016 +1000 - - Restore previous default UPS name - -commit 465ec12dd4757baa95560b11f89a433f05fb7454 -Author: Paul Gear -Date: Sat Dec 3 14:07:02 2016 +1000 - - Make ups-nut work on Debian Jessie - - This script was broken on Debian Jessie (and probably - all other Debian-based distros, including Ubuntu). - This commit removes the hard-coding of paths and uses - $PATH per normal bash practice, and should work on a - wider range of shell variants. - -commit e52776889cea5e3379422ce4ffb7171bba4fbdf1 -Author: arrmo -Date: Sat Nov 26 02:12:41 2016 -0600 - - Update to Distro (to support dd-wrt) (#72) - -commit c5fea261dea71cc9600936455bdf357cc062b220 -Author: Mathias B -Date: Thu Nov 17 09:31:56 2016 +0100 - - Add Debian support (#71) - - Before that only Ubuntu was supported, now Debian users can use this nice script too! 
- -commit 36ed3f008c6f2a0cc0be0cdb1ce9199a6e495dbc -Author: Karl Shea -Date: Sat Oct 8 15:26:07 2016 -0500 - - Agent: script to collect data from GPSD (#69) - -commit 91c251fd94d73f44e8757b242db82ed240f80a1d -Author: Tuxis Internet Engineering V.O.F -Date: Wed Oct 5 11:06:48 2016 +0200 - - fix: a dirty hack to prevent failing of stats when the cluster is rebuilding (#68) - - because Ceph returns '-inf' which the json decompiler doesn't seem to get.. - -commit dd365168a5eedf655d87e34e89664b191f855a15 -Author: crcro -Date: Mon Oct 3 21:27:56 2016 +0300 - - fix conflict (#66) - -commit 58e16b794a0e33d0dd71d8c1f936bc8b25ad7ced -Author: crcro -Date: Sun Sep 25 16:28:37 2016 +0300 - - snmp-extend: os-updates (#65) - - * reverted back to os-release checks, added arch pacman - - * fixed file name - -commit 2699cde73fcbca9e556a762dcfd90c81e5561d26 -Author: crcro -Date: Sun Sep 25 16:28:00 2016 +0300 - - snmp-extend: ups-apcups (#58) - - * snmp-extend-ups-apcups - - * rewrite of apc ups - - * header fix - - * header fix - -commit fa308bfe3f388f110e9df083d6b2c649fa69472e -Author: crcro -Date: Sat Sep 24 20:30:09 2016 +0300 - - snmp-extend: ups-nut update 1 (#63) - - * new code for better matching, snmp-extend compliance - - * removed unused vars - - * extra fixes - - * removed the need of tmp file - - * removed charge_low, deemed useless by user - - * removed values that are not plottable - - * readded ds - -commit f63c4ab7bea382b08d0450b42a374db082ccd0ef -Merge: c1c537e d9f36a8 -Author: Tony Murray -Date: Mon Sep 12 22:01:51 2016 -0500 - - Merge pull request #61 from crcro/app-ntp-server-update-1 - - app: ntp-server update 1 - -commit c1c537eea11fde70435e88b28b17292dc7c72f75 -Merge: 9a2716d 11a9fce -Author: Tony Murray -Date: Mon Sep 12 22:01:24 2016 -0500 - - Merge pull request #57 from crcro/snmp-extend-ups-nut - - snmp-extend: ups-nut - -commit 9a2716dc83ad11462495e5ee804fb122eb402faa -Merge: 87cc835 85ae77c -Author: Tony Murray -Date: Mon Sep 12 19:33:07 2016 -0500 - - 
Merge pull request #60 from crcro/remove-ntp-php - - remove obsolete ntp scripts - -commit d9f36a84b13dd42361d24df11d6cb60c7b71f260 -Author: crcro -Date: Mon Sep 12 12:48:17 2016 +0300 - - cleaner code - -commit 28cae5cff3b87532fd145c55de5b22aa0f4c6d05 -Author: crcro -Date: Mon Sep 12 11:52:13 2016 +0300 - - better handling default case - -commit aeecb1621c8ed5863d5c7563ffc96047909b8cfa -Author: crcro -Date: Mon Sep 12 11:45:14 2016 +0300 - - header fix - -commit f48f4cc6e513773fac094d6b3115954deaeacbc7 -Author: crcro -Date: Mon Sep 12 11:43:34 2016 +0300 - - update 1 ntp server - -commit 87cc835096ffdd4f8310b51e684f63aa7726d14d -Author: crcro -Date: Sat Sep 10 19:08:03 2016 +0300 - - os-updates.sh clean (#59) - -commit 85ae77c01c28308dd1f58b897aa7c8efe5b87386 -Author: crcro -Date: Sat Sep 10 04:50:33 2016 +0300 - - remove obsolete ntpd-server.php - -commit 262f798a9737a5b62bef0ab7a657782a934b86ac -Author: crcro -Date: Sat Sep 10 04:48:55 2016 +0300 - - remove obsolete ntp-client.php script - -commit 11a9fcef62571e12168b8c1e9d1ac604b65c227d -Author: crcro -Date: Fri Sep 9 15:36:01 2016 +0300 - - snmp-extend-ups-nut - -commit 6128dc3c7133802ff66b199bc99289fb07761d6e -Author: vectr0n -Date: Fri Sep 9 02:16:28 2016 -0400 - - Update hddtemp to include hddtemp -w option (#56) - - hddtemp gives inconsistent values in it's current state, after some debugging I was able to resolve the issue by passing -w to the hddtemp command, this will wake-up the drive if it is in a sleep state to gather information. 
- -commit 42bc0a07aab450e242471e271380fc29642b34e7 -Author: crcro -Date: Wed Sep 7 22:37:31 2016 +0300 - - ntp-client app using shell only, tested with ntpq 4.2.8p8 (#54) - -commit 718d627cfdbad19848a384fc8eaba332dcaef504 -Author: crcro -Date: Wed Sep 7 22:37:23 2016 +0300 - - app: ntp-server (#55) - - * ntp-server app using shell only, tested with 4.2.8p8 - - * fix for higher stratum value - - * change the description in comment to reflect latest webui push - -commit 351e5aa7bc6f1a79d51b1bd098cace659c1b0e9f -Author: Tatermen -Date: Sun Aug 28 20:06:04 2016 +0100 - - Freeswitch (#53) - - feature: Added freeswitch support - -commit 839b518358d2acb488c3d7709e12392ee2b4c224 -Merge: 6a84755 561efa4 -Author: Neil Lathwood -Date: Tue Aug 23 21:48:08 2016 +0100 - - Merge pull request #52 from murrant/move-scripts - - SNMP extend scripts from the main repo - -commit 561efa41be5e22614912300ac9242582340e0662 -Author: Tony Murray -Date: Mon Aug 22 21:35:13 2016 -0500 - - SNMP extend scripts from the main repo - -commit 6a84755105f651d03939310b4bd5a3cd85dc90dd -Merge: c2e4c33 deb3683 -Author: Tony Murray -Date: Sun Aug 21 19:58:13 2016 -0500 - - Merge pull request #51 from crcro/dhcp_pretty - - rewrite dhcp-stats with loop - -commit deb36833f17d31ddd6176aa7dfc3767817e7c446 -Author: crcro -Date: Mon Aug 22 01:45:23 2016 +0300 - - @paulgear recomandation - -commit c2e4c33abf5edbc0b7a5a00f8871f87d4d0f0513 -Merge: 672918c 9cd81f1 -Author: Tony Murray -Date: Wed Aug 17 09:59:11 2016 -0500 - - Merge pull request #50 from OpcaoTelecom/unbound - - Added unbound stats script - -commit 9cd81f1b930e2ed777ecf3bf6c7deff65df6e564 -Author: Alan Gregory -Date: Wed Aug 17 09:36:39 2016 -0300 - - Added unbound stats script - -commit 672918c40fd87455398267cbf744a52362f738a7 -Merge: 9fe5444 87584e7 -Author: Tony Murray -Date: Tue Aug 16 12:43:10 2016 -0500 - - Merge pull request #48 from crcro/raspberry-sensors - - raspberry sensors - -commit 9fe5444738d086b1d33f92ca0e5905a14cd9c8a0 -Merge: 
c3afbf3 b6bdb9e -Author: Tony Murray -Date: Mon Aug 15 22:57:52 2016 -0500 - - Merge pull request #49 from murrant/ntp - - Copy ntp scripts from the main repo. - -commit b6bdb9ea45d579becc8f858090e8b7d3e4c809ea -Author: Tony Murray -Date: Mon Aug 15 22:56:31 2016 -0500 - - Copy ntp scripts from the main repo. - -commit 87584e7ef79996db60cd62e64dd4cbaf53a0bac8 -Author: crcro -Date: Sun Aug 14 17:43:27 2016 +0300 - - added snmp extend to get raspberry sensors - -commit c3afbf35bd81bff0dbcdb67e6657dd042ae67588 -Merge: 9623342 aa59548 -Author: Neil Lathwood -Date: Tue Aug 9 19:47:51 2016 +0100 - - Merge pull request #45 from murrant/os-updates - - Do not detect os, detect package managers - -commit 9623342554317ba55f7a987d18250e941a0a7c1f -Merge: 0f5a115 7828777 -Author: Tony Murray -Date: Tue Aug 9 13:08:41 2016 -0500 - - Merge pull request #46 from murrant/distro - - Update distro to match the main repo file - -commit aa59548e0c3d6e5462cd2342ca671dc72430c3f1 -Author: Tony Murray -Date: Tue Aug 9 12:50:23 2016 -0500 - - Do not detect os, detect package managers. - Add pacman support. 
- -commit 78287777696f6569dfe575770f1c47553fddd5a9 -Author: Tony Murray -Date: Tue Aug 9 11:40:01 2016 -0500 - - Update distro to match the main repo file - -commit 0f5a1150f373371fc508e160e58c56cea5adbb99 -Merge: d6308e4 05fe3f8 -Author: Neil Lathwood -Date: Thu Aug 4 18:53:10 2016 +0100 - - Merge pull request #40 from florianbeer/patch-1 - - Add Debian and make update call more robust - -commit d6308e4e1c04d69688d724c7c5c04ab0a3c94fbc -Merge: 3740f3e 2accc28 -Author: Neil Lathwood -Date: Wed Aug 3 21:09:08 2016 +0100 - - Merge pull request #42 from crcro/app-dhcp-stats - - app-dhcp-stats snmp extend - -commit 2accc2848c44f8c2c33a455eb1a2e4ffe801921c -Author: crcro -Date: Wed Aug 3 22:09:55 2016 +0300 - - app-dhcp-stats snmp extend - -commit 05fe3f8cc195b797f69b0599ca2a2e198f0b5d0c -Author: Florian Beer -Date: Wed Aug 3 12:16:22 2016 +0200 - - Remove update call as this requires root - - See discussion here https://github.com/librenms/librenms-agent/pull/40#issuecomment-237198796 - -commit fac01628a07cf8083f91d9924ab8d63a9d4141db -Author: Florian Beer -Date: Wed Aug 3 04:51:35 2016 +0200 - - Add Debian and make update call more robust - - - Debian based systems need to update the index before being able to report upgradable packages. - - Debian old-stable doesn't have `apt` yet and Ubuntu 14.04 emits the following warning when using `apt` in a script: - `WARNING: /usr/bin/apt does not have a stable CLI interface yet. Use with caution in scripts.` - - By using `apt-get`, issuing a `update` call first and then counting the result of `grep 'Inst'`, this script now works on Debian 7, Debian 8, Ubuntu 14.04 and Ubuntu 16.04. 
- -commit 3740f3e147d7d97e10e4b8e77757ab67deb2bb84 -Merge: fb678cb 1964aec -Author: Tony Murray -Date: Tue Aug 2 20:35:16 2016 -0500 - - Merge pull request #38 from crcro/master - - app: nfs-v3-stats - -commit fb678cb58df6277be2176e8a45a08af1d8dcb8d5 -Merge: 1d4c452 8d7e0df -Author: Tony Murray -Date: Mon Aug 1 11:26:27 2016 -0500 - - Merge pull request #39 from xbeaudouin/fix_distro_freebsd - - Add FreeBSD detection to distro script - -commit 8d7e0df4eb1e35b776aa17d2e6c2ea202cc021a7 -Author: xavier.beaudouin -Date: Mon Aug 1 11:15:52 2016 +0200 - - Add FreeBSD detection to distro script - -commit 1d4c4529ae907b343b7ffcb6eaeb94563ad2cb69 -Merge: dde18e9 760f9de -Author: Paul Gear -Date: Sat Jul 30 14:14:39 2016 +1000 - - Merge pull request #37 from xbeaudouin/master - - Fix some bash scripts to work with FreeBSD - -commit 1964aece5e421391cc6cb589c668da0b5f2eeaee -Author: crcro -Date: Fri Jul 29 20:22:35 2016 +0300 - - added snmp extend script for os-updates application - -commit 57b6224254eb3992e09358df2d867573512f6809 -Author: crcro -Date: Fri Jul 29 20:19:41 2016 +0300 - - added snmp extend script for nfs-v3-stats application - -commit 760f9de567a2876b0ad793979754661946b92c5c -Author: xavier.beaudouin -Date: Fri Jul 29 13:23:20 2016 +0200 - - /bin/bash => /usr/bin/env bash to allow freebsd agent work without patching each files - -commit dde18e98954c83fb52ae89083214814b5515a6c1 -Merge: 18f4006 9a3846c -Author: Neil Lathwood -Date: Tue Jul 26 20:46:20 2016 +0100 - - Merge pull request #36 from murrant/powerdns-recursor - - PowerDNS Recursor agent - -commit 18f4006e09a1436013eee8ed77927585f714fc43 -Merge: f75fc9f fc07e27 -Author: Neil Lathwood -Date: Tue Jul 26 20:45:38 2016 +0100 - - Merge pull request #33 from murrant/mysql-php7 - - Use mysqli instead of mysql - -commit f75fc9fce5a82c47e1303f5514eb0c421ad5cf93 -Merge: bfdf71d c70d12c -Author: Tony Murray -Date: Fri Jul 22 21:13:58 2016 -0500 - - Merge pull request #35 from murrant/duplicate-nfsstats - - Remove 
duplicate nfsstats file - -commit 9a3846cac30515a7a01a44ecc9fc6e08e78df1f5 -Author: Tony Murray -Date: Fri Jul 22 15:33:16 2016 -0500 - - PowerDNS Recursor agent - -commit c70d12c83c00e180da8a7e8281acdbd8e4741fa1 -Author: Tony Murray -Date: Fri Jul 22 15:22:48 2016 -0500 - - Remove duplicate nfsstats file - -commit bfdf71d6995ced14ebd1e25042a60c7107a57dc0 -Merge: 41cb583 9501c2f -Author: Tony Murray -Date: Thu Jul 21 22:30:29 2016 -0500 - - Merge pull request #34 from murrant/nfs - - Copy nfsstats script from main repo. - -commit 9501c2f4ffd4649982521c387b3d9dcab1de83d9 -Author: Tony Murray -Date: Thu Jul 21 22:28:41 2016 -0500 - - Copy nfsstats script from main repo. - Send PR to remove scripts from the main repo. - -commit fc07e27c37c74d47c61aeac3cb966062f8da63a2 -Author: Tony Murray -Date: Thu Jul 21 22:26:02 2016 -0500 - - Fix permissions - -commit 41cb5835ff3b0ca41a6392f19e43d590bd08d785 -Merge: db44c10 9bad4df -Author: Tony Murray -Date: Thu Jul 21 21:48:27 2016 -0500 - - Merge pull request #32 from tuxis-ie/proxmox-issue-28 - - Proxmox issue 28 - -commit e80b025818f2f993f4443be3100c5bcd1331812a -Author: Tony Murray -Date: Thu Jul 21 21:31:25 2016 -0500 - - Use mysqli instead of mysql - -commit 9bad4dfb3e586d7892709284cccf17417cf5ec03 -Author: Mark Schouten -Date: Wed Jul 13 15:06:57 2016 +0200 - - Something like this @einarjh ? - -commit 6d27c7edb3f4972a89fbf5641c4ece106b5dbc09 -Author: Mark Schouten -Date: Mon Jul 11 17:06:14 2016 +0200 - - Wrap these calls in an eval to prevent it from dying if its a container instead of a qemu vm. 
Fixes #28 - -commit db44c1070950c2e06565a39395bb09f09a023b4a -Merge: d00ce4a 5b21301 -Author: Neil Lathwood -Date: Sat Jul 9 19:12:59 2016 +0100 - - Merge pull request #31 from librenms/nfsstats - - Added nfsstats.sh file - -commit 5b21301ecdb761fa0e32f9295c8ea60aef44f3a7 -Author: Neil Lathwood -Date: Sat Jul 9 19:12:13 2016 +0100 - - Added nfsstats.sh file - -commit d00ce4a15a6b52753d108d1aeb2a768e7bfafe36 -Merge: c996b54 ca5a5a1 -Author: Neil Lathwood -Date: Thu Jun 30 08:57:07 2016 +0100 - - Merge pull request #29 from murrant/powerdns-python3 - - Python3 fixes for powerdns agent. Compatible with python2. - -commit ca5a5a12c065eb67e48410ed09ff97630a76f6b8 -Author: Tony Murray -Date: Wed Jun 29 19:52:10 2016 -0500 - - Python3 fixes for powerdns agent. Compatible with python2. - -commit c996b54e79b317785c58963abb6f71c31e61ba10 -Merge: fb7912b 8328d71 -Author: Neil Lathwood -Date: Thu Jun 9 11:38:43 2016 +0100 - - Merge pull request #27 from murrant/rrdcached - - Local script to collect stats from rrdcached - -commit 8328d71c0995fa8f6dc7c50de940fbe9b242fc41 -Author: Tony Murray -Date: Wed Jun 8 20:35:19 2016 -0500 - - Local script to collect stats from rrdcached - Being able to connect to local unix sockets is the primary advantage of this. - -commit fb7912beda4181b23d8cbbbf500a1e7ed4527001 -Merge: 601ac84 8d856e2 -Author: Daniel Preussker -Date: Thu May 5 13:32:02 2016 +0200 - - Merge pull request #25 from Exa-Omicron/master - - Improved hddtemp agent module - -commit 8d856e27648b6df2d89af852ad1cd912319a965f -Author: Robert Verspuy -Date: Thu May 5 10:27:30 2016 +0200 - - Improved hddtemp agent module - - I had some issues with the netcat / daemon implementation of the module. - netcat was stallingor sometimes netcat did not return the full output of hddtemp. - Running hddtemp directly without running it as a daemon is much more stable for me. - - This new version also does not give any stdout output when hddtemp is not installed or when no disks can be found. 
- Running the script manually on a server does give stderr output for easy debugging. - -commit 601ac843c303d29b8149142a3fac967aaa4a2638 -Merge: 21817b6 1c13779 -Author: Tony Murray -Date: Thu Apr 21 09:46:49 2016 -0500 - - Merge pull request #23 from librenms/freebsd-agent - - Create check_mk_agent_freebsd - -commit 1c1377958e6c8cfd8ca7fd1fd4fcafdae92e1a1b -Author: Neil Lathwood -Date: Thu Apr 21 15:41:06 2016 +0100 - - Update check_mk_agent_freebsd - -commit cdd235a12a0bd4d0cbffe330048fd476aa5fddd5 -Author: Neil Lathwood -Date: Thu Apr 21 15:39:59 2016 +0100 - - Create check_mk_agent_freebsd - - Added freebsd agent - -commit 21817b6b36692bdca8fac8f3ee4a0258a2d2bcee -Author: Tony Murray -Date: Tue Mar 29 08:29:02 2016 -0500 - - Fix wording for systemd unit - -commit 88c4b00b19370bea3e597770793d90b24f24b10b -Merge: dd2b95d 50a3c25 -Author: Neil Lathwood -Date: Tue Mar 29 09:51:00 2016 +0100 - - Merge pull request #22 from murrant/master - - Add systemd unit files - -commit 50a3c25115e501db4bd9fc97a8a8e3b7d81a635e -Author: Tony Murray -Date: Mon Mar 28 12:56:26 2016 -0500 - - Add systemd unit files - -commit dd2b95d8d2eb35bf1b3f0aea34d843af33f1c28e -Merge: 6d0babe ff2bbe6 -Author: Neil Lathwood -Date: Wed Nov 25 13:37:25 2015 +0000 - - Merge pull request #17 from f0o/upstream-snapshot - - Snapshot upstream changes - -commit ff2bbe6882a9b79b93883980b0360f780fc24d76 -Author: f0o -Date: Wed Nov 25 13:26:26 2015 +0000 - - Snapshot upstream changes - -commit 6d0babe0973d5cb8e2d35fd33e2f45e96ae96c15 -Merge: 8e847b9 12e31c1 -Author: Daniel Preussker -Date: Wed Nov 25 13:28:17 2015 +0000 - - Merge pull request #16 from tuxis-ie/powerdns-support - - Powerdns support - -commit 12e31c16c3c42e6d1c73a196978acf18e554e4b0 -Author: Mark Schouten -Date: Mon Nov 23 14:10:17 2015 +0100 - - Add PowerDNS Authoritative Agent - -commit d16462bb5ac978cfd5b7cb213359989b2aabc791 -Author: Mark Schouten -Date: Mon Nov 23 14:10:15 2015 +0100 - - Add PowerDNS Authoritative Agent - -commit 
8e847b986aa3af50eb6c2302c3d1f0df158a47bd -Merge: da7e40c 66d5028 -Author: Neil Lathwood -Date: Wed Nov 11 17:17:24 2015 -0400 - - Merge pull request #15 from SaaldjorMike/mysql1 - - Moved mysql tag a bit up and added a newline to error msg. - -commit 66d502837d2643c59d7f87af076fd851b0ba12c1 -Author: Mike Rostermund -Date: Wed Nov 11 14:21:49 2015 +0100 - - Moved mysql tag a bit up and added a newline to error msg. - -commit da7e40c43eb3155d3253c1eb695a78a0d9362a51 -Merge: f6f0079 0cc7b49 -Author: Neil Lathwood -Date: Tue Nov 10 08:08:34 2015 -0400 - - Merge pull request #14 from tuxis-ie/ceph-support - - Ceph support - -commit 0cc7b493978c06f0f3e73749bac1fbadf56c1be8 -Author: Mark Schouten -Date: Tue Nov 10 11:00:58 2015 +0100 - - Add support for Ceph - -commit 9b4c3b34009a441df579051336bf3ea0647fe73c -Author: Mark Schouten -Date: Tue Nov 10 10:58:24 2015 +0100 - - Add support for Ceph - -commit f6f0079c6620ee3d75adf7511006006353903dd3 -Merge: d90957a 30b7651 -Author: Daniel Preussker -Date: Wed Nov 4 13:42:29 2015 +0000 - - Merge pull request #13 from tuxis-ie/master - - Crap, forgot this line... - -commit 30b7651e0142826202276a7bf9a31343d759c68a -Author: Mark Schouten -Date: Wed Nov 4 14:40:19 2015 +0100 - - Crap, forgot this line... 
- -commit d90957a0bc9e484056eaf26b206672b940fc7a9f -Merge: 25fcd5a 6554087 -Author: Daniel Preussker -Date: Wed Nov 4 13:35:33 2015 +0000 - - Merge pull request #12 from tuxis-ie/master - - Fix the proxmox-agent for Proxmox VE 4.0 - -commit 65540872e7a1215cfdca1d4b480670a67cf50a77 -Author: Mark Schouten -Date: Wed Nov 4 14:30:21 2015 +0100 - - Fix the proxmox-agent for Proxmox VE 4.0 - -commit 25fcd5ae76682006ed61aa09212738381968208f -Merge: 20e2d22 b6bfbba -Author: Paul Gear -Date: Mon Oct 26 09:39:15 2015 +1000 - - Merge pull request #10 from librenms/laf-patch-1 - - Update distro to use env - -commit b6bfbbaf2c99945aceb92e9c7f950a53196c26fc -Author: Neil Lathwood -Date: Sun Oct 25 21:51:43 2015 +0000 - - Update distro to use env - -commit 20e2d220bde9e4edec76d00551c955274d06130c -Merge: 87a20db 2b96259 -Author: Daniel Preussker -Date: Fri Aug 28 09:07:49 2015 +0000 - - Merge pull request #7 from tuxis-ie/master - - Add a proxmox-agent - -commit 2b9625953240ade30cf5ccef22a9293a016b819b -Author: Mark Schouten -Date: Fri Aug 28 10:52:04 2015 +0200 - - Add license - -commit d6795c60a171eba023b8c0e5b151376c6bcfa0d1 -Author: Mark Schouten -Date: Fri Aug 28 10:49:24 2015 +0200 - - Add proxmox-agent - -commit fee2ed820bedb4613871aa9747b40121e3ae7879 -Author: Mark Schouten -Date: Fri Aug 28 10:49:19 2015 +0200 - - Add proxmox-agent - -commit 87a20db845517070fdb2eec70d264e18bfde2871 -Merge: 8ae2b15 6493263 -Author: Daniel Preussker -Date: Thu Aug 20 17:14:11 2015 +0000 - - Merge pull request #5 from tuxis-ie/master - - Add files to create a Debian-package - -commit 64932630f0b67e876d0859df491705b11a71aa07 -Author: Mark Schouten -Date: Thu Aug 20 14:18:10 2015 +0200 - - Do not include the README in the repodir - -commit 77864124dc119b0d89b1c852090e5f283b02123a -Author: Mark Schouten -Date: Thu Aug 20 10:34:50 2015 +0200 - - Add license - -commit 8ae2b1520b9e75583b87977427415c90256473e1 -Merge: 69551b0 63d3166 -Author: Daniel Preussker -Date: Tue Aug 18 15:14:00 2015 +0000 
- - Merge pull request #6 from librenms/f0o-mysql-host-logic - - Fix MySQL Host Logic - -commit 63d31665cea2afaeadb8c8ba1b58b37605597b80 -Author: Daniel Preussker -Date: Tue Aug 18 15:08:50 2015 +0000 - - Fix MySQL Host Logic - -commit 51270e24c19bed95030a41e3ab7828bb2330d68d -Author: Mark Schouten -Date: Mon Aug 17 16:58:33 2015 +0200 - - Also include distro in this package - -commit 2b4d17280dd4cbff1b497e2f6ffc17bf75020ea9 -Author: Mark Schouten -Date: Mon Aug 17 16:57:48 2015 +0200 - - Strip comments (on Qemu boxes, this pollutes a lot - -commit 2833310e228e185e78ddbb96589f63e9d2d7b852 -Author: Mark Schouten -Date: Mon Aug 17 16:50:26 2015 +0200 - - Enable dpkg and dmi by default - -commit 3cd06768b5487261ddde819aad6428a3183ffbbf -Author: Mark Schouten -Date: Mon Aug 17 16:48:22 2015 +0200 - - Place all plugins in a repo-dir and add mk_enplug to enable plugins - -commit 7954d5a085f0ffe31fa1becb6d3132ca63b46942 -Author: Mark Schouten -Date: Mon Aug 17 16:19:04 2015 +0200 - - Add Conflicts/Provides and fix location for xinetd.d - -commit a7df28415a4645293835c79d15201539376be11d -Author: Mark Schouten -Date: Mon Aug 17 15:12:12 2015 +0200 - - Add files to create a Debian-package - -commit 69551b05e2673c899077a4539d1b6a6ec95b4290 -Merge: cfec5ec 4683c68 -Author: Daniel Preussker -Date: Tue Jul 28 20:11:44 2015 +0000 - - Merge pull request #4 from alangregory/master - - Added Snmpd.conf example and distro executable - -commit 4683c68d1d23f63ff9977c8a11543004cd4b8a34 -Author: Alan Gregory -Date: Tue Jul 28 15:58:29 2015 -0300 - - Added Snmpd.conf example and distro executable - -commit cfec5ec65dc93a6bc9260eb4f1d3f9379d1c7287 -Author: Daniel Preussker -Date: Tue Jun 9 17:34:00 2015 +0000 - - Delete README.md - -commit f1c9d6578a9f5df51047e5246624a96e55e043d4 -Merge: a47d95b 195a46c -Author: Daniel Preussker -Date: Mon May 18 13:07:29 2015 +0200 - - Merge pull request #1 from f0o/master - - Initial commit - -commit 195a46c1e377f6729acf38f294153ef40147d2ff -Author: f0o 
-Date: Mon May 18 10:57:45 2015 +0000 - - Initial commit - -commit a47d95b58cc05e32a3feaa7f0022857da80ba58a -Author: Daniel Preussker -Date: Mon May 18 09:28:15 2015 +0000 - - Initial commit From 91940e6e501f34e3fc93a5badc312e4344ccfe5d Mon Sep 17 00:00:00 2001 From: Anael Mobilia Date: Tue, 2 Jun 2020 17:37:23 +0200 Subject: [PATCH 257/497] Add backupninja snmp script (#264) * Add backupninja snmp script * jsonify output * Better codestyle & apply JSON requirements * Optimize file using redis.py example * Properly use of the LibreNMS json format * typo --- snmp/backupninja.py | 39 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 39 insertions(+) create mode 100644 snmp/backupninja.py diff --git a/snmp/backupninja.py b/snmp/backupninja.py new file mode 100644 index 000000000..ce9408d67 --- /dev/null +++ b/snmp/backupninja.py @@ -0,0 +1,39 @@ +#!/usr/bin/env python3 +import io +import re +import os +import json + +version = 1 +error = 0 +error_string = '' + +logfile = '/var/log/backupninja.log' + +backupninja_datas = { + 'last_actions': 0, + 'last_fatal': 0, + 'last_error': 0, + 'last_warning': 0} + +if not os.path.isfile(logfile): + error_string = 'file unavailable' + error = 1 + break + +with io.open(logfile,'r') as f: + for line in reversed(list(f)): + match = re.search('^(.*) [a-zA-Z]*: FINISHED: ([0-9]+) actions run. ([0-9]+) fatal. ([0-9]+) error. 
([0-9]+) warning.$', line) + if match: + backupninja_datas['last_actions'] = int(match.group(2)) + backupninja_datas['last_fatal'] = int(match.group(3)) + backupninja_datas['last_error'] = int(match.group(4)) + backupninja_datas['last_warning'] = int(match.group(5)) + break + +output = {'version': version, + 'error': error, + 'errorString': error_string, + 'data': backupninja_datas} + +print(json.dumps(output)) From 3a94888f6b1e79ea1fd74e8e4852105343427b15 Mon Sep 17 00:00:00 2001 From: SourceDoctor Date: Sun, 14 Jun 2020 23:46:58 +0200 Subject: [PATCH 258/497] Application sudo correction (#306) --- snmp/pureftpd.py | 2 +- snmp/raspberry.sh | 2 -- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/snmp/pureftpd.py b/snmp/pureftpd.py index 1c768e1ba..a2c75672e 100755 --- a/snmp/pureftpd.py +++ b/snmp/pureftpd.py @@ -34,7 +34,7 @@ output_data['errorString'] = "Configfile Error: '%s'" % e -output = os.popen('sudo ' + pureftpwho_cmd + ' ' + pureftpwho_args).read() +output = os.popen(pureftpwho_cmd + ' ' + pureftpwho_args).read() data = {} diff --git a/snmp/raspberry.sh b/snmp/raspberry.sh index a2f924102..df4ffcea4 100755 --- a/snmp/raspberry.sh +++ b/snmp/raspberry.sh @@ -19,8 +19,6 @@ getStatusMPG4='codec_enabled MPG4' getStatusMJPG='codec_enabled MJPG' getStatusWMV9='codec_enabled WMV9' -[ $(id -u) -eq 0 ] || picmd="sudo $picmd" - $picmd $getTemp | $pised 's|[^0-9.]||g' $picmd $getVoltsCore | $pised 's|[^0-9.]||g' $picmd $getVoltsRamC | $pised 's|[^0-9.]||g' From 624be75e4e360947b91820fb30064f05f0c4e12f Mon Sep 17 00:00:00 2001 From: Slashdoom <5092581+slashdoom@users.noreply.github.com> Date: Wed, 17 Jun 2020 09:26:08 +1200 Subject: [PATCH 259/497] add check_mrpe script (#192) * add check_mrpe script * recommit Co-authored-by: slashdoom --- agent-local/check_mrpe | 60 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 60 insertions(+) create mode 100644 agent-local/check_mrpe diff --git a/agent-local/check_mrpe b/agent-local/check_mrpe new file 
mode 100644 index 000000000..e80c62d70 --- /dev/null +++ b/agent-local/check_mrpe @@ -0,0 +1,60 @@ +#!/usr/bin/env bash + +VERSION="0.1 (beta)" + +IFS=$'\n\t' +GLOBIGNORE="*" + +BIN_NC="$(command -v nc)" +BIN_SED="$(command -v sed)" + +pval="6556" + +EXITCODE=0 + +while getopts "Vha:H:p:" opt; do + case $opt in + V) printf "check_mrpe v$VERSION\n" + exit $EXITCODE + ;; + h) hflag=1 + ;; + a) aflag=1 + aval="$OPTARG" + ;; + H) Hflag=1 + Hval="$OPTARG" + ;; + p) pflag=1 + pval="$OPTARG" + ;; + \?) hflag=1 + EXITCODE=1 + ;; + esac +done + +if ! [ "$Hflag" ] && ! [ "$hflag" ]; then + printf "Error: Host not specified\n" + hflag=1 + EXITCODE=1 +fi + +if [ "$hflag" ]; then + printf "check_mrpe:\n" + printf "Usage:\n" + printf " check_mrpe [-H host] [-p port] [-a app]\n" + exit $EXITCODE +fi + +if [ "$aflag" ]; +then + SED_CMD="s/\((.*) $aval [0-9] \)\(.*\)/\2/p" +else + SED_CMD="s/(.*) \(.*\) [0-9] \(.*\)/\1 \2/p" +fi + +for i in `$BIN_NC --recv-only $Hval $pval 2>&1 | $BIN_SED '/^<<>>/,/^<< Date: Wed, 17 Jun 2020 00:04:29 +0200 Subject: [PATCH 260/497] DHCP Agent Update (#303) --- snmp/dhcp-status.sh | 36 ------------ snmp/dhcp.py | 135 ++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 135 insertions(+), 36 deletions(-) delete mode 100755 snmp/dhcp-status.sh create mode 100755 snmp/dhcp.py diff --git a/snmp/dhcp-status.sh b/snmp/dhcp-status.sh deleted file mode 100755 index a629d0a32..000000000 --- a/snmp/dhcp-status.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/bash -################################################################ -# copy this script to somewhere like /opt and make chmod +x it # -# edit your snmpd.conf add the below line and restart snmpd # -# extend dhcpstats /opt/dhcp-status.sh # -################################################################ -FILE_DHCP='/var/lib/dhcp/db/dhcpd.leases' -BIN_CAT='/usr/bin/cat' -BIN_GREP='/usr/bin/grep' -BIN_TR='/usr/bin/tr' -BIN_SED='/usr/bin/sed' -BIN_SORT='/usr/bin/sort' -BIN_WC='/usr/bin/wc' - 
-CONFIGFILE=/etc/snmp/dhcp-status.conf -if [ -f $CONFIGFILE ] ; then - . $CONFIGFILE -fi - -DHCP_LEASES='^lease' -DHCP_ACTIVE='^lease|binding state active' -DHCP_EXPIRED='^lease|binding state expired' -DHCP_RELEASED='^lease|binding state released' -DHCP_ABANDONED='^lease|binding state abandoned' -DHCP_RESET='^lease|binding state reset' -DHCP_BOOTP='^lease|binding state bootp' -DHCP_BACKUP='^lease|binding state backup' -DHCP_FREE='^lease|binding state free' -NO_ERROR='[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3} binding' - -$BIN_CAT $FILE_DHCP | $BIN_GREP $DHCP_LEASES | $BIN_SORT -u | $BIN_WC -l - -for state in "$DHCP_ACTIVE" "$DHCP_EXPIRED" "$DHCP_RELEASED" "$DHCP_ABANDONED" "$DHCP_RESET" "$DHCP_BOOTP" "$DHCP_BACKUP" "$DHCP_FREE" -do - $BIN_GREP -E "$state" $FILE_DHCP | $BIN_TR '\n' '|' | $BIN_SED 's/ {| //g' | $BIN_TR '|' '\n' | $BIN_GREP -E "$NO_ERROR" | $BIN_SORT -u | $BIN_WC -l -done diff --git a/snmp/dhcp.py b/snmp/dhcp.py new file mode 100755 index 000000000..12937370e --- /dev/null +++ b/snmp/dhcp.py @@ -0,0 +1,135 @@ +#!/usr/bin/env python3 + +import subprocess +import json +from os.path import isfile + +CONFIGFILE = '/etc/snmp/dhcp.json' + +# Configfile is needed /etc/snmp/dhcp.json +# +# {"leasefile": "/var/lib/dhcp/dhcpd.leases" +# } +# + +error = 0 +error_string = '' +version = 2 + +with open(CONFIGFILE, 'r') as json_file: + try: + configfile = json.load(json_file) + except json.decoder.JSONDecodeError as e: + error = 1 + error_string = "Configfile Error: '%s'" % e + + +if not error: + leases = {'total': 0, + 'active': 0, + 'expired': 0, + 'released': 0, + 'abandoned': 0, + 'reset': 0, + 'bootp': 0, + 'backup': 0, + 'free': 0, + } + if not isfile(configfile['leasefile']): + error = 1 + error_string = 'Lease File not found' + else: + with open(configfile['leasefile']) as fp: + line = fp.readline() + while line: + line = fp.readline() + + if 'rewind' not in line: + if line.startswith('lease'): + leases['total'] += 1 + elif 'binding state active' in 
line: + leases['active'] += 1 + elif 'binding state expired' in line: + leases['expired'] += 1 + elif 'binding state released' in line: + leases['released'] += 1 + elif 'binding state abandoned' in line: + leases['abandoned'] += 1 + elif 'binding state reset' in line: + leases['reset'] += 1 + elif 'binding state bootp' in line: + leases['bootp'] += 1 + elif 'binding state backup' in line: + leases['backup'] += 1 + elif 'binding state free' in line: + leases['free'] += 1 + +shell_cmd = "dhcpd-pools -s i -A" +pool_data = subprocess.Popen(shell_cmd, shell=True, stdout=subprocess.PIPE).stdout.read().split(b'\n') + +data = {'leases': leases, + 'pools': [], + 'networks': [], + 'all_networks': [] + } + +category = None +jump_line = 0 +for p in pool_data: + line = p.decode('utf-8') + + if jump_line: + jump_line -= 1 + continue + + if line.startswith('Ranges:'): + category = 'pools' + jump_line = 1 + continue + + if line.startswith('Shared networks:'): + category = 'networks' + jump_line = 1 + continue + + if line.startswith('Sum of all ranges:'): + category = 'all_networks' + jump_line = 1 + continue + + if not len(line): + continue + + p = line.split() + + if category == 'pools': + data[category].append({'first_ip': p[1], + 'last_ip':p[3], + 'max': p[4], + 'cur': p[5], + 'percent': p[6], + }) + continue + + if category == 'networks': + data[category].append({'network': p[0], + 'max': p[1], + 'cur': p[2], + 'percent': p[3], + }) + continue + + if category == 'all_networks': + data[category] ={'max': p[2], + 'cur': p[3], + 'percent': p[4], + } + continue + + +output = {'version': version, + 'error': error, + 'errorString': error_string, + 'data': data} + +print (json.dumps(output)) From 2bc63d6512beb75772b5ad6c397d569f6dacf232 Mon Sep 17 00:00:00 2001 From: N Date: Wed, 17 Jun 2020 22:22:13 +0100 Subject: [PATCH 261/497] Fix up smart script to also parse some values from NVMe drives (#308) --- snmp/smart | 185 ++++++++++++++++++++++++++++++----------------------- 1 file 
changed, 106 insertions(+), 79 deletions(-) diff --git a/snmp/smart b/snmp/smart index 605340fb9..5793b90db 100755 --- a/snmp/smart +++ b/snmp/smart @@ -283,105 +283,132 @@ foreach my $line ( @disks ){ '233'=>'null', '9'=>'null', ); + + my @outputA; + + if($output =~ /NVMe Log/) + { + # we have an NVMe drive with annoyingly different output + my %mappings=( + 'Temperature' => 194, + 'Power Cycles' => 12, + 'Power On Hours' => 9, + ); + foreach(split(/\n/, $output )) + { + if(/:/) + { + my ($key, $val) = split(/:/); + $val =~ s/^\s+|\s+$|\D+//g; + if(exists($mappings{$key})) + { + $IDs{$mappings{$key}} = $val; + } + } + } - my @outputA=split( /\n/, $output ); - my $outputAint=0; - while ( defined($outputA[$outputAint]) ) { - my $line=$outputA[$outputAint]; - $line=~s/^ +//; - $line=~s/ +/ /g; - - if ( $line =~ /^[0123456789]+ / ) { - my @lineA=split(/\ /, $line, 10); - my $raw=$lineA[9]; - my $id=$lineA[0]; - - # Crucial SSD - # 202, Percent_Lifetime_Remain, same as 231, SSD Life Left - if ( $id == 202 ) { - $IDs{231}=$raw; - } + } + else + { + @outputA=split( /\n/, $output ); + my $outputAint=0; + while ( defined($outputA[$outputAint]) ) { + my $line=$outputA[$outputAint]; + $line=~s/^ +//; + $line=~s/ +/ /g; + + if ( $line =~ /^[0123456789]+ / ) { + my @lineA=split(/\ /, $line, 10); + my $raw=$lineA[9]; + my $id=$lineA[0]; + + # Crucial SSD + # 202, Percent_Lifetime_Remain, same as 231, SSD Life Left + if ( $id == 202 ) { + $IDs{231}=$raw; + } - # single int raw values - if ( - ( $id == 5 ) || - ( $id == 10 ) || - ( $id == 173 ) || - ( $id == 177 ) || - ( $id == 183 ) || - ( $id == 184 ) || - ( $id == 187 ) || - ( $id == 196 ) || - ( $id == 197 ) || - ( $id == 198 ) || - ( $id == 199 ) || - ( $id == 231 ) || - ( $id == 233 ) - ) { - $IDs{$id}=$raw; - } + # single int raw values + if ( + ( $id == 5 ) || + ( $id == 10 ) || + ( $id == 173 ) || + ( $id == 177 ) || + ( $id == 183 ) || + ( $id == 184 ) || + ( $id == 187 ) || + ( $id == 196 ) || + ( $id == 197 ) || + ( 
$id == 198 ) || + ( $id == 199 ) || + ( $id == 231 ) || + ( $id == 233 ) + ) { + $IDs{$id}=$raw; + } - # 9, power on hours - if ( $id == 9 ) { - my @runtime=split(/[\ h]/, $raw); - $IDs{$id}=$runtime[0]; - } + # 9, power on hours + if ( $id == 9 ) { + my @runtime=split(/[\ h]/, $raw); + $IDs{$id}=$runtime[0]; + } - # 188, Command_Timeout - if ( $id == 188 ) { - my $total=0; - my @rawA=split( /\ /, $raw ); - my $rawAint=0; - while ( defined( $rawA[$rawAint] ) ) { - $total=$total+$rawA[$rawAint]; - $rawAint++; + # 188, Command_Timeout + if ( $id == 188 ) { + my $total=0; + my @rawA=split( /\ /, $raw ); + my $rawAint=0; + while ( defined( $rawA[$rawAint] ) ) { + $total=$total+$rawA[$rawAint]; + $rawAint++; + } + $IDs{$id}=$total; } - $IDs{$id}=$total; - } - # 190, airflow temp - # 194, temp - if ( - ( $id == 190 ) || - ( $id == 194 ) - ) { - my ( $temp )=split(/\ /, $raw); - $IDs{$id}=$temp; + # 190, airflow temp + # 194, temp + if ( + ( $id == 190 ) || + ( $id == 194 ) + ) { + my ( $temp )=split(/\ /, $raw); + $IDs{$id}=$temp; + } } - } - # SAS Wrapping - # Section by Cameron Munroe (munroenet[at]gmail.com) + # SAS Wrapping + # Section by Cameron Munroe (munroenet[at]gmail.com) - # Elements in Grown Defect List. - # Marking as 5 Reallocated_Sector_Ct + # Elements in Grown Defect List. 
+ # Marking as 5 Reallocated_Sector_Ct - if ($line =~ "Elements in grown defect list:"){ + if ($line =~ "Elements in grown defect list:"){ - my @lineA=split(/\ /, $line, 10); - my $raw=$lineA[5]; + my @lineA=split(/\ /, $line, 10); + my $raw=$lineA[5]; - # Reallocated Sector Count ID - $IDs{5}=$raw; + # Reallocated Sector Count ID + $IDs{5}=$raw; - } + } - # Current Drive Temperature - # Marking as 194 Temperature_Celsius + # Current Drive Temperature + # Marking as 194 Temperature_Celsius - if ($line =~ "Current Drive Temperature:"){ + if ($line =~ "Current Drive Temperature:"){ - my @lineA=split(/\ /, $line, 10); - my $raw=$lineA[3]; + my @lineA=split(/\ /, $line, 10); + my $raw=$lineA[3]; - # Temperature C ID - $IDs{194}=$raw; + # Temperature C ID + $IDs{194}=$raw; - } + } - # End of SAS Wrapper + # End of SAS Wrapper - $outputAint++; + $outputAint++; + } } #get the selftest logs From 3de65ad70b52cf97af570a85eb1c2e43bcd5fdca Mon Sep 17 00:00:00 2001 From: yon2004 Date: Thu, 18 Jun 2020 17:42:28 +1000 Subject: [PATCH 262/497] Update ups-nut.sh (#298) * Update ups-nut.sh Added Support for the ups.status and the 14 supported states as per https://networkupstools.org/docs/developer-guide.chunked/ar01s04.html#_status_data * Update ups-nut.sh --- snmp/ups-nut.sh | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/snmp/ups-nut.sh b/snmp/ups-nut.sh index cc04c8468..18f431469 100755 --- a/snmp/ups-nut.sh +++ b/snmp/ups-nut.sh @@ -23,3 +23,18 @@ do echo "Unknown" fi done + +for value in "ups\.status:[A-Z ]{0,}OL" "ups\.status:[A-Z ]{0,}OB" "ups\.status:[A-Z ]{0,}LB" "ups\.status:[A-Z ]{0,}HB" "ups\.status:[A-Z ]{0,}RB" "ups\.status:[A-Z ]{0,}CHRG" "ups\.status:[A-Z ]{0,}DISCHRG" "ups\.status:[A-Z ]{0,}BYPASS" "ups\.status:[A-Z ]{0,}CAL" "ups\.status:[A-Z ]{0,}OFF" "ups\.status:[A-Z ]{0,}OVER" "ups\.status:[A-Z ]{0,}TRIM" "ups\.status:[A-Z ]{0,}BOOST" "ups\.status:[A-Z ]{0,}FSD" +do + UNKNOWN=$(echo $TMP | grep -Eo "ups\.status:") + if [ -z "$UNKNOWN" ]; 
then + echo "Unknown" + else + OUT=$(echo $TMP | grep -Eo "$value") + if [ -n "$OUT" ]; then + echo "1" + else + echo "0" + fi + fi +done From a59811b1344bf9b777c61cfb21b4ab015eec6ab2 Mon Sep 17 00:00:00 2001 From: Mark Westerterp Date: Tue, 30 Jun 2020 23:56:49 +0200 Subject: [PATCH 263/497] Make this script Python 3 compatible (#311) --- agent-local/ceph | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/agent-local/ceph b/agent-local/ceph index 9a83d3989..1301f79ec 100755 --- a/agent-local/ceph +++ b/agent-local/ceph @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 # Copyright (C) 2015 Mark Schouten # @@ -18,12 +18,12 @@ from subprocess import check_output import json def cephversion(): - cephv = check_output(["/usr/bin/ceph", "version"]).replace('ceph version ', '') + cephv = check_output(["/usr/bin/ceph", "version"]).decode("utf-8").replace('ceph version ', '') major, minor = cephv.split('.')[0:2] return [int(major), int(minor)] def cephdf(): - cephdf = check_output(["/usr/bin/ceph", "-f", "json", "df"]).replace('-inf', '0') + cephdf = check_output(["/usr/bin/ceph", "-f", "json", "df"]).decode("utf-8").replace('-inf', '0') s = json.loads(cephdf) try: @@ -50,7 +50,7 @@ def cephdf(): def osdperf(): global major - osdperf = check_output(["/usr/bin/ceph", "-f", "json", "osd", "perf"]).replace('-inf', '0') + osdperf = check_output(["/usr/bin/ceph", "-f", "json", "osd", "perf"]).decode("utf-8").replace('-inf', '0') if major > 13: for o in json.loads(osdperf)['osdstats']['osd_perf_infos']: @@ -61,33 +61,33 @@ def osdperf(): def poolstats(): global major - poolstats = check_output(["/usr/bin/ceph", "-f", "json", "osd", "pool", "stats"]).replace('-inf', '0') + poolstats = check_output(["/usr/bin/ceph", "-f", "json", "osd", "pool", "stats"]).decode("utf-8").replace('-inf', '0') for p in json.loads(poolstats): - try: + try: r = p['client_io_rate']['read_bytes_sec'] except: r = 0 - try: + try: w = 
p['client_io_rate']['write_bytes_sec'] except: w = 0 - try: + try: if major > 11: o = p['client_io_rate']['read_op_per_sec'] + p['client_io_rate']['write_op_per_sec'] else: o = p['client_io_rate']['op_per_sec'] except: o = 0 - + print("%s:%i:%i:%i" % (p['pool_name'], o, w, r)) major, minor = cephversion() -print "<<>>" -print "" +print ("<<>>") +print ("") poolstats() -print "" +print ("") osdperf() -print "" +print ("") cephdf() From 421f4e999a77cf0693d54b2648970af05fb159d9 Mon Sep 17 00:00:00 2001 From: FingerlessGloves Date: Wed, 1 Jul 2020 18:27:21 +0100 Subject: [PATCH 264/497] Update proxmox versions detection on Distro (#316) Update proxmox versions detection on Distro Before change "Debian GNU/Linux 10" After change "Debian 10.3/PVE 6.1-8" --- snmp/distro | 1 + 1 file changed, 1 insertion(+) diff --git a/snmp/distro b/snmp/distro index 75fa74d2a..6771e8fd5 100755 --- a/snmp/distro +++ b/snmp/distro @@ -62,6 +62,7 @@ elif [ "${OS}" = "Linux" ] ; then fi if [ -f /usr/bin/pveversion ]; then DIST="${DIST}/PVE `/usr/bin/pveversion | cut -d '/' -f 2`" + IGNORE_OS_RELEASE=1 fi elif [ -f /etc/gentoo-release ] ; then From d76c2e145e61a6e590d7c0f305a35598417bd8e6 Mon Sep 17 00:00:00 2001 From: SourceDoctor Date: Sat, 4 Jul 2020 13:13:32 +0200 Subject: [PATCH 265/497] FreeRadius optional Configfile (#317) --- snmp/freeradius.sh | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/snmp/freeradius.sh b/snmp/freeradius.sh index 088acf3c1..11d343585 100644 --- a/snmp/freeradius.sh +++ b/snmp/freeradius.sh @@ -1,5 +1,7 @@ #!/usr/bin/env bash +CONFIGFILE=/etc/snmp/freeradius.conf + # Set 0 for SNMP extend; set to 1 for Check_MK agent AGENT=0 @@ -8,6 +10,10 @@ RADIUS_SERVER='localhost' RADIUS_PORT='18121' RADIUS_KEY='adminsecret' +if [ -f $CONFIGFILE ]; do + . 
$CONFIGFILE +done + # Default radclient access request, shouldn't need to be changed RADIUS_STATUS_CMD='Message-Authenticator = 0x00, FreeRADIUS-Statistics-Type = 31, Response-Packet-Type = Access-Accept' From 806ad4306f8191653655495a34640cd56055045f Mon Sep 17 00:00:00 2001 From: SourceDoctor Date: Sat, 4 Jul 2020 17:44:40 +0200 Subject: [PATCH 266/497] Configurable env binary path (#318) --- snmp/ntp-server.sh | 26 ++++++++++++++++++-------- 1 file changed, 18 insertions(+), 8 deletions(-) diff --git a/snmp/ntp-server.sh b/snmp/ntp-server.sh index bbf5c737a..5871d0377 100755 --- a/snmp/ntp-server.sh +++ b/snmp/ntp-server.sh @@ -10,14 +10,24 @@ # If you are unsure, which to set, run this script and make sure that # the JSON output variables match that in "ntpq -c rv". # -BIN_NTPD='/usr/bin/env ntpd' -BIN_NTPQ='/usr/bin/env ntpq' -BIN_NTPDC='/usr/bin/env ntpdc' -BIN_GREP='/usr/bin/env grep' -BIN_TR='/usr/bin/env tr' -BIN_CUT='/usr/bin/env cut' -BIN_SED="/usr/bin/env sed" -BIN_AWK='/usr/bin/env awk' + +CONFIGFILE=/etc/snmp/ntp-server.conf + +BIN_ENV='/usr/bin/env' + +if [ -f $CONFIGFILE ] ; then + . 
$CONFIGFILE +fi + +BIN_NTPD="$BIN_ENV ntpd" +BIN_NTPQ="$BIN_ENV ntpq" +BIN_NTPDC="$BIN_ENV ntpdc" +BIN_GREP="$BIN_ENV grep" +BIN_TR="$BIN_ENV tr" +BIN_CUT="$BIN_ENV cut" +BIN_SED="$BIN_ENV sed" +BIN_AWK="$BIN_ENV awk" + NTPQV="p11" ################################################################ # Don't change anything unless you know what are you doing # From c68d8e203d47b037dada118c2459f204d2574d60 Mon Sep 17 00:00:00 2001 From: arrmo Date: Thu, 9 Jul 2020 16:13:51 -0500 Subject: [PATCH 267/497] Scripts for Openwrt (#314) --- snmp/Openwrt/wlClients.sh | 36 +++++++++++++++++++++++++++++++++++ snmp/Openwrt/wlFrequency.sh | 19 ++++++++++++++++++ snmp/Openwrt/wlInterfaces.txt | 2 ++ snmp/Openwrt/wlNoiseFloor.sh | 20 +++++++++++++++++++ snmp/Openwrt/wlRate.sh | 32 +++++++++++++++++++++++++++++++ snmp/Openwrt/wlSNR.sh | 30 +++++++++++++++++++++++++++++ 6 files changed, 139 insertions(+) create mode 100755 snmp/Openwrt/wlClients.sh create mode 100755 snmp/Openwrt/wlFrequency.sh create mode 100755 snmp/Openwrt/wlInterfaces.txt create mode 100755 snmp/Openwrt/wlNoiseFloor.sh create mode 100755 snmp/Openwrt/wlRate.sh create mode 100755 snmp/Openwrt/wlSNR.sh diff --git a/snmp/Openwrt/wlClients.sh b/snmp/Openwrt/wlClients.sh new file mode 100755 index 000000000..cf6195f62 --- /dev/null +++ b/snmp/Openwrt/wlClients.sh @@ -0,0 +1,36 @@ +#!/bin/sh + +# wlClients.sh +# Counts connected (associated) Wi-Fi devices +# Arguments: targed interface. Assumes all interfaces if no argument + +# Check number of arguments +if [ $# -gt 1 ]; then + /bin/echo "Usage: wlClients.sh interface" + /bin/echo "Too many command line arguments, exiting." + exit 1 +fi + +# Get path to this script +scriptdir=$(dirname $(readlink -f -- $0)) + +# Get hostname, interface list. 
Set target, which is name returned for interface +hostname=`/bin/uname -n` +if [ $1 ]; then + interfaces=$1 + target=$1 +else + interfaces=`cat $scriptdir/wlInterfaces.txt | cut -f 1 -d","` + target=wlan +fi + +# Count associated devices +count=0 +for interface in $interfaces +do + new=`/usr/sbin/iw dev $interface station dump | /bin/grep Station | /usr/bin/cut -f 2 -s -d" " | /usr/bin/wc -l` + count=$(( $count + $new )) +done + +# Return snmp result +/bin/echo $count diff --git a/snmp/Openwrt/wlFrequency.sh b/snmp/Openwrt/wlFrequency.sh new file mode 100755 index 000000000..119fb54af --- /dev/null +++ b/snmp/Openwrt/wlFrequency.sh @@ -0,0 +1,19 @@ +#!/bin/sh + +# wlFrequency.sh +# Returns wlFrequency, in MHz (not channel number) +# Arguments: targed interface + +# Check number of arguments +if [ $# -ne 1 ]; then + /bin/echo "Usage: wlFrequency.sh interface" + /bin/echo "Missing targeted interface, exiting." + exit 1 +fi + +# Get hostname, extract frequency +hostname=`/bin/uname -n` +frequency=`/usr/sbin/iw dev $1 info | /bin/grep channel | /usr/bin/cut -f 2- -s -d" " | /usr/bin/cut -f 2- -s -d"(" | /usr/bin/cut -f 1 -s -d" "` + +# Return snmp result +/bin/echo $frequency diff --git a/snmp/Openwrt/wlInterfaces.txt b/snmp/Openwrt/wlInterfaces.txt new file mode 100755 index 000000000..bfe882e0e --- /dev/null +++ b/snmp/Openwrt/wlInterfaces.txt @@ -0,0 +1,2 @@ +wlan0,wl-2.4G +wlan1,wl-5.0G diff --git a/snmp/Openwrt/wlNoiseFloor.sh b/snmp/Openwrt/wlNoiseFloor.sh new file mode 100755 index 000000000..ab404364d --- /dev/null +++ b/snmp/Openwrt/wlNoiseFloor.sh @@ -0,0 +1,20 @@ +#!/bin/sh + +# wlNoiseFloor.sh +# Returns wlNoiseFloor, in dBm +# Arguments: targed interface + +# Check number of arguments +if [ $# -ne 1 ]; then + /bin/echo "Usage: wlNoiseFloor.sh interface" + /bin/echo "Missing targeted interface, exiting." + exit 1 +fi + +# Get hostname, extract noise floor. 
Note, all associated stations have the same value, so just grab the first one +# Use tail, not head (i.e. last line, not first), as head exits immediately, breaks the pipe to cut! +hostname=`/bin/uname -n` +noise=`/usr/bin/iwinfo $1 assoclist | /usr/bin/cut -s -d "/" -f 2 | /usr/bin/cut -s -d "(" -f 1 | /usr/bin/cut -s -d " " -f 2 | /usr/bin/tail -1` + +# Return snmp result +/bin/echo $noise diff --git a/snmp/Openwrt/wlRate.sh b/snmp/Openwrt/wlRate.sh new file mode 100755 index 000000000..76ab0c881 --- /dev/null +++ b/snmp/Openwrt/wlRate.sh @@ -0,0 +1,32 @@ +#!/bin/sh + +# wlRate.sh +# Returns wlRate, bit rate in Mbit/s +# Arguments: +# $1: targeted interface +# $2: direction (tx or rx) +# $3: desired result (sum, avg, min, max) + +# Check number of arguments +if [ $# -ne 3 ]; then + /bin/echo "Usage: wlRate.sh interface direction result" + /bin/echo "Incorrect script usage, exiting." + exit 1 +fi + +# Get hostname, calculate result. Sum just for debug, and have to return integer +# => If not integer (e.g. 2.67e+07), LibreNMS will drop the exponent (result, 2.67 bits/sec!) 
+hostname=`/bin/uname -n` +ratelist=`/usr/sbin/iw dev $1 station dump | /bin/grep "$2 bitrate" | /usr/bin/cut -f 2 -s -d" "` +if [ "$3" == "sum" ]; then + result=`/bin/echo "$ratelist" | /usr/bin/awk -F ':' '{sum += $2} END {printf "%d\n", 1000000*sum}'` +elif [ "$3" == "avg" ]; then + result=`/bin/echo "$ratelist" | /usr/bin/awk -F ':' '{sum += $2} END {printf "%d\n", 1000000*sum/NR}'` +elif [ "$3" == "min" ]; then + result=`/bin/echo "$ratelist" | /usr/bin/awk -F ':' 'NR == 1 || $2 < min {min = $2} END {printf "%d\n", 1000000*min}'` +elif [ "$3" == "max" ]; then + result=`/bin/echo "$ratelist" | /usr/bin/awk -F ':' 'NR == 1 || $2 > max {max = $2} END {printf "%d\n", 1000000*max}'` +fi + +# Return snmp result +echo $result diff --git a/snmp/Openwrt/wlSNR.sh b/snmp/Openwrt/wlSNR.sh new file mode 100755 index 000000000..337d55979 --- /dev/null +++ b/snmp/Openwrt/wlSNR.sh @@ -0,0 +1,30 @@ +#!/bin/sh + +# wlSNR.sh +# Returns wlSNR, Signal-to-Noise ratio in dB +# Arguments: +# $1: targeted interface +# $2: desired result (sum, avg, min, max) + +# Check number of arguments +if [ $# -ne 2 ]; then + /bin/echo "Usage: wlSNR.sh interface result" + /bin/echo "Incorrect script usage, exiting." + exit 1 +fi + +# Get hostname, calculate result. 
Sum just for debug, and return integer (safest / easiest) +hostname=`/bin/uname -n` +snrlist=`/usr/bin/iwinfo $1 assoclist | /usr/bin/cut -s -d "/" -f 2 | /usr/bin/cut -s -d "(" -f 2 | /usr/bin/cut -s -d " " -f 2 | /usr/bin/cut -s -d ")" -f 1` +if [ "$2" == "sum" ]; then + result=`/bin/echo "$snrlist" | /usr/bin/awk -F ':' '{sum += $1} END {printf "%d\n", sum}'` +elif [ "$2" == "avg" ]; then + result=`/bin/echo "$snrlist" | /usr/bin/awk -F ':' '{sum += $1} END {printf "%d\n", sum/NR}'` +elif [ "$2" == "min" ]; then + result=`/bin/echo "$snrlist" | /usr/bin/awk -F ':' 'NR == 1 || $1 < min {min = $1} END {printf "%d\n", min}'` +elif [ "$2" == "max" ]; then + result=`/bin/echo "$snrlist" | /usr/bin/awk -F ':' 'NR == 1 || $1 > max {max = $1} END {printf "%d\n", max}'` +fi + +# Return snmp result +echo $result From 842f709e50cd589f20b3450d90ef9c04d8d913bb Mon Sep 17 00:00:00 2001 From: SourceDoctor Date: Sat, 11 Jul 2020 17:12:01 +0200 Subject: [PATCH 268/497] pi-hol script update (#319) --- snmp/pi-hole | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/snmp/pi-hole b/snmp/pi-hole index 99309a198..67e35d613 100644 --- a/snmp/pi-hole +++ b/snmp/pi-hole @@ -2,11 +2,17 @@ set -euo pipefail IFS=$'\n\t' +CONFIGFILE='/etc/snmp/pi-hole.conf' + API_AUTH_KEY="" API_URL="localhost/admin/api.php" URL_READ_ONLY="?summaryRaw" URL_QUERY_TYPE="?getQueryTypes&auth=" +if [ -f $CONFIGFILE ]; then + . 
$CONFIGFILE +fi + #/ Description: BASH script to get Pi-hole stats #/ Examples: ./pi-hole-stats.sh From 9f877990eea2dd395152e1e7fe75e8f74408313a Mon Sep 17 00:00:00 2001 From: SourceDoctor Date: Tue, 14 Jul 2020 01:10:37 +0200 Subject: [PATCH 269/497] Fix parsing Error on MDADM if increasing disc count (#320) --- snmp/mdadm | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/mdadm b/snmp/mdadm index 5265fe69f..f4ccf6c0d 100755 --- a/snmp/mdadm +++ b/snmp/mdadm @@ -36,7 +36,7 @@ if [ -d /dev/md ] ; then fi RAID_DEV_LIST=$($LS $RAID/slaves/) RAID_LEVEL=$($CAT $RAID/md/level) - RAID_DISC_COUNT=$($CAT $RAID/md/raid_disks) + RAID_DISC_COUNT=$($CAT $RAID/md/raid_disks| cut -d' ' -f1) RAID_STATE=$($CAT $RAID/md/array_state) RAID_ACTION=$($CAT $RAID/md/sync_action) RAID_DEGRADED=$($CAT $RAID/md/degraded) From f26c1e5b4d0479708bcb01dccdc1905cd7ff8fa1 Mon Sep 17 00:00:00 2001 From: Slashdoom <5092581+slashdoom@users.noreply.github.com> Date: Sat, 18 Jul 2020 11:59:17 +1200 Subject: [PATCH 270/497] Update check_mrpe (#321) Include exit code when proper -a flag is specified. --- agent-local/check_mrpe | 32 +++++++++++++++++++++++++++----- 1 file changed, 27 insertions(+), 5 deletions(-) diff --git a/agent-local/check_mrpe b/agent-local/check_mrpe index e80c62d70..6cd3a044c 100644 --- a/agent-local/check_mrpe +++ b/agent-local/check_mrpe @@ -1,6 +1,6 @@ #!/usr/bin/env bash -VERSION="0.1 (beta)" +VERSION="0.2 (beta)" IFS=$'\n\t' GLOBIGNORE="*" @@ -10,12 +10,12 @@ BIN_SED="$(command -v sed)" pval="6556" -EXITCODE=0 +EXITCODE=3 while getopts "Vha:H:p:" opt; do case $opt in V) printf "check_mrpe v$VERSION\n" - exit $EXITCODE + exit 0 ;; h) hflag=1 ;; @@ -29,7 +29,7 @@ while getopts "Vha:H:p:" opt; do pval="$OPTARG" ;; \?) 
hflag=1 - EXITCODE=1 + EXITCODE=0 ;; esac done @@ -50,11 +50,33 @@ fi if [ "$aflag" ]; then SED_CMD="s/\((.*) $aval [0-9] \)\(.*\)/\2/p" + SED_CMD_STATUS="s/(.*) $aval \([0-9]\) \(.*\)/\1/p" else SED_CMD="s/(.*) \(.*\) [0-9] \(.*\)/\1 \2/p" fi for i in `$BIN_NC --recv-only $Hval $pval 2>&1 | $BIN_SED '/^<<>>/,/^<< Date: Sat, 18 Jul 2020 19:37:45 +0300 Subject: [PATCH 271/497] Use sh in ups-nut.sh shebang (#315) since the `ups-nut.sh` script is not using any bash specific syntax. This change removes unneeded dependency on bash. --- snmp/ups-nut.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/ups-nut.sh b/snmp/ups-nut.sh index 18f431469..7e3d8a15a 100755 --- a/snmp/ups-nut.sh +++ b/snmp/ups-nut.sh @@ -1,4 +1,4 @@ -#!/usr/bin/env bash +#!/bin/sh ################################################################ # Instructions: # # 1. copy this script to /etc/snmp/ and make it executable: # From 386e7e3bfae08876fb843df13887ea194e7d8a34 Mon Sep 17 00:00:00 2001 From: Martin Date: Sat, 18 Jul 2020 18:40:16 +0200 Subject: [PATCH 272/497] Fix missing compatibility to Pi-hole V5 API (#322) To minimize the impact of future API changes in terms of data order or additional data being appended, required elements are selected directly with the jq tool in the expected order. 
--- snmp/pi-hole | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/snmp/pi-hole b/snmp/pi-hole index 67e35d613..fc5f52d77 100644 --- a/snmp/pi-hole +++ b/snmp/pi-hole @@ -71,10 +71,10 @@ debug() { exportdata() { # domains_being_blocked / dns_query_total / ads_blocked_today / ads_percentage_today # unique_domains / queries_forwarded / queries_cached - GET_STATS=$(curl -s $API_URL$URL_READ_ONLY | jq '.[]') + GET_STATS=$(curl -s $API_URL$URL_READ_ONLY | jq '.domains_being_blocked, .dns_queries_today, .ads_blocked_today, .ads_percentage_today, .unique_domains, .queries_forwarded, .queries_cached') echo $GET_STATS | tr " " "\n" # A / AAAA / PTR / SRV - GET_QUERY_TYPE=$(curl -s $API_URL$URL_QUERY_TYPE$API_AUTH_KEY | jq '.[][]') + GET_QUERY_TYPE=$(curl -s $API_URL$URL_QUERY_TYPE$API_AUTH_KEY | jq '.[]["A (IPv4)", "AAAA (IPv6)", "PTR", "SRV"]') echo $GET_QUERY_TYPE | tr " " "\n" } From e778097de649c62459c4a31fc2ddfa96a4bcc86e Mon Sep 17 00:00:00 2001 From: Yoan Tanguy Date: Mon, 20 Jul 2020 22:18:22 +0200 Subject: [PATCH 273/497] Fix bash check from directory to block type (#295) * Fix bash check from directory to block type https://github.com/librenms/librenms-agent/issues/278 * Update mdadm fix array existance check Co-authored-by: SourceDoctor --- snmp/mdadm | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/snmp/mdadm b/snmp/mdadm index f4ccf6c0d..ecbc8e510 100755 --- a/snmp/mdadm +++ b/snmp/mdadm @@ -14,10 +14,11 @@ VERSION=1 ERROR_CODE=0 ERROR_STRING="" -OUTPUT_DATA='['\ +OUTPUT_DATA='[' -if [ -d /dev/md ] ; then - for ARRAY_BLOCKDEVICE in $(ls -1 /dev/md/*) ; do +# use 'ls' command to check if md blocks exist +if $LS /dev/md?* 1> /dev/null 2>&1 ; then + for ARRAY_BLOCKDEVICE in $($LS -1 /dev/md?*) ; do RAID="/sys/block/"$($BASENAME $($REALPATH $ARRAY_BLOCKDEVICE)) # ignore arrays with no slaves @@ -29,7 +30,7 @@ if [ -d /dev/md ] ; then continue fi - if [[ $($BASENAME $ARRAY_BLOCKDEVICE) = [[:digit:]] ]]; then + 
if [[ $($BASENAME $ARRAY_BLOCKDEVICE) = [[:digit:]] ]] ; then RAID_NAME=$($BASENAME $RAID) else RAID_NAME=$($BASENAME $ARRAY_BLOCKDEVICE) @@ -101,11 +102,13 @@ if [ -d /dev/md ] ; then '","sync_completed":"'$RAID_SYNC_COMPLETED\ '"},' - OUTPUT_DATA=$OUTPUT_DATA$ARRAY_DATA + OUTPUT_DATA=$OUTPUT_DATA$ARRAY_DATA done -fi -OUTPUT_DATA=${OUTPUT_DATA: : -1}']' + OUTPUT_DATA=${OUTPUT_DATA: : -1}']' +else + OUTPUT_DATA=${OUTPUT_DATA}']' +fi OUTPUT='{"data":'$OUTPUT_DATA\ ',"error":"'$ERROR_CODE\ From 0bb2c15c780c8d13d17d41d9b875cb25dd077cf8 Mon Sep 17 00:00:00 2001 From: Hans Erasmus Date: Tue, 18 Aug 2020 20:37:44 +0200 Subject: [PATCH 274/497] Rewrite Apache SNMP Agent to Python3 (#326) * Updated to work with python3 Thanks to @murrant this file is now python3 compatible. * Formatted for PEP8 specs * Updated except according to request * Updated according to request As requested [here](https://github.com/librenms/librenms/pull/12009) by SourceDoctor. --- snmp/apache-stats.py | 112 +++++++++++++++++++++---------------------- 1 file changed, 54 insertions(+), 58 deletions(-) diff --git a/snmp/apache-stats.py b/snmp/apache-stats.py index f098a8c55..1421c20e3 100755 --- a/snmp/apache-stats.py +++ b/snmp/apache-stats.py @@ -1,10 +1,10 @@ -#!/usr/bin/python -# Copyright (C) 2009 Glen Pitt-Pladdy +#!/usr/bin/python3 +# Copyright(C) 2009 Glen Pitt-Pladdy # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 -# of the License, or (at your option) any later version. +# of the License, or(at your option) any later version. 
# # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of @@ -17,73 +17,69 @@ # # # - -CACHETIME = 30 -CACHEFILE = '/var/cache/librenms/apache-snmp' - -# check for cache file newer CACHETIME seconds ago import os import time -if os.path.isfile ( CACHEFILE ) \ - and ( time.time() - os.stat ( CACHEFILE )[8] ) < CACHETIME: - # use cached data - f = open ( CACHEFILE, 'r' ) - data = f.read() - f.close() +import urllib.request + +cachetime = 30 +cachefile = '/var/cache/librenms/apache-snmp' + +# Check for a cache file newer than cachetime seconds ago + +if os.path.isfile(cachefile) and (time.time() - os.stat(cachefile)[8]) < cachetime: + # Use cached data + f = open(cachefile, 'r') + data = f.read() + f.close() else: - # grab the status URL (fresh data) - # need debian package python-urlgrabber - from urlgrabber import urlread - data = urlread ( 'http://localhost/server-status?auto', - user_agent = 'SNMP Apache Stats' ) - # write file - f = open ( CACHEFILE+'.TMP.'+`os.getpid()`, 'w' ) - f.write ( data ) - f.close() - os.rename ( CACHEFILE+'.TMP.'+`os.getpid()`, CACHEFILE ) + # Grab the status URL (fresh data), needs package urllib3 + data = urllib.request.urlopen("http://localhost/server-status?auto").read().decode('UTF-8') + # Write file + f = open(cachefile+'.TMP.'+str(os.getpid()), 'w') + f.write(data) + f.close() + os.rename(cachefile+'.TMP.'+str(os.getpid()), cachefile) # dice up the data -scoreboardkey = [ '_', 'S', 'R', 'W', 'K', 'D', 'C', 'L', 'G', 'I', '.' 
] +scoreboardkey = ['_', 'S', 'R', 'W', 'K', 'D', 'C', 'L', 'G', 'I', '.'] params = {} for line in data.splitlines(): - fields = line.split( ': ' ) - if len(fields) <= 1: - continue # "localhost" as first line cause out of index error - elif fields[0] == 'Scoreboard': - # count up the scoreboard into states - states = {} - for state in scoreboardkey: - states[state] = 0 - for state in fields[1]: - states[state] += 1 - elif fields[0] == 'Total kBytes': - # turn into base (byte) value - params[fields[0]] = int(fields[1])*1024 - elif len(fields) > 1: - # just store everything else - params[fields[0]] = fields[1] + fields = line.split(': ') + if len(fields) <= 1: + continue # "localhost" as first line causes out of index error + elif fields[0] == 'Scoreboard': + # count up the scoreboard into states + states = {} + for state in scoreboardkey: + states[state] = 0 + for state in fields[1]: + states[state] += 1 + elif fields[0] == 'Total kBytes': + # turn into base(byte) value + params[fields[0]] = int(fields[1])*1024 + elif len(fields) > 1: + # just store everything else + params[fields[0]] = fields[1] -# output the data in order (this is because some platforms don't have them all) +# output the data in order(this is because some platforms don't have them all) dataorder = [ - 'Total Accesses', - 'Total kBytes', - 'CPULoad', - 'Uptime', - 'ReqPerSec', - 'BytesPerSec', - 'BytesPerReq', - 'BusyWorkers', - 'IdleWorkers' + 'Total Accesses', + 'Total kBytes', + 'CPULoad', + 'Uptime', + 'ReqPerSec', + 'BytesPerSec', + 'BytesPerReq', + 'BusyWorkers', + 'IdleWorkers' ] for param in dataorder: - try: - print params[param] -# print param - except: # not all Apache's have all stats - print 'U' + try: + print(params[param]) + except KeyError: # not all Apache's have all stats + print('U') # print the scoreboard for state in scoreboardkey: - print states[state] -# print state + print(states[state]) From 76c10bff849cea996778f3d3fee39367427eb18f Mon Sep 17 00:00:00 2001 From: arrmo 
Date: Fri, 4 Sep 2020 16:29:03 -0500 Subject: [PATCH 275/497] Update distro to support BeagleBoard (#328) --- snmp/distro | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/snmp/distro b/snmp/distro index 6771e8fd5..3f0d087f6 100755 --- a/snmp/distro +++ b/snmp/distro @@ -64,6 +64,10 @@ elif [ "${OS}" = "Linux" ] ; then DIST="${DIST}/PVE `/usr/bin/pveversion | cut -d '/' -f 2`" IGNORE_OS_RELEASE=1 fi + if [ -f /etc/dogtag ]; then + DIST=`cat /etc/dogtag` + IGNORE_OS_RELEASE=1 + fi elif [ -f /etc/gentoo-release ] ; then DIST="Gentoo" From 1f9e03db2c63652cf67bd2c6929617e29b93ac28 Mon Sep 17 00:00:00 2001 From: priiduonu Date: Sat, 5 Sep 2020 02:00:52 +0300 Subject: [PATCH 276/497] get correct ARM frequency reading in raspberry.sh (#325) Fixes #324 --- snmp/raspberry.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/snmp/raspberry.sh b/snmp/raspberry.sh index df4ffcea4..41f2902a0 100755 --- a/snmp/raspberry.sh +++ b/snmp/raspberry.sh @@ -24,8 +24,8 @@ $picmd $getVoltsCore | $pised 's|[^0-9.]||g' $picmd $getVoltsRamC | $pised 's|[^0-9.]||g' $picmd $getVoltsRamI | $pised 's|[^0-9.]||g' $picmd $getVoltsRamP | $pised 's|[^0-9.]||g' -$picmd $getFreqArm | $pised 's/frequency(45)=//g' -$picmd $getFreqCore | $pised 's/frequency(1)=//g' +$picmd $getFreqArm | $pised 's/frequency([0-9]*)=//g' +$picmd $getFreqCore | $pised 's/frequency([0-9]*)=//g' $picmd $getStatusH264 | $pised 's/H264=//g' $picmd $getStatusMPG2 | $pised 's/MPG2=//g' $picmd $getStatusWVC1 | $pised 's/WVC1=//g' From fcdc9179de974b6efef11e85518dad2fc1b8edfc Mon Sep 17 00:00:00 2001 From: Avinash Kumar Date: Fri, 18 Sep 2020 20:18:51 +0530 Subject: [PATCH 277/497] Opensips, Icecast and Voipmon snmp scipts (#331) * Icecast script added * Opensips script added * Voipmon script added * Open files added to icecast stats Co-authored-by: avinash kumar --- snmp/icecast-stats.sh | 14 ++++++++++++++ snmp/opensips-stats.sh | 17 +++++++++++++++++ snmp/voipmon-stats.sh | 13 +++++++++++++ 3 files 
changed, 44 insertions(+) create mode 100644 snmp/icecast-stats.sh create mode 100644 snmp/opensips-stats.sh create mode 100644 snmp/voipmon-stats.sh diff --git a/snmp/icecast-stats.sh b/snmp/icecast-stats.sh new file mode 100644 index 000000000..c93c6bca0 --- /dev/null +++ b/snmp/icecast-stats.sh @@ -0,0 +1,14 @@ +#!/bin/bash +# Author: Sharad Kumar + +used_memory=$(ps -U icecast -o rsz | awk 'FNR==2{print}') +cpu_load=$(ps -U icecast -o %cpu | awk 'FNR==2{print}') + +pid=$(pidof icecast) +total_files=$(ls -l /proc/${pid}/fd | wc -l) + +echo "Used Memory="$used_memory +echo "CPU Load="$cpu_load +echo "Open files="$total_files + +exit diff --git a/snmp/opensips-stats.sh b/snmp/opensips-stats.sh new file mode 100644 index 000000000..e8fe2b249 --- /dev/null +++ b/snmp/opensips-stats.sh @@ -0,0 +1,17 @@ +#!/bin/bash +# Author: Sharad Kumar + +total_memory=$(opensipsctl fifo get_statistics total_size | awk '{print "Total Memory=" $2}') +used_memory=$(opensipsctl fifo get_statistics real_used_size | awk '{print "Used Memory=" $2}') +free_memory=$(opensipsctl fifo get_statistics free_size | awk '{print "Free Memory=" $2}') +load_average=$(ps -C opensips -o %cpu | awk '{sum += $1} END {print "Load Average=" sum}') +total_files=$(lsof -c opensips | wc -l) + + +echo $total_memory +echo $used_memory +echo $free_memory +echo $load_average +echo "Open files="$total_files + +exit diff --git a/snmp/voipmon-stats.sh b/snmp/voipmon-stats.sh new file mode 100644 index 000000000..671a04af9 --- /dev/null +++ b/snmp/voipmon-stats.sh @@ -0,0 +1,13 @@ +#!/bin/bash +# Author: Sharad Kumar + +used_memory=$(ps -C voipmonitor -o rsz | awk 'FNR==2 {print}') +cpu_load=$(ps -C voipmonitor -o %cpu | awk 'FNR==2 {print}') + +pid=$(pidof voipmonitor) +total_files=$(ls -l /proc/${pid}/fd | wc -l) + +echo "Used Memory="$used_memory +echo "CPU Load="$cpu_load +echo "Open files="$total_files +exit From 0a7304ebc24f7c4cd411e201c8a0ab21f6911b21 Mon Sep 17 00:00:00 2001 From: Jason Cheng 
<30381035+jasoncheng7115@users.noreply.github.com> Date: Mon, 21 Sep 2020 07:17:35 +0800 Subject: [PATCH 278/497] Add Proxmox MG identification (#329) Added distro support to identify Proxmox MG (PMG) version. --- snmp/distro | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/snmp/distro b/snmp/distro index 3f0d087f6..ee29dc678 100755 --- a/snmp/distro +++ b/snmp/distro @@ -64,6 +64,11 @@ elif [ "${OS}" = "Linux" ] ; then DIST="${DIST}/PVE `/usr/bin/pveversion | cut -d '/' -f 2`" IGNORE_OS_RELEASE=1 fi + if [ -f /usr/bin/pmgversion ]; then + # pmgversion requires root permissions to run, please add NOPASSWD setting to visudo. + DIST="${DIST}/PMG `sudo /usr/bin/pmgversion | cut -d '/' -f 2`" + IGNORE_OS_RELEASE=1 + fi if [ -f /etc/dogtag ]; then DIST=`cat /etc/dogtag` IGNORE_OS_RELEASE=1 From 8e096169086ba21ecc424e5d2f7592c25c42b72c Mon Sep 17 00:00:00 2001 From: Clark Chen <9372896+clarkchentw@users.noreply.github.com> Date: Wed, 23 Sep 2020 19:11:08 -0500 Subject: [PATCH 279/497] Add support for Alpine (apk) (#332) * Add support for Alpine (apk) * Minor fix Add back #!/usr/bin/env bash --- snmp/osupdate | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/snmp/osupdate b/snmp/osupdate index 8a391fa2b..6e6f8f533 100755 --- a/snmp/osupdate +++ b/snmp/osupdate @@ -26,6 +26,8 @@ BIN_PACMAN='/usr/bin/env pacman' CMD_PACMAN='-Sup' BIN_PKG='/usr/sbin/pkg' CMD_PKG=' audit -q -F' +BIN_APK='/sbin/apk' +CMD_APK=' version' ################################################################ # Don't change anything unless you know what are you doing # @@ -78,6 +80,14 @@ elif command -v pkg &>/dev/null ; then else echo "0"; fi +elif command -v apk &>/dev/null ; then + # Alpine + UPDATES=`$BIN_APK $CMD_APK | $BIN_WC $CMD_WC` + if [ $UPDATES -ge 2 ]; then + echo $(($UPDATES-1)); + else + echo "0"; + fi else echo "0"; fi From a6966fff5c65264d14cfaf28ef4399daf720e316 Mon Sep 17 00:00:00 2001 From: Laurent Cheylus Date: Sat, 10 Oct 2020 12:49:14 +0200 Subject: [PATCH 
280/497] Fix distro for FreeBSD/FreeNAS (#335) Check if /etc/version file present for FreeBSD OS Without fix on FreeBSD: ./distro cat: /etc/version: No such file or directory FreeBSD 12.1-RELEASE-p5 amd64 GENERIC --- snmp/distro | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/snmp/distro b/snmp/distro index ee29dc678..f08606e7e 100755 --- a/snmp/distro +++ b/snmp/distro @@ -139,9 +139,11 @@ elif [ "${OS}" = "Darwin" ] ; then fi elif [ "${OS}" = "FreeBSD" ] ; then - DIST=$(cat /etc/version | cut -d'-' -f 1) - if [ "${DIST}" = "FreeNAS" ]; then - OSSTR=`cat /etc/version | cut -d' ' -f 1` + if [ -f /etc/version ] ; then + DIST=$(cat /etc/version | cut -d'-' -f 1) + if [ "${DIST}" = "FreeNAS" ]; then + OSSTR=`cat /etc/version | cut -d' ' -f 1` + fi else OSSTR=`/usr/bin/uname -mior` fi From 5dfa50ab3dd49f157d5c340908733fdd148ef6bc Mon Sep 17 00:00:00 2001 From: Roman Kuzmitskii Date: Sat, 10 Oct 2020 16:51:24 +0600 Subject: [PATCH 281/497] stop using bash for distro script. bash is too big for embedded (#334) --- snmp/distro | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/snmp/distro b/snmp/distro index f08606e7e..9e8768cd5 100755 --- a/snmp/distro +++ b/snmp/distro @@ -1,4 +1,4 @@ -#!/usr/bin/env bash +#!/usr/bin/env sh # Detects which OS and if it is Linux then it will detect which Linux Distribution. OS=`uname -s` @@ -19,7 +19,7 @@ elif [ "${OS}" = "Linux" ] ; then if [ -f /etc/fedora-release ]; then DIST=$(cat /etc/fedora-release | awk '{print $1}') REV=`cat /etc/fedora-release | sed s/.*release\ // | sed s/\ .*//` - + elif [ -f /etc/redhat-release ] ; then DIST=$(cat /etc/redhat-release | awk '{print $1}') if [ "${DIST}" = "CentOS" ]; then @@ -113,7 +113,7 @@ elif [ "${OS}" = "Linux" ] ; then # try standardized os version methods if [ -f /etc/os-release -a "${IGNORE_OS_RELEASE}" != 1 ] ; then - source /etc/os-release + . 
/etc/os-release STD_DIST="$NAME" STD_REV="$VERSION_ID" elif [ -f /etc/lsb-release -a "${IGNORE_LSB}" != 1 ] ; then From 70d41776beb66ed6cefd5f77dfadd3ffa85511b4 Mon Sep 17 00:00:00 2001 From: Jellyfrog Date: Thu, 22 Oct 2020 02:40:18 +0200 Subject: [PATCH 282/497] Delete snmpd.conf.example Out of date, let's use the one in the main repo instead --- snmp/snmpd.conf.example | 13 ------------- 1 file changed, 13 deletions(-) delete mode 100644 snmp/snmpd.conf.example diff --git a/snmp/snmpd.conf.example b/snmp/snmpd.conf.example deleted file mode 100644 index a10ffcd64..000000000 --- a/snmp/snmpd.conf.example +++ /dev/null @@ -1,13 +0,0 @@ -# Change RANDOMSTRINGGOESHERE to your preferred SNMP community string -com2sec readonly default RANDOMSTRINGGOESHERE - -group MyROGroup v2c readonly -view all included .1 80 -access MyROGroup "" any noauth exact all none none - -syslocation Rack, Room, Building, City, Country [GPSX,Y] -syscontact Your Name - -#Distro Detection -extend .1.3.6.1.4.1.2021.7890.1 distro /usr/bin/distro - From af0106920a6f7d3d072f54d620ddbc69f37a6470 Mon Sep 17 00:00:00 2001 From: Kevin Zink Date: Wed, 16 Dec 2020 01:05:49 +0100 Subject: [PATCH 283/497] Bugfix (#340) Fix syntax errors --- snmp/freeradius.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/snmp/freeradius.sh b/snmp/freeradius.sh index 11d343585..dac7e9980 100644 --- a/snmp/freeradius.sh +++ b/snmp/freeradius.sh @@ -10,9 +10,9 @@ RADIUS_SERVER='localhost' RADIUS_PORT='18121' RADIUS_KEY='adminsecret' -if [ -f $CONFIGFILE ]; do +if [ -f $CONFIGFILE ]; then . 
$CONFIGFILE -done +fi # Default radclient access request, shouldn't need to be changed RADIUS_STATUS_CMD='Message-Authenticator = 0x00, FreeRADIUS-Statistics-Type = 31, Response-Packet-Type = Access-Accept' From a7f820023b56aba4c9731d89761f1095461e97a3 Mon Sep 17 00:00:00 2001 From: mkninc Date: Sat, 2 Jan 2021 02:59:44 +0100 Subject: [PATCH 284/497] Add TrueNAS 12 compatibility (#342) * Configurable paths for zpool/sysctl * Allow non int values * Ignore empty lines from sysctl output --- snmp/zfs-freebsd.py | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/snmp/zfs-freebsd.py b/snmp/zfs-freebsd.py index 2227598df..d32e959a1 100644 --- a/snmp/zfs-freebsd.py +++ b/snmp/zfs-freebsd.py @@ -5,6 +5,9 @@ import json import subprocess +SYSCTL = '/sbin/sysctl' +ZPOOL = '/usr/local/sbin/zpool' + def percent(numerator, denominator, default=0): try: return numerator / denominator * 100 @@ -12,15 +15,19 @@ def percent(numerator, denominator, default=0): return default def main(args): - p = subprocess.run(['/sbin/sysctl', '-q', 'kstat.zfs', 'vfs.zfs'], stdout=subprocess.PIPE, universal_newlines=True) - + p = subprocess.run([SYSCTL, '-q', 'kstat.zfs', 'vfs.zfs'], stdout=subprocess.PIPE, universal_newlines=True) + if p.returncode != 0: return p.returncode def chomp(line): bits = [b.strip() for b in line.split(':')] - return bits[0], int(bits[1]) - stats = dict(chomp(l) for l in p.stdout.splitlines()) + try: + return bits[0], int(bits[1]) + except ValueError: + return bits[0], bits[1] + + stats = dict(chomp(l) for l in p.stdout.splitlines() if l) if 'kstat.zfs.misc.arcstats.recycle_miss' not in stats: stats['kstat.zfs.misc.arcstats.recycle_miss'] = 0 @@ -92,7 +99,7 @@ def chomp(line): output['pre_meta_misses_per'] = percent(output['pre_meta_misses'], output['arc_misses']) # pools - p = subprocess.run(['/sbin/zpool', 'list', '-pH'], stdout=subprocess.PIPE, universal_newlines=True) + p = subprocess.run([ZPOOL, 'list', '-pH'], 
stdout=subprocess.PIPE, universal_newlines=True) if p.returncode != 0: return p.returncode output['pools'] = [] From 8c1bcfb8adb4098c8662ef2480c305ffc53bd33b Mon Sep 17 00:00:00 2001 From: SharadKumar97 Date: Fri, 1 Jan 2021 20:00:49 -0600 Subject: [PATCH 285/497] Adding opensips3.X + version support (#338) --- snmp/opensip3-stats.sh | 19 +++++++++++++++++++ snmp/opensips-stats.sh | 1 + 2 files changed, 20 insertions(+) create mode 100644 snmp/opensip3-stats.sh diff --git a/snmp/opensip3-stats.sh b/snmp/opensip3-stats.sh new file mode 100644 index 000000000..fa85e023b --- /dev/null +++ b/snmp/opensip3-stats.sh @@ -0,0 +1,19 @@ +#!/bin/bash +# Author: Sharad Kumar +# This script is for OpenSIPS 3.X + version + +total_memory=$(opensips-cli -x mi get_statistics total_size | awk '/shmem:total_size/ { gsub(/[",]/,""); print "Total Memory=" $2}') +used_memory=$(opensips-cli -x mi get_statistics real_used_size | awk '/shmem:real_used_size/ { gsub(/[",]/,""); print "Used Memory=" $2}') +free_memory=$(opensips-cli -x mi get_statistics free_size | awk '/shmem:free_size/ { gsub(/[",]/,""); print "Free Memory=" $2}') +load_average=$(ps -C opensips -o %cpu | awk '{sum += $1} END {print "Load Average=" sum}') +total_files=$(lsof -c opensips | wc -l) + + +echo $total_memory +echo $used_memory +echo $free_memory +echo $load_average +echo "Open files="$total_files + +exit + diff --git a/snmp/opensips-stats.sh b/snmp/opensips-stats.sh index e8fe2b249..7127ec5ea 100644 --- a/snmp/opensips-stats.sh +++ b/snmp/opensips-stats.sh @@ -1,5 +1,6 @@ #!/bin/bash # Author: Sharad Kumar +# This script is for OpenSIPS 2.X + version total_memory=$(opensipsctl fifo get_statistics total_size | awk '{print "Total Memory=" $2}') used_memory=$(opensipsctl fifo get_statistics real_used_size | awk '{print "Used Memory=" $2}') From e0b7921517247d9ce9cfed40614f06ec9faada51 Mon Sep 17 00:00:00 2001 From: FingerlessGloves Date: Sat, 2 Jan 2021 02:03:51 +0000 Subject: [PATCH 286/497] Show version down to . 
release for any Debian (#337) Show version down to . release for any Debian based distro's Proxmox and Debian 10.6 tested. --- snmp/distro | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/snmp/distro b/snmp/distro index 9e8768cd5..69cd452a9 100755 --- a/snmp/distro +++ b/snmp/distro @@ -54,6 +54,7 @@ elif [ "${OS}" = "Linux" ] ; then elif [ -f /etc/debian_version ] ; then DIST="Debian `cat /etc/debian_version`" REV="" + IGNORE_OS_RELEASE=1 if [ -f /usr/bin/lsb_release ] ; then ID=`lsb_release -i | awk -F ':' '{print $2}' | sed 's/ //g'` fi @@ -62,16 +63,13 @@ elif [ "${OS}" = "Linux" ] ; then fi if [ -f /usr/bin/pveversion ]; then DIST="${DIST}/PVE `/usr/bin/pveversion | cut -d '/' -f 2`" - IGNORE_OS_RELEASE=1 fi if [ -f /usr/bin/pmgversion ]; then # pmgversion requires root permissions to run, please add NOPASSWD setting to visudo. DIST="${DIST}/PMG `sudo /usr/bin/pmgversion | cut -d '/' -f 2`" - IGNORE_OS_RELEASE=1 fi if [ -f /etc/dogtag ]; then DIST=`cat /etc/dogtag` - IGNORE_OS_RELEASE=1 fi elif [ -f /etc/gentoo-release ] ; then From 6fe6aa4956d53d471149dd6e6b0cf1daea20ced7 Mon Sep 17 00:00:00 2001 From: Mike Dixson Date: Sat, 2 Jan 2021 02:07:23 +0000 Subject: [PATCH 287/497] Update check_mpre (#336) --recv-only argument doesn't exist on some well used version of netcat. This method timesout after 1 second of idle time. Only possible downside is that if it takes more than 1 second to initiate the connection it may timeout too. 
--- agent-local/check_mrpe | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/agent-local/check_mrpe b/agent-local/check_mrpe index 6cd3a044c..1b8401f99 100644 --- a/agent-local/check_mrpe +++ b/agent-local/check_mrpe @@ -55,7 +55,7 @@ else SED_CMD="s/(.*) \(.*\) [0-9] \(.*\)/\1 \2/p" fi -for i in `$BIN_NC --recv-only $Hval $pval 2>&1 | $BIN_SED '/^<<>>/,/^<<&1 | $BIN_SED '/^<<>>/,/^<<&1 | $BIN_SED '/^<<>>/,/^<< Date: Mon, 18 Jan 2021 17:54:03 +0000 Subject: [PATCH 288/497] Fix STDERR output corrupting json output in mdadm app (#344) When checking if arrays have slaves, the mdadm script, does an ls/$LS of the device to see if it exists. This $LS throws an error to STDERR if it does not match. This output is caught by snmp and corrupts the json output --- snmp/mdadm | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/mdadm b/snmp/mdadm index ecbc8e510..b25629266 100755 --- a/snmp/mdadm +++ b/snmp/mdadm @@ -22,7 +22,7 @@ if $LS /dev/md?* 1> /dev/null 2>&1 ; then RAID="/sys/block/"$($BASENAME $($REALPATH $ARRAY_BLOCKDEVICE)) # ignore arrays with no slaves - if [ -z "$($LS -1 $RAID/slaves)" ] ; then + if [ -z "$($LS -1 $RAID/slaves 2> /dev/null)" ] ; then continue fi # ignore "non existing" arrays From 9cf1be5d0cd6d1beb0de1f2964afef478f03d69b Mon Sep 17 00:00:00 2001 From: Russell Morris Date: Fri, 29 Jan 2021 07:31:05 -0600 Subject: [PATCH 289/497] Add snmp script for BeableBoard Temperature Sensors (#330) --- snmp/beagleboard.sh | 2 ++ 1 file changed, 2 insertions(+) create mode 100755 snmp/beagleboard.sh diff --git a/snmp/beagleboard.sh b/snmp/beagleboard.sh new file mode 100755 index 000000000..a73376d95 --- /dev/null +++ b/snmp/beagleboard.sh @@ -0,0 +1,2 @@ +#!/bin/sh +cat /sys/devices/virtual/thermal/thermal_zone*/temp From 40e593f64707b436ccf77289636949c578d6f2b5 Mon Sep 17 00:00:00 2001 From: Kanok Chantrasmi Date: Sat, 6 Mar 2021 07:01:57 +0700 Subject: [PATCH 290/497] gpsd python error (#352) gpsd script occasionally 
results in python error. cause: the expected info from GPS unit on each update has more than 10 lines, therefore, python didn't find the expected wording and result in a python error correction: increase the line from gpspipe from 10 to 20 lines --- snmp/gpsd | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/snmp/gpsd b/snmp/gpsd index 48f1be4ad..70fe924c9 100755 --- a/snmp/gpsd +++ b/snmp/gpsd @@ -26,7 +26,7 @@ TMPFILE=$(mktemp) trap "rm -f $TMPFILE" 0 2 3 15 # Write GPSPIPE Data to Temp File -$BIN_GPIPE -w -n 10 > $TMPFILE +$BIN_GPIPE -w -n 20 > $TMPFILE # Parse Temp file for GPSD Data VERSION=`cat $TMPFILE | $BIN_GREP -m 1 "VERSION" | $BIN_PYTHON -c 'import sys,json;print json.load(sys.stdin)["rev"]'` @@ -42,4 +42,4 @@ SATSUSED=`cat $TMPFILE | $BIN_GREP -m 1 "SKY" | $BIN_PYTHON -c 'import sys,json; # Output info for SNMP Extend echo '{"data":{"mode":"'$GPSDMODE'", "hdop":"'$HDOP'", "vdop":"'$VDOP'", "latitude":"'$LAT'", "longitude":"'$LONG'", "altitude":"'$ALT'", "satellites":"'$SATS'", "satellites_used":"'$SATSUSED'"}, "error":"0", "errorString":"", "version":"'$VERSION'"}' -rm $TMPFILE \ No newline at end of file +rm $TMPFILE From a769f0ef4fabca21e5efd1d71d1076c3fc1c1b00 Mon Sep 17 00:00:00 2001 From: Chewie Date: Sat, 6 Mar 2021 20:38:46 +0000 Subject: [PATCH 291/497] add DHCP stats to PiHole (#351) * add DHCP stats to PiHole * Update pi-hole Removed need for `calc` thanks to @jellyfrog 's suggestion --- snmp/pi-hole | 33 +++++++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) diff --git a/snmp/pi-hole b/snmp/pi-hole index fc5f52d77..f5cc5c9ab 100644 --- a/snmp/pi-hole +++ b/snmp/pi-hole @@ -8,11 +8,17 @@ API_AUTH_KEY="" API_URL="localhost/admin/api.php" URL_READ_ONLY="?summaryRaw" URL_QUERY_TYPE="?getQueryTypes&auth=" +PICONFIGFILE='/etc/pihole/setupVars.conf' +DHCPLEASEFILE='/etc/pihole/dhcp.leases' if [ -f $CONFIGFILE ]; then . $CONFIGFILE fi +# read in pi-hole variables for DHCP range +if [ -f $PICONFIGFILE ]; then + . 
$PICONFIGFILE +fi #/ Description: BASH script to get Pi-hole stats #/ Examples: ./pi-hole-stats.sh @@ -66,6 +72,16 @@ debug() { else echo '[ok] URL_QUERY_TYPE not set' fi + if [ -f $PICONFIGFILE ]; then + echo '[ok] Pi-Hole config file exists, DHCP stats will be captured if scope active' + else + echo '[error] Pi-Hole config file does not exist, DHCP stats will not be captured if used' + fi + if [ -f $DHCPLEASEFILE ]; then + echo '[ok] DHCP lease file exists, DHCP stats will be captured if scope active' + else + echo '[error] DHCP lease file does not exist, DHCP stats will not be captured if used' + fi } exportdata() { @@ -76,6 +92,23 @@ exportdata() { # A / AAAA / PTR / SRV GET_QUERY_TYPE=$(curl -s $API_URL$URL_QUERY_TYPE$API_AUTH_KEY | jq '.[]["A (IPv4)", "AAAA (IPv6)", "PTR", "SRV"]') echo $GET_QUERY_TYPE | tr " " "\n" + + # Find number of DHCP address in scope and current lease count + # case-insensitive compare, just in case :) + if [ "${DHCP_ACTIVE,,}" = "true" ]; then + # Max IP addresses in scope + # Convert IPs to decimal and subtract + IFS="." read -r -a array <<< $DHCP_START + DHCPSTARTDECIMAL=$(( (${array[0]}*256**3) + (${array[1]}*256**2) + (${array[2]}*256) + ${array[3]} )) + IFS="." 
read -r -a array <<< $DHCP_END + DHCPENDDECIMAL=$(( (${array[0]}*256**3) + (${array[1]}*256**2) + (${array[2]}*256) + ${array[3]} )) + expr $DHCPENDDECIMAL - $DHCPSTARTDECIMAL + # Current lease count + cat $DHCPLEASEFILE | wc -l + else + echo 0 + echo 0 + fi } if [ -z $* ]; then From 1bb79cd8c89440be198641d5ef69f5fc8a112c68 Mon Sep 17 00:00:00 2001 From: 0xbad0c0d3 <0xbad0c0d3@gmail.com> Date: Sun, 7 Mar 2021 21:26:47 +0200 Subject: [PATCH 292/497] docker stats script (#307) * docker stats script * Update docker-stats.sh Co-authored-by: Jellyfrog --- snmp/docker-stats.sh | 38 ++++++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) create mode 100644 snmp/docker-stats.sh diff --git a/snmp/docker-stats.sh b/snmp/docker-stats.sh new file mode 100644 index 000000000..7ac7473f2 --- /dev/null +++ b/snmp/docker-stats.sh @@ -0,0 +1,38 @@ +#!/usr/bin/env bash + +VERSION=1 + +function dockerStatsFormat() { + cat <&1) +ERROR=$? +if [ $ERROR -ne 0 ];then + ERROR_STRING=${STATS} + unset STATS +fi +jq -nMc \ + --slurpfile stats <(echo "${STATS:-}") \ + --arg version "${VERSION:-1}" \ + --arg error "${ERROR:-0}" \ + --arg errorString "${ERROR_STRING:-}" \ + '{"version": $version, "data": $stats, "error": $error, "errorString": $errorString }' + +# vim: tabstop=2:shiftwidth=2:expandtab: From cca8ebd33e921ca8303cafdc086e0ca75e60e860 Mon Sep 17 00:00:00 2001 From: yrebrac Date: Mon, 8 Mar 2021 06:27:44 +1100 Subject: [PATCH 293/497] Add powermon app script (#348) * added snmp/powermon-snmp.py * powermon script v1.3 * powermon script v1.3a * powermon-snmp.py v1.4 --- snmp/powermon-snmp.py | 362 ++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 362 insertions(+) create mode 100755 snmp/powermon-snmp.py diff --git a/snmp/powermon-snmp.py b/snmp/powermon-snmp.py new file mode 100755 index 000000000..2b3ed74b6 --- /dev/null +++ b/snmp/powermon-snmp.py @@ -0,0 +1,362 @@ +#!/usr/bin/python3 +# +# Copyright(C) 2021 Ben Carbery yrebrac@upaya.net.au +# +# LICENSE - 
GPLv3 +# +# This program is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License +# version 3. See https://www.gnu.org/licenses/gpl-3.0.txt +# +# DESCRIPTION +# +# The script attempts to determine the current power consumption of the host via +# one or more methods. The scripts should make it easier to add your own methods +# if no included one is suitable for your host machine. +# +# The script should be called by the snmpd daemon on the host machine. This is +# achieved via the 'extend' functionality in snmpd. For example, in +# /etc/snmp/snmpd.conf: +# extend powermon /usr/local/bin/powermon-snmp.py +# +# CUSTOMISING RESULTS +# +# The results can be accessed via the nsExtend MIBs from another host, e.g. +# snmpwalk -v 2c -c \ +# +# +# The results are returned in a JSON format suitable for graphing in LibreNMS. +# A LibreNMS 'application' is available for this purpose. +# +# The application expects to see a single top-level reading in the results in +# terms of Watts. This can be derived from a reading from one of the sub- +# components, currently the ACPI 'meter' or 'psus'. But you must tell the script +# which is the top-level or final reading you want to use in the results. This +# allows you to sum results from dual PSUs or apply your own power factor for +# example. To achieve this see the definition of 'data["reading"]' at the end +# of the script, and modify as required. Two examples are provided. +# +# If you want to track your electricity cost you should also update the cost +# per kWh value below. When you cost changes you can update the value. The +# supply rate will be returned in the results +# +# COMPATIBILITY +# +# - Linux, not tested on other OS +# - Tested on python 3.6, 3.8 +# +# INSTALLATION +# +# - Sensors method: pip install PySensors +# - hpasmcli method: install hp-health package for your distribution +# - Copy this script somewhere, e.g. 
/usr/local/bin +# - Uncomment costPerkWh and change the value +# - Test then customise top-level reading +# - Add the 'extend' config to snmpd.conf +# - https://docs.librenms.org/Extensions/Applications/#powermon +# +# CHANGELOG +# +# 20210130 - v1.0 - initial, implemented PySensors method +# 20210131 - v1.1 - implemented hpasmcli method +# 20210204 - v1.2 - added top-level reading, librenms option +# 20210205 - v1.3 - added cents per kWh +# 20210205 - v1.4 - improvement to UI + +version = 1.4 + +### Libraries + +import os +import sys +import getopt +import json +import re +import shutil +import subprocess + +### Option defaults + +method = "" # must be one of methods array +verbose = False +warnings = False +librenms = True # Return results in a JSON format suitable for Librenms + # Set to false to return JSON data only +pretty = False # Pretty printing + +### Globals + +error = 0 +errorString = "" +data = {} +result = {} +usage = "USAGE: " + os.path.basename(__file__) + " [-h|--help] |" \ + + " [-m|--method ] [-N|--no-librenms] [-p|--pretty]" \ + + " [-v|--verbose] [-w|--warnings] | -l|--list-methods | -h|--help" +methods = ["sensors", "hpasmcli"] +#costPerkWh = 0.15 # <<<< UNCOMMENT + +### General functions + +def errorMsg(message): + sys.stderr.write("ERROR: " + message + "\n") + +def usageError(message="Invalid argument"): + errorMsg(message) + sys.stderr.write(usage + "\n") + sys.exit(1) + +def warningMsg(message): + if verbose or warnings: + sys.stderr.write("WARN: " + message + "\n") + +def verboseMsg(message): + if verbose: + sys.stderr.write("INFO: " + message + "\n") + +def listMethods(): + global verbose + verbose = True + verboseMsg("Available methods are: " + str(methods).strip('[]')) + +### Data functions + +def getData(method): + if method == "sensors": + data = getSensorData() + + elif method == "hpasmcli": + data = getHPASMData() + else: + usageError("You must specify a method.") + + return data + +def getSensorData(): + global error, errorString 
+ error = 2 + errorString = "No power sensor found" + + try: + import sensors + sensors.init() + + except ModuleNotFoundError as e: + errorMsg(str(e)) + verboseMsg("Try 'pip install PySensors'") + sys.exit(1) + + except FileNotFoundError as e: + errorMsg("Module 'sensors' appears to be missing a dependancy: " + str(e)) + verboseMsg("Try 'dnf install lm_sensors'") + sys.exit(1) + + except: + e = sys.exc_info() + errorMsg("Module sensors is installed but failed to initialise: " + str(e)) + sys.exit(1) + + sdata = {} + sdata["meter"] = {} + sdata["psu"] = {} + + re_meter = "^power_meter" + + power_chips = [] + try: + for chip in sensors.iter_detected_chips(): + chip_name = str(chip) + verboseMsg("Found chip: " + chip_name) + + if re.search(re_meter, chip_name): + verboseMsg("Found power meter: " + chip_name) + error = 0 + errorString = "" + + junk, meter_id = chip_name.split('acpi-', 1) + sdata["meter"][meter_id] = {} + + for feature in chip: + feature_label = str(feature.label) + verboseMsg("Found feature: " + feature_label) + + if re.search("^power", feature_label): + sdata["meter"][meter_id]["reading"] = feature.get_value() + + if feature.get_value() == 0: + # warning as downstream may try to divide by 0 + warningMsg("Sensors returned a zero value") + + else: + # store anything else in case label is something unexpected + sdata[chip_name][feature_label] = feature.get_value() + + except: + es = sys.exc_info() + error = 1 + errorString = "Unable to get data: General exception: " + str(es) + + finally: + sensors.cleanup() + return sdata + +def getHPASMData(): + global error, errorString + + exe = shutil.which('hpasmcli') + #if not os.access(candidate, os.W_OK): + cmd = [exe, '-s', 'show powermeter; show powersupply'] + warningMsg("hpasmcli only runs as root") + + try: + output = subprocess.run(cmd, capture_output=True, check=True, text=True, timeout=2) + + except subprocess.CalledProcessError as e: + errorMsg(str(e) + ": " + str(e.stdout).strip('\n')) + sys.exit(1) + 
+ rawdata = str(output.stdout).replace('\t', ' ').replace('\n ', '\n').split('\n') + + hdata = {} + hdata["meter"] = {} + hdata["psu"] = {} + + re_meter = "^Power Meter #([0-9]+)" + re_meter_reading = "^Power Reading :" + re_psu = "^Power supply #[0-9]+" + re_psu_present = "^Present :" + re_psu_redundant = "^Redundant:" + re_psu_condition = "^Condition:" + re_psu_hotplug = "^Hotplug :" + re_psu_reading = "^Power :" + + for line in rawdata: + if re.match(re_meter, line): + verboseMsg("found power meter: " + line) + junk, meter_id = line.split('#', 1) + hdata["meter"][meter_id] = {} + + elif re.match(re_meter_reading, line): + verboseMsg("found power meter reading: " + line) + junk, meter_reading = line.split(':', 1) + hdata["meter"][meter_id]["reading"] = meter_reading.strip() + + elif re.match(re_psu, line): + verboseMsg("found power supply: " + line) + junk, psu_id = line.split('#', 1) + hdata["psu"][psu_id] = {} + + elif re.match(re_psu_present, line): + verboseMsg("found power supply present: " + line) + junk, psu_present = line.split(':', 1) + hdata["psu"][psu_id]["present"] = psu_present.strip() + + elif re.match(re_psu_redundant, line): + verboseMsg("found power supply redundant: " + line) + junk, psu_redundant = line.split(':', 1) + hdata["psu"][psu_id]["redundant"] = psu_redundant.strip() + + elif re.match(re_psu_condition, line): + verboseMsg("found power supply condition: " + line) + junk, psu_condition = line.split(':', 1) + hdata["psu"][psu_id]["condition"] = psu_condition.strip() + + elif re.match(re_psu_hotplug, line): + verboseMsg("found power supply hotplug: " + line) + junk, psu_hotplug = line.split(':', 1) + hdata["psu"][psu_id]["hotplug"] = psu_hotplug.strip() + + elif re.match(re_psu_reading, line): + verboseMsg("found power supply reading: " + line) + junk, psu_reading = line.split(':', 1) + hdata["psu"][psu_id]["reading"] = psu_reading.replace('Watts', '').strip() + + return hdata + +# Argument Parsing +try: + opts, args = getopt.gnu_getopt( + 
sys.argv[1:], 'm:hlNpvw', ['method', 'help', 'list-methods', 'no-librenms', 'pretty', 'verbose', 'warnings'] + ) + if len(args) != 0: + usageError("Unknown argument") + +except getopt.GetoptError as e: + usageError(str(e)) + +for opt, val in opts: + if opt in ["-h", "--help"]: + print(usage) + sys.exit(0) + + elif opt in ["-l", "--list-methods"]: + listMethods() + sys.exit(0) + + elif opt in ["-m", "--method"]: + if val not in methods: + usageError("Invalid method: '" + val + "'") + else: + method = val + + elif opt in ["-N", "--no-librenms"]: + librenms = False + + elif opt in ["-p", "--pretty"]: + pretty = True + + elif opt in ["-v", "--verbose"]: + verbose = True + + elif opt in ["-w", "--warnings"]: + warnings = True + + else: + continue + +# Electricity Cost +try: + costPerkWh + +except NameError: + errorMsg("cost per kWh is undefined (uncomment in script)") + sys.exit(1) + +# Get data +data = getData(method) +data["supply"] = {} +data["supply"]["rate"] = costPerkWh + +# Top-level reading +# CUSTOMISE THIS FOR YOUR HOST +# i.e. 
by running with -p -n -m and see what you get and then updating where +# in the JSON data the top-level reading is sourced from +try: + # Example 1 - take reading from ACPI meter id 1 + data["reading"] = data["meter"]["1"]["reading"] + + # Example 2 - sum the two power supplies and apply a power factor + #pf = 0.95 + #data["reading"] = str( float(data["psu"]["1"]["reading"]) \ + # + float(data["psu"]["2"]["reading"]) / pf ) + +except: + data["reading"] = 0.0 + +# Build result +if librenms: + result['version']=version + result['error']=error + result['errorString']=errorString + result['data']=data + +else: + result=data + +# Print result +if pretty: + print(json.dumps(result, indent=2)) + +else: + print(json.dumps(result)) + From 35be83451dfee7de36e086e3c067387e96c3e144 Mon Sep 17 00:00:00 2001 From: Jellyfrog Date: Thu, 18 Mar 2021 12:24:30 +0100 Subject: [PATCH 294/497] Cleanup some code (#355) * Format with isort * Format with Black * Fix CRLF * Format with shellcheck * Fix some warning * Fix PHP style * Dont modifiy check_mk files * Fixes --- agent-local/ceph | 109 ++++++++---- agent-local/check_mrpe | 6 +- agent-local/freeswitch | 8 +- agent-local/hddtemp | 14 +- agent-local/nginx | 32 ++-- agent-local/nginx-python3.py | 8 +- agent-local/powerdns | 41 +++-- agent-local/powerdns-recursor | 11 +- agent-local/rocks.sh | 8 +- agent-local/rrdcached | 28 +-- agent-local/unbound.sh | 2 +- mk_enplug | 4 +- snmp/Openwrt/wlClients.sh | 10 +- snmp/Openwrt/wlFrequency.sh | 6 +- snmp/Openwrt/wlNoiseFloor.sh | 6 +- snmp/Openwrt/wlRate.sh | 14 +- snmp/Openwrt/wlSNR.sh | 14 +- snmp/apache-stats.py | 44 ++--- snmp/apache-stats.sh | 22 +-- snmp/backupninja.py | 44 +++-- snmp/certificate.py | 61 ++++--- snmp/chip.sh | 54 +++--- snmp/dhcp.py | 149 ++++++++-------- snmp/exim-stats.sh | 14 +- snmp/freeradius.sh | 84 ++++----- snmp/gpsd | 90 +++++----- snmp/icecast-stats.sh | 8 +- snmp/mailcow-dockerized-postfix | 42 +++-- snmp/mailscanner.php | 88 +++++----- snmp/mdadm | 38 ++-- 
snmp/mysql-stats | 288 +++++++++++++++++++----------- snmp/nginx | 8 +- snmp/nginx-python2 | 37 ++-- snmp/ntp-client | 18 +- snmp/ntp-server.sh | 96 +++++----- snmp/nvidia | 6 +- snmp/opensip3-stats.sh | 10 +- snmp/opensips-stats.sh | 10 +- snmp/osupdate | 32 ++-- snmp/phpfpmsp | 26 +-- snmp/pi-hole | 20 +-- snmp/postfix-queues | 2 +- snmp/powerdns-dnsdist | 124 ++++++------- snmp/powerdns-recursor | 9 +- snmp/powerdns.py | 20 ++- snmp/powermon-snmp.py | 118 ++++++++----- snmp/puppet_agent.py | 58 ++++--- snmp/pureftpd.py | 63 ++++--- snmp/raspberry.sh | 48 ++--- snmp/redis.py | 36 ++-- snmp/sdfsinfo | 8 +- snmp/seafile.py | 132 +++++++------- snmp/shoutcast.php | 188 ++++++++++---------- snmp/ups-apcups.sh | 6 +- snmp/ups-nut.sh | 8 +- snmp/voipmon-stats.sh | 8 +- snmp/zfs-freebsd.py | 297 ++++++++++++++++++------------- snmp/zfs-linux | 299 ++++++++++++++++++-------------- 58 files changed, 1682 insertions(+), 1352 deletions(-) diff --git a/agent-local/ceph b/agent-local/ceph index 1301f79ec..1493fa155 100755 --- a/agent-local/ceph +++ b/agent-local/ceph @@ -14,80 +14,117 @@ # # See http://www.gnu.org/licenses/gpl.txt for the full license -from subprocess import check_output import json +from subprocess import check_output + def cephversion(): - cephv = check_output(["/usr/bin/ceph", "version"]).decode("utf-8").replace('ceph version ', '') - major, minor = cephv.split('.')[0:2] + cephv = ( + check_output(["/usr/bin/ceph", "version"]) + .decode("utf-8") + .replace("ceph version ", "") + ) + major, minor = cephv.split(".")[0:2] return [int(major), int(minor)] + def cephdf(): - cephdf = check_output(["/usr/bin/ceph", "-f", "json", "df"]).decode("utf-8").replace('-inf', '0') + cephdf = ( + check_output(["/usr/bin/ceph", "-f", "json", "df"]) + .decode("utf-8") + .replace("-inf", "0") + ) s = json.loads(cephdf) try: - ts = s['stats']['total_bytes'] - except: - ts = s['stats']['total_space'] + ts = s["stats"]["total_bytes"] + except KeyError: + ts = 
s["stats"]["total_space"] try: - tu = s['stats']['total_used_bytes'] - except: - tu = s['stats']['total_used'] + tu = s["stats"]["total_used_bytes"] + except KeyError: + tu = s["stats"]["total_used"] try: - ta = s['stats']['total_avail_bytes'] - except: - ta = s['stats']['total_avail'] + ta = s["stats"]["total_avail_bytes"] + except KeyError: + ta = s["stats"]["total_avail"] print("c:%i:%i:%i" % (ts, tu, ta)) - for p in s['pools']: - b = p['stats']['bytes_used'] - a = p['stats']['max_avail'] - o = p['stats']['objects'] - print("%s:%i:%i:%i" % (p['name'], a, b, o)) + for p in s["pools"]: + b = p["stats"]["bytes_used"] + a = p["stats"]["max_avail"] + o = p["stats"]["objects"] + print("%s:%i:%i:%i" % (p["name"], a, b, o)) def osdperf(): global major - osdperf = check_output(["/usr/bin/ceph", "-f", "json", "osd", "perf"]).decode("utf-8").replace('-inf', '0') + osdperf = ( + check_output(["/usr/bin/ceph", "-f", "json", "osd", "perf"]) + .decode("utf-8") + .replace("-inf", "0") + ) if major > 13: - for o in json.loads(osdperf)['osdstats']['osd_perf_infos']: - print("osd.%s:%i:%i" % (o['id'], o['perf_stats']['apply_latency_ms'], o['perf_stats']['commit_latency_ms'])) + for o in json.loads(osdperf)["osdstats"]["osd_perf_infos"]: + print( + "osd.%s:%i:%i" + % ( + o["id"], + o["perf_stats"]["apply_latency_ms"], + o["perf_stats"]["commit_latency_ms"], + ) + ) else: - for o in json.loads(osdperf)['osd_perf_infos']: - print("osd.%s:%i:%i" % (o['id'], o['perf_stats']['apply_latency_ms'], o['perf_stats']['commit_latency_ms'])) + for o in json.loads(osdperf)["osd_perf_infos"]: + print( + "osd.%s:%i:%i" + % ( + o["id"], + o["perf_stats"]["apply_latency_ms"], + o["perf_stats"]["commit_latency_ms"], + ) + ) + def poolstats(): global major - poolstats = check_output(["/usr/bin/ceph", "-f", "json", "osd", "pool", "stats"]).decode("utf-8").replace('-inf', '0') + poolstats = ( + check_output(["/usr/bin/ceph", "-f", "json", "osd", "pool", "stats"]) + .decode("utf-8") + .replace("-inf", 
"0") + ) for p in json.loads(poolstats): try: - r = p['client_io_rate']['read_bytes_sec'] - except: + r = p["client_io_rate"]["read_bytes_sec"] + except KeyError: r = 0 try: - w = p['client_io_rate']['write_bytes_sec'] - except: + w = p["client_io_rate"]["write_bytes_sec"] + except KeyError: w = 0 try: if major > 11: - o = p['client_io_rate']['read_op_per_sec'] + p['client_io_rate']['write_op_per_sec'] + o = ( + p["client_io_rate"]["read_op_per_sec"] + + p["client_io_rate"]["write_op_per_sec"] + ) else: - o = p['client_io_rate']['op_per_sec'] - except: + o = p["client_io_rate"]["op_per_sec"] + except KeyError: o = 0 - print("%s:%i:%i:%i" % (p['pool_name'], o, w, r)) + print("%s:%i:%i:%i" % (p["pool_name"], o, w, r)) + major, minor = cephversion() -print ("<<>>") -print ("") +print("<<>>") +print("") poolstats() -print ("") +print("") osdperf() -print ("") +print("") cephdf() diff --git a/agent-local/check_mrpe b/agent-local/check_mrpe index 1b8401f99..d21b6d9a1 100644 --- a/agent-local/check_mrpe +++ b/agent-local/check_mrpe @@ -55,11 +55,11 @@ else SED_CMD="s/(.*) \(.*\) [0-9] \(.*\)/\1 \2/p" fi -for i in `$BIN_NC -w 1 $Hval $pval 2>&1 | $BIN_SED '/^<<>>/,/^<<&1 | $BIN_SED '/^<<>>/,/^<</dev/null 2>&1; then - disks=`lsblk -dnp|cut -d' ' -f1 | tr '\n' ' '` + disks=$(lsblk -dnp|cut -d' ' -f1 | tr '\n' ' ') else - disks=`find /dev -name '[sh]d[a-z]' -or -name '[sh]d[a-z][a-z]' | tr '\n' ' '` + disks=$(find /dev -name '[sh]d[a-z]' -or -name '[sh]d[a-z][a-z]' | tr '\n' ' ') fi -hddtemp=`which hddtemp 2>/dev/null` +hddtemp=$(which hddtemp 2>/dev/null) if [ "${hddtemp}" != "" ]; then if [ -x "${hddtemp}" ]; then if type parallel > /dev/null 2>&1; then # When available, use GNU parallel for a significant performance boost. hddtemp runs serially(!) 
- output=`parallel ${hddtemp} -w -q ::: ${disks} 2>/dev/null` + output=$(parallel "${hddtemp}" -w -q ::: "${disks}" 2>/dev/null) else - output=`${hddtemp} -w -q ${disks} 2>/dev/null` + output=$(${hddtemp} -w -q "${disks}" 2>/dev/null) fi - content=`echo "$output" | awk '{ if ($0 !~ /not available/) { print $0 } }' | awk -F": " 'BEGIN{ ORS="" }{ print "|"$1"|"$2"|"$3"|";} ' | sed 's/[° ]C|/|C|/g' | sed 's/[° ]F|/|F|/g' | tr -cd '\12\14\40-\176'` + content=$(echo "$output" | awk '{ if ($0 !~ /not available/) { print $0 } }' | awk -F": " 'BEGIN{ ORS="" }{ print "|"$1"|"$2"|"$3"|";} ' | sed 's/[° ]C|/|C|/g' | sed 's/[° ]F|/|F|/g' | tr -cd '\12\14\40-\176') if [ "${content}" != "" ]; then echo '<<>>' - echo ${content} + echo "${content}" echo else echo "no hddtemp compatible disks found" >&2 diff --git a/agent-local/nginx b/agent-local/nginx index d6319f1b2..c1d5fd18d 100755 --- a/agent-local/nginx +++ b/agent-local/nginx @@ -1,9 +1,9 @@ #!/usr/bin/env python -import urllib2 import re +import urllib2 -data = urllib2.urlopen('http://127.0.0.1/nginx-status').read() +data = urllib2.urlopen("http://127.0.0.1/nginx-status").read() params = {} @@ -11,28 +11,24 @@ for line in data.split("\n"): smallstat = re.match(r"\s?Reading:\s(.*)\sWriting:\s(.*)\sWaiting:\s(.*)$", line) req = re.match(r"\s+(\d+)\s+(\d+)\s+(\d+)", line) if smallstat: - params["Reading"] = smallstat.group(1) - params["Writing"] = smallstat.group(2) - params["Waiting"] = smallstat.group(3) + params["Reading"] = smallstat.group(1) + params["Writing"] = smallstat.group(2) + params["Waiting"] = smallstat.group(3) elif req: - params["Requests"] = req.group(3) + params["Requests"] = req.group(3) else: - pass + pass -dataorder = [ - "Active", - "Reading", - "Writing", - "Waiting", - "Requests" - ] +dataorder = ["Active", "Reading", "Writing", "Waiting", "Requests"] -print "<<>>\n"; +print "<<>>\n" for param in dataorder: if param == "Active": - Active = int(params["Reading"]) + int(params["Writing"]) + 
int(params["Waiting"]) - print Active + Active = ( + int(params["Reading"]) + int(params["Writing"]) + int(params["Waiting"]) + ) + print Active else: - print params[param] + print params[param] diff --git a/agent-local/nginx-python3.py b/agent-local/nginx-python3.py index 2464f89d1..fd710ba8c 100755 --- a/agent-local/nginx-python3.py +++ b/agent-local/nginx-python3.py @@ -1,8 +1,8 @@ #!/usr/bin/env python3 -from urllib.request import urlopen import re +from urllib.request import urlopen -data = urlopen('http://127.0.0.1/nginx-status').read() +data = urlopen("http://127.0.0.1/nginx-status").read() params = {} @@ -24,7 +24,9 @@ for param in dataorder: if param == "Active": - Active = int(params["Reading"]) + int(params["Writing"]) + int(params["Waiting"]) + Active = ( + int(params["Reading"]) + int(params["Writing"]) + int(params["Waiting"]) + ) print(Active) else: print(params[param]) diff --git a/agent-local/powerdns b/agent-local/powerdns index d4fc6bb30..749633006 100755 --- a/agent-local/powerdns +++ b/agent-local/powerdns @@ -1,25 +1,40 @@ #!/usr/bin/env python3 -from subprocess import Popen, PIPE +from subprocess import PIPE, Popen -kvars = [ - 'corrupt-packets', 'deferred-cache-inserts', 'deferred-cache-lookup', - 'latency', 'packetcache-hit', 'packetcache-miss', 'packetcache-size', - 'qsize-q', 'query-cache-hit', 'query-cache-miss', 'recursing-answers', - 'recursing-questions', 'servfail-packets', 'tcp-answers', 'tcp-queries', - 'timedout-packets', 'udp-answers', 'udp-queries', 'udp4-answers', - 'udp4-queries', 'udp6-answers', 'udp6-queries' +kvars = [ + "corrupt-packets", + "deferred-cache-inserts", + "deferred-cache-lookup", + "latency", + "packetcache-hit", + "packetcache-miss", + "packetcache-size", + "qsize-q", + "query-cache-hit", + "query-cache-miss", + "recursing-answers", + "recursing-questions", + "servfail-packets", + "tcp-answers", + "tcp-queries", + "timedout-packets", + "udp-answers", + "udp-queries", + "udp4-answers", + "udp4-queries", + 
"udp6-answers", + "udp6-queries", ] rvars = {} -cmd = ['pdns_control', 'show', '*'] +cmd = ["pdns_control", "show", "*"] -for l in Popen(cmd, stdout=PIPE).communicate()[0].decode().rstrip().split(','): - v = l.split('=') +for l in Popen(cmd, stdout=PIPE).communicate()[0].decode().rstrip().split(","): + v = l.split("=") if len(v) > 1: - rvars[v[0]] = v[1] + rvars[v[0]] = v[1] print("<<>>") for k in kvars: print(rvars[k]) - diff --git a/agent-local/powerdns-recursor b/agent-local/powerdns-recursor index 6949c7f44..ed94d542c 100755 --- a/agent-local/powerdns-recursor +++ b/agent-local/powerdns-recursor @@ -1,13 +1,14 @@ #!/usr/bin/env python3 -import json, subprocess -from subprocess import Popen, PIPE +import json +import subprocess +from subprocess import PIPE, Popen -input = Popen(['rec_control', 'get-all'], stdout=PIPE).communicate()[0] +input = Popen(["rec_control", "get-all"], stdout=PIPE).communicate()[0] data = [] for line in input.splitlines(): item = line.split() - data.append({'name': item[0].decode(), 'value': int(item[1].decode())}) + data.append({"name": item[0].decode(), "value": int(item[1].decode())}) -print('<<>>') +print("<<>>") print(json.dumps(data)) diff --git a/agent-local/rocks.sh b/agent-local/rocks.sh index f3547adaa..9ff1ec35e 100755 --- a/agent-local/rocks.sh +++ b/agent-local/rocks.sh @@ -15,8 +15,8 @@ PENDING_JOBS=$($QSTAT -u "*" -s p | wc -l) SUSPEND_JOBS=$($QSTAT -u "*" -s s | wc -l) ZOMBIE_JOBS=$($QSTAT -u "*" -s z | wc -l) -echo $RUNNING_JOBS; -echo $PENDING_JOBS; -echo $SUSPEND_JOBS; -echo $ZOMBIE_JOBS; +echo "$RUNNING_JOBS"; +echo "$PENDING_JOBS"; +echo "$SUSPEND_JOBS"; +echo "$ZOMBIE_JOBS"; diff --git a/agent-local/rrdcached b/agent-local/rrdcached index 0fca240fa..33ec66fd7 100755 --- a/agent-local/rrdcached +++ b/agent-local/rrdcached @@ -1,13 +1,13 @@ #!/usr/bin/env python +import os import socket import sys -import os # Unix socket -server_address = '/var/run/rrdcached.sock' +server_address = "/var/run/rrdcached.sock" # TCP 
socket -#server_address = 'localhost:42217' +# server_address = 'localhost:42217' sock = None try: @@ -15,31 +15,31 @@ try: sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) else: sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - if ':' in server_address: - split = server_address.rsplit(':', 1) - server_address = (split[0],int(split[1])) + if ":" in server_address: + split = server_address.rsplit(":", 1) + server_address = (split[0], int(split[1])) else: server_address = (server_address, 42217) sock.connect(server_address) except socket.error as e: - sys.stderr.write(str(e) + ': ' + str(server_address) + '\n') + sys.stderr.write(str(e) + ": " + str(server_address) + "\n") sys.exit(1) -buffer = '' +buffer = "" max = -1 try: sock.settimeout(5) - sock.sendall('STATS\n'.encode()) - while max == -1 or len(buffer.split('\n')) < max: + sock.sendall("STATS\n".encode()) + while max == -1 or len(buffer.split("\n")) < max: buffer += sock.recv(1024).decode() if max == -1: # the first line contains the number of following lines - max = int(buffer.split(' ')[0]) + 1 + max = int(buffer.split(" ")[0]) + 1 except socket.error as e: - sys.stderr.write(str(e) + '\n') + sys.stderr.write(str(e) + "\n") sys.exit(1) sock.close() -print('<<>>') -print(buffer.rstrip('\n')) +print("<<>>") +print(buffer.rstrip("\n")) diff --git a/agent-local/unbound.sh b/agent-local/unbound.sh index d9b378892..9383701e6 100755 --- a/agent-local/unbound.sh +++ b/agent-local/unbound.sh @@ -1,5 +1,5 @@ #!/bin/bash -unboundctl=`which unbound-control` +unboundctl=$(which unbound-control) if [ "$?" != "0" ]; then #Unbound control executable doesn't exist exit diff --git a/mk_enplug b/mk_enplug index 5abecb72f..7ecbd73ec 100755 --- a/mk_enplug +++ b/mk_enplug @@ -56,7 +56,7 @@ if [ ! 
-z "$s" ]; then exit 1 fi - if [ `script_enabled $s` != "yes" ]; then - enable_script $s + if [ `script_enabled "$s"` != "yes" ]; then + enable_script "$s" fi fi diff --git a/snmp/Openwrt/wlClients.sh b/snmp/Openwrt/wlClients.sh index cf6195f62..5becad170 100755 --- a/snmp/Openwrt/wlClients.sh +++ b/snmp/Openwrt/wlClients.sh @@ -12,15 +12,15 @@ if [ $# -gt 1 ]; then fi # Get path to this script -scriptdir=$(dirname $(readlink -f -- $0)) +scriptdir=$(dirname $(readlink -f -- "$0")) # Get hostname, interface list. Set target, which is name returned for interface -hostname=`/bin/uname -n` -if [ $1 ]; then +hostname=$(/bin/uname -n) +if [ "$1" ]; then interfaces=$1 target=$1 else - interfaces=`cat $scriptdir/wlInterfaces.txt | cut -f 1 -d","` + interfaces=$(cat "$scriptdir"/wlInterfaces.txt | cut -f 1 -d",") target=wlan fi @@ -28,7 +28,7 @@ fi count=0 for interface in $interfaces do - new=`/usr/sbin/iw dev $interface station dump | /bin/grep Station | /usr/bin/cut -f 2 -s -d" " | /usr/bin/wc -l` + new=$(/usr/sbin/iw dev "$interface" station dump | /bin/grep Station | /usr/bin/cut -f 2 -s -d" " | /usr/bin/wc -l) count=$(( $count + $new )) done diff --git a/snmp/Openwrt/wlFrequency.sh b/snmp/Openwrt/wlFrequency.sh index 119fb54af..83e68b1d1 100755 --- a/snmp/Openwrt/wlFrequency.sh +++ b/snmp/Openwrt/wlFrequency.sh @@ -12,8 +12,8 @@ if [ $# -ne 1 ]; then fi # Get hostname, extract frequency -hostname=`/bin/uname -n` -frequency=`/usr/sbin/iw dev $1 info | /bin/grep channel | /usr/bin/cut -f 2- -s -d" " | /usr/bin/cut -f 2- -s -d"(" | /usr/bin/cut -f 1 -s -d" "` +hostname=$(/bin/uname -n) +frequency=$(/usr/sbin/iw dev "$1" info | /bin/grep channel | /usr/bin/cut -f 2- -s -d" " | /usr/bin/cut -f 2- -s -d"(" | /usr/bin/cut -f 1 -s -d" ") # Return snmp result -/bin/echo $frequency +/bin/echo "$frequency" diff --git a/snmp/Openwrt/wlNoiseFloor.sh b/snmp/Openwrt/wlNoiseFloor.sh index ab404364d..47d4b4ec2 100755 --- a/snmp/Openwrt/wlNoiseFloor.sh +++ 
b/snmp/Openwrt/wlNoiseFloor.sh @@ -13,8 +13,8 @@ fi # Get hostname, extract noise floor. Note, all associated stations have the same value, so just grab the first one # Use tail, not head (i.e. last line, not first), as head exits immediately, breaks the pipe to cut! -hostname=`/bin/uname -n` -noise=`/usr/bin/iwinfo $1 assoclist | /usr/bin/cut -s -d "/" -f 2 | /usr/bin/cut -s -d "(" -f 1 | /usr/bin/cut -s -d " " -f 2 | /usr/bin/tail -1` +hostname=$(/bin/uname -n) +noise=$(/usr/bin/iwinfo "$1" assoclist | /usr/bin/cut -s -d "/" -f 2 | /usr/bin/cut -s -d "(" -f 1 | /usr/bin/cut -s -d " " -f 2 | /usr/bin/tail -1) # Return snmp result -/bin/echo $noise +/bin/echo "$noise" diff --git a/snmp/Openwrt/wlRate.sh b/snmp/Openwrt/wlRate.sh index 76ab0c881..08b68b1bd 100755 --- a/snmp/Openwrt/wlRate.sh +++ b/snmp/Openwrt/wlRate.sh @@ -16,17 +16,17 @@ fi # Get hostname, calculate result. Sum just for debug, and have to return integer # => If not integer (e.g. 2.67e+07), LibreNMS will drop the exponent (result, 2.67 bits/sec!) 
-hostname=`/bin/uname -n` -ratelist=`/usr/sbin/iw dev $1 station dump | /bin/grep "$2 bitrate" | /usr/bin/cut -f 2 -s -d" "` +hostname=$(/bin/uname -n) +ratelist=$(/usr/sbin/iw dev "$1" station dump | /bin/grep "$2 bitrate" | /usr/bin/cut -f 2 -s -d" ") if [ "$3" == "sum" ]; then - result=`/bin/echo "$ratelist" | /usr/bin/awk -F ':' '{sum += $2} END {printf "%d\n", 1000000*sum}'` + result=$(/bin/echo "$ratelist" | /usr/bin/awk -F ':' '{sum += $2} END {printf "%d\n", 1000000*sum}') elif [ "$3" == "avg" ]; then - result=`/bin/echo "$ratelist" | /usr/bin/awk -F ':' '{sum += $2} END {printf "%d\n", 1000000*sum/NR}'` + result=$(/bin/echo "$ratelist" | /usr/bin/awk -F ':' '{sum += $2} END {printf "%d\n", 1000000*sum/NR}') elif [ "$3" == "min" ]; then - result=`/bin/echo "$ratelist" | /usr/bin/awk -F ':' 'NR == 1 || $2 < min {min = $2} END {printf "%d\n", 1000000*min}'` + result=$(/bin/echo "$ratelist" | /usr/bin/awk -F ':' 'NR == 1 || $2 < min {min = $2} END {printf "%d\n", 1000000*min}') elif [ "$3" == "max" ]; then - result=`/bin/echo "$ratelist" | /usr/bin/awk -F ':' 'NR == 1 || $2 > max {max = $2} END {printf "%d\n", 1000000*max}'` + result=$(/bin/echo "$ratelist" | /usr/bin/awk -F ':' 'NR == 1 || $2 > max {max = $2} END {printf "%d\n", 1000000*max}') fi # Return snmp result -echo $result +echo "$result" diff --git a/snmp/Openwrt/wlSNR.sh b/snmp/Openwrt/wlSNR.sh index 337d55979..d19283d82 100755 --- a/snmp/Openwrt/wlSNR.sh +++ b/snmp/Openwrt/wlSNR.sh @@ -14,17 +14,17 @@ if [ $# -ne 2 ]; then fi # Get hostname, calculate result. 
Sum just for debug, and return integer (safest / easiest) -hostname=`/bin/uname -n` -snrlist=`/usr/bin/iwinfo $1 assoclist | /usr/bin/cut -s -d "/" -f 2 | /usr/bin/cut -s -d "(" -f 2 | /usr/bin/cut -s -d " " -f 2 | /usr/bin/cut -s -d ")" -f 1` +hostname=$(/bin/uname -n) +snrlist=$(/usr/bin/iwinfo "$1" assoclist | /usr/bin/cut -s -d "/" -f 2 | /usr/bin/cut -s -d "(" -f 2 | /usr/bin/cut -s -d " " -f 2 | /usr/bin/cut -s -d ")" -f 1) if [ "$2" == "sum" ]; then - result=`/bin/echo "$snrlist" | /usr/bin/awk -F ':' '{sum += $1} END {printf "%d\n", sum}'` + result=$(/bin/echo "$snrlist" | /usr/bin/awk -F ':' '{sum += $1} END {printf "%d\n", sum}') elif [ "$2" == "avg" ]; then - result=`/bin/echo "$snrlist" | /usr/bin/awk -F ':' '{sum += $1} END {printf "%d\n", sum/NR}'` + result=$(/bin/echo "$snrlist" | /usr/bin/awk -F ':' '{sum += $1} END {printf "%d\n", sum/NR}') elif [ "$2" == "min" ]; then - result=`/bin/echo "$snrlist" | /usr/bin/awk -F ':' 'NR == 1 || $1 < min {min = $1} END {printf "%d\n", min}'` + result=$(/bin/echo "$snrlist" | /usr/bin/awk -F ':' 'NR == 1 || $1 < min {min = $1} END {printf "%d\n", min}') elif [ "$2" == "max" ]; then - result=`/bin/echo "$snrlist" | /usr/bin/awk -F ':' 'NR == 1 || $1 > max {max = $1} END {printf "%d\n", max}'` + result=$(/bin/echo "$snrlist" | /usr/bin/awk -F ':' 'NR == 1 || $1 > max {max = $1} END {printf "%d\n", max}') fi # Return snmp result -echo $result +echo "$result" diff --git a/snmp/apache-stats.py b/snmp/apache-stats.py index 1421c20e3..d55ae8d52 100755 --- a/snmp/apache-stats.py +++ b/snmp/apache-stats.py @@ -22,63 +22,67 @@ import urllib.request cachetime = 30 -cachefile = '/var/cache/librenms/apache-snmp' +cachefile = "/var/cache/librenms/apache-snmp" # Check for a cache file newer than cachetime seconds ago if os.path.isfile(cachefile) and (time.time() - os.stat(cachefile)[8]) < cachetime: # Use cached data - f = open(cachefile, 'r') + f = open(cachefile, "r") data = f.read() f.close() else: # Grab the status URL 
(fresh data), needs package urllib3 - data = urllib.request.urlopen("http://localhost/server-status?auto").read().decode('UTF-8') + data = ( + urllib.request.urlopen("http://localhost/server-status?auto") + .read() + .decode("UTF-8") + ) # Write file - f = open(cachefile+'.TMP.'+str(os.getpid()), 'w') + f = open(cachefile + ".TMP." + str(os.getpid()), "w") f.write(data) f.close() - os.rename(cachefile+'.TMP.'+str(os.getpid()), cachefile) + os.rename(cachefile + ".TMP." + str(os.getpid()), cachefile) # dice up the data -scoreboardkey = ['_', 'S', 'R', 'W', 'K', 'D', 'C', 'L', 'G', 'I', '.'] +scoreboardkey = ["_", "S", "R", "W", "K", "D", "C", "L", "G", "I", "."] params = {} for line in data.splitlines(): - fields = line.split(': ') + fields = line.split(": ") if len(fields) <= 1: continue # "localhost" as first line causes out of index error - elif fields[0] == 'Scoreboard': + elif fields[0] == "Scoreboard": # count up the scoreboard into states states = {} for state in scoreboardkey: states[state] = 0 for state in fields[1]: states[state] += 1 - elif fields[0] == 'Total kBytes': + elif fields[0] == "Total kBytes": # turn into base(byte) value - params[fields[0]] = int(fields[1])*1024 + params[fields[0]] = int(fields[1]) * 1024 elif len(fields) > 1: # just store everything else params[fields[0]] = fields[1] # output the data in order(this is because some platforms don't have them all) dataorder = [ - 'Total Accesses', - 'Total kBytes', - 'CPULoad', - 'Uptime', - 'ReqPerSec', - 'BytesPerSec', - 'BytesPerReq', - 'BusyWorkers', - 'IdleWorkers' + "Total Accesses", + "Total kBytes", + "CPULoad", + "Uptime", + "ReqPerSec", + "BytesPerSec", + "BytesPerReq", + "BusyWorkers", + "IdleWorkers", ] for param in dataorder: try: print(params[param]) except KeyError: # not all Apache's have all stats - print('U') + print("U") # print the scoreboard for state in scoreboardkey: diff --git a/snmp/apache-stats.sh b/snmp/apache-stats.sh index 275d81586..9b677fd3a 100755 --- 
a/snmp/apache-stats.sh +++ b/snmp/apache-stats.sh @@ -195,17 +195,17 @@ for (( c=0; c<${#Scoreboard}; c++ )); do done # scoreboard output order must be this ... -echo ${Scoreboard_} -echo ${ScoreboardS} -echo ${ScoreboardR} -echo ${ScoreboardW} -echo ${ScoreboardK} -echo ${ScoreboardD} -echo ${ScoreboardC} -echo ${ScoreboardL} -echo ${ScoreboardG} -echo ${ScoreboardI} -echo ${ScoreboardDot} +echo "${Scoreboard_}" +echo "${ScoreboardS}" +echo "${ScoreboardR}" +echo "${ScoreboardW}" +echo "${ScoreboardK}" +echo "${ScoreboardD}" +echo "${ScoreboardC}" +echo "${ScoreboardL}" +echo "${ScoreboardG}" +echo "${ScoreboardI}" +echo "${ScoreboardDot}" # clean up if [ -f ${Tmp_File} ]; then diff --git a/snmp/backupninja.py b/snmp/backupninja.py index ce9408d67..80cf55f7f 100644 --- a/snmp/backupninja.py +++ b/snmp/backupninja.py @@ -1,39 +1,45 @@ #!/usr/bin/env python3 import io -import re -import os import json +import os +import re version = 1 error = 0 -error_string = '' +error_string = "" -logfile = '/var/log/backupninja.log' +logfile = "/var/log/backupninja.log" backupninja_datas = { - 'last_actions': 0, - 'last_fatal': 0, - 'last_error': 0, - 'last_warning': 0} + "last_actions": 0, + "last_fatal": 0, + "last_error": 0, + "last_warning": 0, +} if not os.path.isfile(logfile): - error_string = 'file unavailable' + error_string = "file unavailable" error = 1 break -with io.open(logfile,'r') as f: +with io.open(logfile, "r") as f: for line in reversed(list(f)): - match = re.search('^(.*) [a-zA-Z]*: FINISHED: ([0-9]+) actions run. ([0-9]+) fatal. ([0-9]+) error. ([0-9]+) warning.$', line) + match = re.search( + "^(.*) [a-zA-Z]*: FINISHED: ([0-9]+) actions run. ([0-9]+) fatal. ([0-9]+) error. 
([0-9]+) warning.$", + line, + ) if match: - backupninja_datas['last_actions'] = int(match.group(2)) - backupninja_datas['last_fatal'] = int(match.group(3)) - backupninja_datas['last_error'] = int(match.group(4)) - backupninja_datas['last_warning'] = int(match.group(5)) + backupninja_datas["last_actions"] = int(match.group(2)) + backupninja_datas["last_fatal"] = int(match.group(3)) + backupninja_datas["last_error"] = int(match.group(4)) + backupninja_datas["last_warning"] = int(match.group(5)) break -output = {'version': version, - 'error': error, - 'errorString': error_string, - 'data': backupninja_datas} +output = { + "version": version, + "error": error, + "errorString": error_string, + "data": backupninja_datas, +} print(json.dumps(output)) diff --git a/snmp/certificate.py b/snmp/certificate.py index d97f66f12..c141afcd1 100755 --- a/snmp/certificate.py +++ b/snmp/certificate.py @@ -1,12 +1,11 @@ #!/usr/bin/env python3 -import socket -import ssl import datetime import json +import socket +import ssl - -CONFIGFILE='/etc/snmp/certificate.json' +CONFIGFILE = "/etc/snmp/certificate.json" # {"domains": [ # {"fqdn": "www.mydomain.com"}, # {"fqdn": "www2.mydomain.com"} @@ -34,55 +33,61 @@ def get_certificate_data(domain, port=443): # Manage expired certificates except ssl.SSLCertVerificationError as e: # Arbitrary start date - ssl_info['notBefore'] = "Jan 1 00:00:00 2020 GMT" + ssl_info["notBefore"] = "Jan 1 00:00:00 2020 GMT" # End date is now (we don't have the real one but the certificate is expired) one_minute_further = datetime.datetime.now() + datetime.timedelta(minutes=1) - ssl_info['notAfter'] = one_minute_further.strftime('%b %d %H:%M:%S %Y GMT') + ssl_info["notAfter"] = one_minute_further.strftime("%b %d %H:%M:%S %Y GMT") return ssl_info, error_msg output = {} -output['error'] = 0 -output['errorString'] = "" -output['version'] = 1 +output["error"] = 0 +output["errorString"] = "" +output["version"] = 1 -with open(CONFIGFILE, 'r') as json_file: +with 
open(CONFIGFILE, "r") as json_file: try: configfile = json.load(json_file) except json.decoder.JSONDecodeError as e: - output['error'] = 1 - output['errorString'] = "Configfile Error: '%s'" % e + output["error"] = 1 + output["errorString"] = "Configfile Error: '%s'" % e -if not output['error']: +if not output["error"]: output_data_list = [] - for domain in configfile['domains']: + for domain in configfile["domains"]: output_data = {} - if 'port' not in domain.keys(): - domain['port'] = 443 - certificate_data, error_msg = get_certificate_data(domain['fqdn'], domain['port']) + if "port" not in domain.keys(): + domain["port"] = 443 + certificate_data, error_msg = get_certificate_data( + domain["fqdn"], domain["port"] + ) - output_data['cert_name'] = domain['fqdn'] + output_data["cert_name"] = domain["fqdn"] if not error_msg: - ssl_date_format = r'%b %d %H:%M:%S %Y %Z' - validity_end = datetime.datetime.strptime(certificate_data['notAfter'], ssl_date_format) - validity_start = datetime.datetime.strptime(certificate_data['notBefore'], ssl_date_format) + ssl_date_format = r"%b %d %H:%M:%S %Y %Z" + validity_end = datetime.datetime.strptime( + certificate_data["notAfter"], ssl_date_format + ) + validity_start = datetime.datetime.strptime( + certificate_data["notBefore"], ssl_date_format + ) cert_age = datetime.datetime.now() - validity_start cert_still_valid = validity_end - datetime.datetime.now() - output_data['age'] = cert_age.days - output_data['remaining_days'] = cert_still_valid.days + output_data["age"] = cert_age.days + output_data["remaining_days"] = cert_still_valid.days else: - output_data['age'] = None - output_data['remaining_days'] = None - output['error'] = 1 - output['errorString'] = "%s: %s" % (domain['fqdn'], error_msg) + output_data["age"] = None + output_data["remaining_days"] = None + output["error"] = 1 + output["errorString"] = "%s: %s" % (domain["fqdn"], error_msg) output_data_list.append(output_data) - output['data'] = output_data_list + 
output["data"] = output_data_list print(json.dumps(output)) diff --git a/snmp/chip.sh b/snmp/chip.sh index 07012d906..4dc2fac05 100644 --- a/snmp/chip.sh +++ b/snmp/chip.sh @@ -18,13 +18,13 @@ BAT_D=0 if [ $STATUS_ACIN == 1 ]; then # ACIN voltage - REG=`i2cget -y -f 0 0x34 0x56 w|awk '{print "0x"substr($0,5,2)substr($0,4,1)}'` - REG=`printf "%d" "$REG"` - ACIN=`echo "$REG*0.0017"|bc` + REG=$(i2cget -y -f 0 0x34 0x56 w|awk '{print "0x"substr($0,5,2)substr($0,4,1)}') + REG=$(printf "%d" "$REG") + ACIN=$(echo "$REG*0.0017"|bc) # ACIN Current - REG=`i2cget -y -f 0 0x34 0x58 w|awk '{print "0x"substr($0,5,2)substr($0,4,1)}'` - REG=`printf "%d" "$REG"` - ACIN_C=`echo "$REG*0.000625"|bc` + REG=$(i2cget -y -f 0 0x34 0x58 w|awk '{print "0x"substr($0,5,2)substr($0,4,1)}') + REG=$(printf "%d" "$REG") + ACIN_C=$(echo "$REG*0.000625"|bc) else ACIN=0 ACIN_C=0 @@ -32,14 +32,14 @@ fi if [ $STATUS_VBUS == 1 ]; then # VBUS voltage - REG=`i2cget -y -f 0 0x34 0x5A w|awk '{print "0x"substr($0,5,2)substr($0,4,1)}'` - REG=`printf "%d" "$REG"` - VBUS=`echo "$REG*0.0017"|bc` + REG=$(i2cget -y -f 0 0x34 0x5A w|awk '{print "0x"substr($0,5,2)substr($0,4,1)}') + REG=$(printf "%d" "$REG") + VBUS=$(echo "$REG*0.0017"|bc) # VBUS Current - REG=`i2cget -y -f 0 0x34 0x5C w|awk '{print "0x"substr($0,5,2)substr($0,4,1)}'` - REG=`printf "%d" "$REG"` - VBUS_C=`echo "$REG*0.000375"|bc` + REG=$(i2cget -y -f 0 0x34 0x5C w|awk '{print "0x"substr($0,5,2)substr($0,4,1)}') + REG=$(printf "%d" "$REG") + VBUS_C=$(echo "$REG*0.000375"|bc) else VBUS=0 VBUS_C=0 @@ -47,24 +47,24 @@ fi if [ $STATUS_BATCON == 1 ]; then # Battery Voltage - REG=`i2cget -y -f 0 0x34 0x78 w|awk '{print "0x"substr($0,5,2)substr($0,4,1)}'` - REG=`printf "%d" "$REG"` - VBAT=`echo "$REG*0.0011"|bc` + REG=$(i2cget -y -f 0 0x34 0x78 w|awk '{print "0x"substr($0,5,2)substr($0,4,1)}') + REG=$(printf "%d" "$REG") + VBAT=$(echo "$REG*0.0011"|bc) if [ $STATUS_CHG_DIR == 1 ]; then # Battery Charging Current - REG=`i2cget -y -f 0 0x34 0x7A w|awk '{print 
"0x"substr($0,5,2)substr($0,4,1)}'` - REG_C=`printf "%d" "$REG"` - BAT_C=`echo "scale=2;$REG_C*0.001"|bc` + REG=$(i2cget -y -f 0 0x34 0x7A w|awk '{print "0x"substr($0,5,2)substr($0,4,1)}') + REG_C=$(printf "%d" "$REG") + BAT_C=$(echo "scale=2;$REG_C*0.001"|bc) else # Battery Discharge Current - REG=`i2cget -y -f 0 0x34 0x7C w|awk '{print "0x"substr($0,5,2)substr($0,4,1)}'` - REG_D=`printf "%d" "$REG"` - BAT_D=`echo "scale=2;$REG_D*0.001"|bc` + REG=$(i2cget -y -f 0 0x34 0x7C w|awk '{print "0x"substr($0,5,2)substr($0,4,1)}') + REG_D=$(printf "%d" "$REG") + BAT_D=$(echo "scale=2;$REG_D*0.001"|bc) fi # Battery % - REG=`i2cget -y -f 0 0x34 0xB9` - BAT_PERCENT=`printf "%d" "$REG"` + REG=$(i2cget -y -f 0 0x34 0xB9) + BAT_PERCENT=$(printf "%d" "$REG") else VBAT=0 BATT_CUR=0 @@ -72,11 +72,11 @@ else fi # Temperature -REG=`i2cget -y -f 0 0x34 0x5E w|awk '{print "0x"substr($0,5,2)substr($0,4,1)}'` -REG=`printf "%d" "$REG"` -THERM=`echo "($REG*0.1)-144.7"|bc` +REG=$(i2cget -y -f 0 0x34 0x5E w|awk '{print "0x"substr($0,5,2)substr($0,4,1)}') +REG=$(printf "%d" "$REG") +THERM=$(echo "($REG*0.1)-144.7"|bc) -echo $THERM +echo "$THERM" echo $ACIN echo $ACIN_C echo $VBUS diff --git a/snmp/dhcp.py b/snmp/dhcp.py index 12937370e..532665dd8 100755 --- a/snmp/dhcp.py +++ b/snmp/dhcp.py @@ -1,10 +1,10 @@ #!/usr/bin/env python3 -import subprocess import json +import subprocess from os.path import isfile -CONFIGFILE = '/etc/snmp/dhcp.json' +CONFIGFILE = "/etc/snmp/dhcp.json" # Configfile is needed /etc/snmp/dhcp.json # @@ -13,10 +13,10 @@ # error = 0 -error_string = '' +error_string = "" version = 2 -with open(CONFIGFILE, 'r') as json_file: +with open(CONFIGFILE, "r") as json_file: try: configfile = json.load(json_file) except json.decoder.JSONDecodeError as e: @@ -25,75 +25,76 @@ if not error: - leases = {'total': 0, - 'active': 0, - 'expired': 0, - 'released': 0, - 'abandoned': 0, - 'reset': 0, - 'bootp': 0, - 'backup': 0, - 'free': 0, - } - if not isfile(configfile['leasefile']): + 
leases = { + "total": 0, + "active": 0, + "expired": 0, + "released": 0, + "abandoned": 0, + "reset": 0, + "bootp": 0, + "backup": 0, + "free": 0, + } + if not isfile(configfile["leasefile"]): error = 1 - error_string = 'Lease File not found' + error_string = "Lease File not found" else: - with open(configfile['leasefile']) as fp: + with open(configfile["leasefile"]) as fp: line = fp.readline() while line: line = fp.readline() - if 'rewind' not in line: - if line.startswith('lease'): - leases['total'] += 1 - elif 'binding state active' in line: - leases['active'] += 1 - elif 'binding state expired' in line: - leases['expired'] += 1 - elif 'binding state released' in line: - leases['released'] += 1 - elif 'binding state abandoned' in line: - leases['abandoned'] += 1 - elif 'binding state reset' in line: - leases['reset'] += 1 - elif 'binding state bootp' in line: - leases['bootp'] += 1 - elif 'binding state backup' in line: - leases['backup'] += 1 - elif 'binding state free' in line: - leases['free'] += 1 + if "rewind" not in line: + if line.startswith("lease"): + leases["total"] += 1 + elif "binding state active" in line: + leases["active"] += 1 + elif "binding state expired" in line: + leases["expired"] += 1 + elif "binding state released" in line: + leases["released"] += 1 + elif "binding state abandoned" in line: + leases["abandoned"] += 1 + elif "binding state reset" in line: + leases["reset"] += 1 + elif "binding state bootp" in line: + leases["bootp"] += 1 + elif "binding state backup" in line: + leases["backup"] += 1 + elif "binding state free" in line: + leases["free"] += 1 shell_cmd = "dhcpd-pools -s i -A" -pool_data = subprocess.Popen(shell_cmd, shell=True, stdout=subprocess.PIPE).stdout.read().split(b'\n') +pool_data = ( + subprocess.Popen(shell_cmd, shell=True, stdout=subprocess.PIPE) + .stdout.read() + .split(b"\n") +) -data = {'leases': leases, - 'pools': [], - 'networks': [], - 'all_networks': [] - } +data = {"leases": leases, "pools": [], 
"networks": [], "all_networks": []} category = None jump_line = 0 for p in pool_data: - line = p.decode('utf-8') + line = p.decode("utf-8") if jump_line: jump_line -= 1 continue - if line.startswith('Ranges:'): - category = 'pools' + if line.startswith("Ranges:"): + category = "pools" jump_line = 1 continue - if line.startswith('Shared networks:'): - category = 'networks' + if line.startswith("Shared networks:"): + category = "networks" jump_line = 1 continue - if line.startswith('Sum of all ranges:'): - category = 'all_networks' + if line.startswith("Sum of all ranges:"): + category = "all_networks" jump_line = 1 continue @@ -102,34 +103,38 @@ p = line.split() - if category == 'pools': - data[category].append({'first_ip': p[1], - 'last_ip':p[3], - 'max': p[4], - 'cur': p[5], - 'percent': p[6], - }) + if category == "pools": + data[category].append( + { + "first_ip": p[1], + "last_ip": p[3], + "max": p[4], + "cur": p[5], + "percent": p[6], + } + ) continue - if category == 'networks': - data[category].append({'network': p[0], - 'max': p[1], - 'cur': p[2], - 'percent': p[3], - }) + if category == "networks": + data[category].append( + { + "network": p[0], + "max": p[1], + "cur": p[2], + "percent": p[3], + } + ) continue - if category == 'all_networks': - data[category] ={'max': p[2], - 'cur': p[3], - 'percent': p[4], - } + if category == "all_networks": + data[category] = { + "max": p[2], + "cur": p[3], + "percent": p[4], + } continue -output = {'version': version, - 'error': error, - 'errorString': error_string, - 'data': data} +output = {"version": version, "error": error, "errorString": error_string, "data": data} -print (json.dumps(output)) +print(json.dumps(output)) diff --git a/snmp/exim-stats.sh b/snmp/exim-stats.sh index 4b430d549..729205f62 100644 --- a/snmp/exim-stats.sh +++ b/snmp/exim-stats.sh @@ -19,17 +19,17 @@ # ------------------------------------------------------------- # # restart snmpd and activate the app for desired host # 
################################################################# -BIN_EXIM=`which exim` -BIN_GREP=`which grep` -BIN_WC=`which wc` +BIN_EXIM=$(which exim) +BIN_GREP=$(which grep) +BIN_WC=$(which wc) CFG_EXIM_1='-bp' CFG_EXIM_2='-bpc' CFG_GREP='frozen' CFG_WC='-l' ################################################################# -FROZEN=`$BIN_EXIM $CFG_EXIM_1 | $BIN_GREP $CFG_GREP | $BIN_WC $CFG_WC` -echo $FROZEN +FROZEN=$($BIN_EXIM $CFG_EXIM_1 | $BIN_GREP $CFG_GREP | $BIN_WC $CFG_WC) +echo "$FROZEN" -QUEUE=`$BIN_EXIM $CFG_EXIM_2` -echo $QUEUE \ No newline at end of file +QUEUE=$($BIN_EXIM $CFG_EXIM_2) +echo "$QUEUE" diff --git a/snmp/freeradius.sh b/snmp/freeradius.sh index dac7e9980..8a9423b38 100644 --- a/snmp/freeradius.sh +++ b/snmp/freeradius.sh @@ -25,46 +25,46 @@ if [ $AGENT == 1 ]; then echo "<<>>" fi -RESULT=`echo "$RADIUS_STATUS_CMD" | $BIN_RADCLIENT -x $RADIUS_SERVER:$RADIUS_PORT status $RADIUS_KEY` +RESULT=$(echo "$RADIUS_STATUS_CMD" | $BIN_RADCLIENT -x $RADIUS_SERVER:$RADIUS_PORT status $RADIUS_KEY) -echo $RESULT | grep -o 'FreeRADIUS-Total-Access-Requests = [[:digit:]]*' -echo $RESULT | grep -o 'FreeRADIUS-Total-Access-Accepts = [[:digit:]]*' -echo $RESULT | grep -o 'FreeRADIUS-Total-Access-Rejects = [[:digit:]]*' -echo $RESULT | grep -o 'FreeRADIUS-Total-Access-Challenges = [[:digit:]]*' -echo $RESULT | grep -o 'FreeRADIUS-Total-Auth-Responses = [[:digit:]]*' -echo $RESULT | grep -o 'FreeRADIUS-Total-Auth-Duplicate-Requests = [[:digit:]]*' -echo $RESULT | grep -o 'FreeRADIUS-Total-Auth-Malformed-Requests = [[:digit:]]*' -echo $RESULT | grep -o 'FreeRADIUS-Total-Auth-Invalid-Requests = [[:digit:]]*' -echo $RESULT | grep -o 'FreeRADIUS-Total-Auth-Dropped-Requests = [[:digit:]]*' -echo $RESULT | grep -o 'FreeRADIUS-Total-Auth-Unknown-Types = [[:digit:]]*' -echo $RESULT | grep -o 'FreeRADIUS-Total-Accounting-Requests = [[:digit:]]*' -echo $RESULT | grep -o 'FreeRADIUS-Total-Accounting-Responses = [[:digit:]]*' -echo $RESULT | grep -o 
'FreeRADIUS-Total-Acct-Duplicate-Requests = [[:digit:]]*' -echo $RESULT | grep -o 'FreeRADIUS-Total-Acct-Malformed-Requests = [[:digit:]]*' -echo $RESULT | grep -o 'FreeRADIUS-Total-Acct-Invalid-Requests = [[:digit:]]*' -echo $RESULT | grep -o 'FreeRADIUS-Total-Acct-Dropped-Requests = [[:digit:]]*' -echo $RESULT | grep -o 'FreeRADIUS-Total-Acct-Unknown-Types = [[:digit:]]*' -echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Access-Requests = [[:digit:]]*' -echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Access-Accepts = [[:digit:]]*' -echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Access-Rejects = [[:digit:]]*' -echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Access-Challenges = [[:digit:]]*' -echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Auth-Responses = [[:digit:]]*' -echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Auth-Duplicate-Requests = [[:digit:]]*' -echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Auth-Malformed-Requests = [[:digit:]]*' -echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Auth-Invalid-Requests = [[:digit:]]*' -echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Auth-Dropped-Requests = [[:digit:]]*' -echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Auth-Unknown-Types = [[:digit:]]*' -echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Accounting-Requests = [[:digit:]]*' -echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Accounting-Responses = [[:digit:]]*' -echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Acct-Duplicate-Requests = [[:digit:]]*' -echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Acct-Malformed-Requests = [[:digit:]]*' -echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Acct-Invalid-Requests = [[:digit:]]*' -echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Acct-Dropped-Requests = [[:digit:]]*' -echo $RESULT | grep -o 'FreeRADIUS-Total-Proxy-Acct-Unknown-Types = [[:digit:]]*' -echo $RESULT | grep -o 'FreeRADIUS-Queue-Len-Internal = [[:digit:]]*' -echo $RESULT | grep -o 'FreeRADIUS-Queue-Len-Proxy = [[:digit:]]*' -echo $RESULT | grep -o 'FreeRADIUS-Queue-Len-Auth = 
[[:digit:]]*' -echo $RESULT | grep -o 'FreeRADIUS-Queue-Len-Acct = [[:digit:]]*' -echo $RESULT | grep -o 'FreeRADIUS-Queue-Len-Detail = [[:digit:]]*' -echo $RESULT | grep -o 'FreeRADIUS-Queue-PPS-In = [[:digit:]]*' -echo $RESULT | grep -o 'FreeRADIUS-Queue-PPS-Out = [[:digit:]]*' +echo "$RESULT" | grep -o 'FreeRADIUS-Total-Access-Requests = [[:digit:]]*' +echo "$RESULT" | grep -o 'FreeRADIUS-Total-Access-Accepts = [[:digit:]]*' +echo "$RESULT" | grep -o 'FreeRADIUS-Total-Access-Rejects = [[:digit:]]*' +echo "$RESULT" | grep -o 'FreeRADIUS-Total-Access-Challenges = [[:digit:]]*' +echo "$RESULT" | grep -o 'FreeRADIUS-Total-Auth-Responses = [[:digit:]]*' +echo "$RESULT" | grep -o 'FreeRADIUS-Total-Auth-Duplicate-Requests = [[:digit:]]*' +echo "$RESULT" | grep -o 'FreeRADIUS-Total-Auth-Malformed-Requests = [[:digit:]]*' +echo "$RESULT" | grep -o 'FreeRADIUS-Total-Auth-Invalid-Requests = [[:digit:]]*' +echo "$RESULT" | grep -o 'FreeRADIUS-Total-Auth-Dropped-Requests = [[:digit:]]*' +echo "$RESULT" | grep -o 'FreeRADIUS-Total-Auth-Unknown-Types = [[:digit:]]*' +echo "$RESULT" | grep -o 'FreeRADIUS-Total-Accounting-Requests = [[:digit:]]*' +echo "$RESULT" | grep -o 'FreeRADIUS-Total-Accounting-Responses = [[:digit:]]*' +echo "$RESULT" | grep -o 'FreeRADIUS-Total-Acct-Duplicate-Requests = [[:digit:]]*' +echo "$RESULT" | grep -o 'FreeRADIUS-Total-Acct-Malformed-Requests = [[:digit:]]*' +echo "$RESULT" | grep -o 'FreeRADIUS-Total-Acct-Invalid-Requests = [[:digit:]]*' +echo "$RESULT" | grep -o 'FreeRADIUS-Total-Acct-Dropped-Requests = [[:digit:]]*' +echo "$RESULT" | grep -o 'FreeRADIUS-Total-Acct-Unknown-Types = [[:digit:]]*' +echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Access-Requests = [[:digit:]]*' +echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Access-Accepts = [[:digit:]]*' +echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Access-Rejects = [[:digit:]]*' +echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Access-Challenges = [[:digit:]]*' +echo "$RESULT" | grep -o 
'FreeRADIUS-Total-Proxy-Auth-Responses = [[:digit:]]*' +echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Auth-Duplicate-Requests = [[:digit:]]*' +echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Auth-Malformed-Requests = [[:digit:]]*' +echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Auth-Invalid-Requests = [[:digit:]]*' +echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Auth-Dropped-Requests = [[:digit:]]*' +echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Auth-Unknown-Types = [[:digit:]]*' +echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Accounting-Requests = [[:digit:]]*' +echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Accounting-Responses = [[:digit:]]*' +echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Acct-Duplicate-Requests = [[:digit:]]*' +echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Acct-Malformed-Requests = [[:digit:]]*' +echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Acct-Invalid-Requests = [[:digit:]]*' +echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Acct-Dropped-Requests = [[:digit:]]*' +echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Acct-Unknown-Types = [[:digit:]]*' +echo "$RESULT" | grep -o 'FreeRADIUS-Queue-Len-Internal = [[:digit:]]*' +echo "$RESULT" | grep -o 'FreeRADIUS-Queue-Len-Proxy = [[:digit:]]*' +echo "$RESULT" | grep -o 'FreeRADIUS-Queue-Len-Auth = [[:digit:]]*' +echo "$RESULT" | grep -o 'FreeRADIUS-Queue-Len-Acct = [[:digit:]]*' +echo "$RESULT" | grep -o 'FreeRADIUS-Queue-Len-Detail = [[:digit:]]*' +echo "$RESULT" | grep -o 'FreeRADIUS-Queue-PPS-In = [[:digit:]]*' +echo "$RESULT" | grep -o 'FreeRADIUS-Queue-PPS-Out = [[:digit:]]*' diff --git a/snmp/gpsd b/snmp/gpsd index 70fe924c9..eed38c4bc 100755 --- a/snmp/gpsd +++ b/snmp/gpsd @@ -1,45 +1,45 @@ -#!/usr/bin/env bash -# -# Copyright (c) 2019 Mike Centola -# -# Please make sure the paths below are correct. -# Alternatively you can put them in $0.conf, meaning if you've named -# this script gpsd.sh then it must go in gpsd.sh.conf . 
-# -# -################################################################ -# Don't change anything unless you know what are you doing # -################################################################ - -BIN_GPIPE='/usr/bin/env gpspipe' -BIN_GREP='/usr/bin/env grep' -BIN_PYTHON='/usr/bin/env python' - -# Check for config file -CONFIG=$0".conf" -if [ -f $CONFIG ]; then - . $CONFIG -fi - -# Create Temp File -TMPFILE=$(mktemp) -trap "rm -f $TMPFILE" 0 2 3 15 - -# Write GPSPIPE Data to Temp File -$BIN_GPIPE -w -n 20 > $TMPFILE - -# Parse Temp file for GPSD Data -VERSION=`cat $TMPFILE | $BIN_GREP -m 1 "VERSION" | $BIN_PYTHON -c 'import sys,json;print json.load(sys.stdin)["rev"]'` -GPSDMODE=`cat $TMPFILE | $BIN_GREP -m 1 "mode" | $BIN_PYTHON -c 'import sys,json;print json.load(sys.stdin)["mode"]'` -HDOP=`cat $TMPFILE | $BIN_GREP -m 1 "hdop" | $BIN_PYTHON -c 'import sys,json;print json.load(sys.stdin)["hdop"]'` -VDOP=`cat $TMPFILE | $BIN_GREP -m 1 "vdop" | $BIN_PYTHON -c 'import sys,json;print json.load(sys.stdin)["vdop"]'` -LAT=`cat $TMPFILE | $BIN_GREP -m 1 "lat" | $BIN_PYTHON -c 'import sys,json;print json.load(sys.stdin)["lat"]'` -LONG=`cat $TMPFILE | $BIN_GREP -m 1 "lon" | $BIN_PYTHON -c 'import sys,json;print json.load(sys.stdin)["lon"]'` -ALT=`cat $TMPFILE | $BIN_GREP -m 1 "alt" | $BIN_PYTHON -c 'import sys,json;print json.load(sys.stdin)["alt"]'` -SATS=`cat $TMPFILE | $BIN_GREP -m 1 "SKY" | $BIN_PYTHON -c 'import sys,json;print len(json.load(sys.stdin)["satellites"])'` -SATSUSED=`cat $TMPFILE | $BIN_GREP -m 1 "SKY" | $BIN_PYTHON -c 'import sys,json;print len([sat for sat in json.load(sys.stdin)["satellites"] if sat["used"]])'` - -# Output info for SNMP Extend -echo '{"data":{"mode":"'$GPSDMODE'", "hdop":"'$HDOP'", "vdop":"'$VDOP'", "latitude":"'$LAT'", "longitude":"'$LONG'", "altitude":"'$ALT'", "satellites":"'$SATS'", "satellites_used":"'$SATSUSED'"}, "error":"0", "errorString":"", "version":"'$VERSION'"}' - -rm $TMPFILE +#!/usr/bin/env bash +# +# Copyright (c) 
2019 Mike Centola +# +# Please make sure the paths below are correct. +# Alternatively you can put them in $0.conf, meaning if you've named +# this script gpsd.sh then it must go in gpsd.sh.conf . +# +# +################################################################ +# Don't change anything unless you know what are you doing # +################################################################ + +BIN_GPIPE='/usr/bin/env gpspipe' +BIN_GREP='/usr/bin/env grep' +BIN_PYTHON='/usr/bin/env python' + +# Check for config file +CONFIG=$0".conf" +if [ -f "$CONFIG" ]; then + . "$CONFIG" +fi + +# Create Temp File +TMPFILE=$(mktemp) +trap "rm -f $TMPFILE" 0 2 3 15 + +# Write GPSPIPE Data to Temp File +$BIN_GPIPE -w -n 20 > "$TMPFILE" + +# Parse Temp file for GPSD Data +VERSION=$(cat "$TMPFILE" | $BIN_GREP -m 1 "VERSION" | $BIN_PYTHON -c 'import sys,json;print json.load(sys.stdin)["rev"]') +GPSDMODE=$(cat "$TMPFILE" | $BIN_GREP -m 1 "mode" | $BIN_PYTHON -c 'import sys,json;print json.load(sys.stdin)["mode"]') +HDOP=$(cat "$TMPFILE" | $BIN_GREP -m 1 "hdop" | $BIN_PYTHON -c 'import sys,json;print json.load(sys.stdin)["hdop"]') +VDOP=$(cat "$TMPFILE" | $BIN_GREP -m 1 "vdop" | $BIN_PYTHON -c 'import sys,json;print json.load(sys.stdin)["vdop"]') +LAT=$(cat "$TMPFILE" | $BIN_GREP -m 1 "lat" | $BIN_PYTHON -c 'import sys,json;print json.load(sys.stdin)["lat"]') +LONG=$(cat "$TMPFILE" | $BIN_GREP -m 1 "lon" | $BIN_PYTHON -c 'import sys,json;print json.load(sys.stdin)["lon"]') +ALT=$(cat "$TMPFILE" | $BIN_GREP -m 1 "alt" | $BIN_PYTHON -c 'import sys,json;print json.load(sys.stdin)["alt"]') +SATS=$(cat "$TMPFILE" | $BIN_GREP -m 1 "SKY" | $BIN_PYTHON -c 'import sys,json;print len(json.load(sys.stdin)["satellites"])') +SATSUSED=$(cat "$TMPFILE" | $BIN_GREP -m 1 "SKY" | $BIN_PYTHON -c 'import sys,json;print len([sat for sat in json.load(sys.stdin)["satellites"] if sat["used"]])') + +# Output info for SNMP Extend +echo '{"data":{"mode":"'"$GPSDMODE"'", "hdop":"'"$HDOP"'", "vdop":"'"$VDOP"'", 
"latitude":"'"$LAT"'", "longitude":"'"$LONG"'", "altitude":"'"$ALT"'", "satellites":"'"$SATS"'", "satellites_used":"'"$SATSUSED"'"}, "error":"0", "errorString":"", "version":"'"$VERSION"'"}' + +rm "$TMPFILE" diff --git a/snmp/icecast-stats.sh b/snmp/icecast-stats.sh index c93c6bca0..541c174c8 100644 --- a/snmp/icecast-stats.sh +++ b/snmp/icecast-stats.sh @@ -5,10 +5,10 @@ used_memory=$(ps -U icecast -o rsz | awk 'FNR==2{print}') cpu_load=$(ps -U icecast -o %cpu | awk 'FNR==2{print}') pid=$(pidof icecast) -total_files=$(ls -l /proc/${pid}/fd | wc -l) +total_files=$(ls -l /proc/"${pid}"/fd | wc -l) -echo "Used Memory="$used_memory -echo "CPU Load="$cpu_load -echo "Open files="$total_files +echo "Used Memory=""$used_memory" +echo "CPU Load=""$cpu_load" +echo "Open files=""$total_files" exit diff --git a/snmp/mailcow-dockerized-postfix b/snmp/mailcow-dockerized-postfix index 354f14fbd..8fd536481 100644 --- a/snmp/mailcow-dockerized-postfix +++ b/snmp/mailcow-dockerized-postfix @@ -21,9 +21,9 @@ # requirements: mailcow-dockerized and pflogsumm # -import subprocess -import re import json +import re +import subprocess # LibreNMS poller interval librenms_poller_interval = 300 @@ -34,37 +34,46 @@ def libre_to_mcd_postfix(libre_seconds): def cli_get_docker_container(): - return subprocess.check_output("docker ps -qf name=postfix-mailcow", shell=True).decode('utf8').strip() + return ( + subprocess.check_output("docker ps -qf name=postfix-mailcow", shell=True) + .decode("utf8") + .strip() + ) def cli_command(): - cli_part = "docker logs --since " + libre_to_mcd_postfix(librenms_poller_interval) \ - + "m " + cli_get_docker_container() + "| pflogsumm --smtpd-stats" + cli_part = ( + "docker logs --since " + + libre_to_mcd_postfix(librenms_poller_interval) + + "m " + + cli_get_docker_container() + + "| pflogsumm --smtpd-stats" + ) return cli_part def get_output(): - return subprocess.check_output(cli_command(), shell=True).decode('utf8') + return 
subprocess.check_output(cli_command(), shell=True).decode("utf8") def output_cleaning(input): - output = re.split('\n', input) + output = re.split("\n", input) return list(filter(None, output)) def entry_generator(input): - entry = re.sub(' +', ':', input.strip().lstrip()) - return entry.split(':') + entry = re.sub(" +", ":", input.strip().lstrip()) + return entry.split(":") # limit our needed output -mcd_postfix_data = get_output().split('messages') -data = mcd_postfix_data[1].split('smtpd') +mcd_postfix_data = get_output().split("messages") +data = mcd_postfix_data[1].split("smtpd") # postfix stats only mcd_postfix_info = data[0] # smtpd stats only -mcd_smtpd_info = data[1].split('Per-Hour Traffic Summary')[0] +mcd_smtpd_info = data[1].split("Per-Hour Traffic Summary")[0] # postfix stats export mcd_postfix = output_cleaning(mcd_postfix_info) @@ -74,17 +83,16 @@ points_label = [] for entry in mcd_postfix: data_labels = entry_generator(entry) - if data_labels[0].find('k') == -1: + if data_labels[0].find("k") == -1: points_data.append(data_labels[0]) else: - data_point = data_labels[0].replace('k', '', 1) + data_point = data_labels[0].replace("k", "", 1) data_point = int(data_point) * 1024 points_data.append(data_point) - points_label.append(re.sub('[^a-zA-Z]+', '', data_labels[1])) + points_label.append(re.sub("[^a-zA-Z]+", "", data_labels[1])) entries = dict(zip(points_label, points_data)) export = {"data": entries, "error": "0", "errorString": "", "version": "1"} -data = re.sub(' ', '', json.dumps(export)) +data = re.sub(" ", "", json.dumps(export)) print(data) - diff --git a/snmp/mailscanner.php b/snmp/mailscanner.php index b80fdb9ff..8a9f3e274 100755 --- a/snmp/mailscanner.php +++ b/snmp/mailscanner.php @@ -17,60 +17,60 @@ /// /////////////////////////////////////////////////////////////////////////////////////// - // START SETTINGS /// - $mailstats = "/opt/librenms/scripts/watchmaillog/watchmaillog_counters"; + $mailstats = 
'/opt/librenms/scripts/watchmaillog/watchmaillog_counters'; // END SETTINGS /// - /// // DO NOT EDIT BENETH THIS LINE /// /////////////////////////////////////////////////////////////////////////////////////// - function doSNMPv2($vars) { - $stats = array(); - if (file_exists($vars)) { - $data = file($vars); - foreach ($data as $item=>$value) { - if (!empty($value)) { - $temp = explode(':', trim($value)); - if (isset($temp[1])) { - $stats[$temp[0]] = $temp[1]; - } - } - } - } - $var = array(); - $var['mess_recv'] = (isset($stats['mess_recv']) ? $stats['mess_recv'] : "U"); - $var['mess_rejected'] = (isset($stats['mess_rejected']) ? $stats['mess_rejected'] : "U"); - $var['mess_relay'] = (isset($stats['mess_relay']) ? $stats['mess_relay'] : "U"); - $var['mess_sent'] = (isset($stats['mess_sent']) ? $stats['mess_sent'] : "U"); - $var['mess_waiting'] = (isset($stats['mess_waiting']) ? $stats['mess_waiting'] : "U"); - $var['spam'] = (isset($stats['spam']) ? $stats['spam'] : "U"); - $var['virus'] = (isset($stats['virus']) ? $stats['virus'] : "U"); - foreach ($var as $item=>$count) { - echo $count."\n"; - } - } - - function clearStats($mailstats) { - if (file_exists($mailstats)) { - $fp = fopen($mailstats, 'w'); - fwrite($fp, "mess_recv:0\n"); - fwrite($fp, "mess_rejected:0\n"); - fwrite($fp, "mess_relay:0\n"); - fwrite($fp, "mess_sent:0\n"); - fwrite($fp, "mess_waiting:0\n"); - fwrite($fp, "spam:0\n"); - fwrite($fp, "virus:0\n"); - fclose($fp); - } - } + function doSNMPv2($vars) + { + $stats = []; + if (file_exists($vars)) { + $data = file($vars); + foreach ($data as $item=>$value) { + if (!empty($value)) { + $temp = explode(':', trim($value)); + if (isset($temp[1])) { + $stats[$temp[0]] = $temp[1]; + } + } + } + } + $var = []; + $var['mess_recv'] = (isset($stats['mess_recv']) ? $stats['mess_recv'] : 'U'); + $var['mess_rejected'] = (isset($stats['mess_rejected']) ? $stats['mess_rejected'] : 'U'); + $var['mess_relay'] = (isset($stats['mess_relay']) ? 
$stats['mess_relay'] : 'U'); + $var['mess_sent'] = (isset($stats['mess_sent']) ? $stats['mess_sent'] : 'U'); + $var['mess_waiting'] = (isset($stats['mess_waiting']) ? $stats['mess_waiting'] : 'U'); + $var['spam'] = (isset($stats['spam']) ? $stats['spam'] : 'U'); + $var['virus'] = (isset($stats['virus']) ? $stats['virus'] : 'U'); + foreach ($var as $item=>$count) { + echo $count."\n"; + } + } + + function clearStats($mailstats) + { + if (file_exists($mailstats)) { + $fp = fopen($mailstats, 'w'); + fwrite($fp, "mess_recv:0\n"); + fwrite($fp, "mess_rejected:0\n"); + fwrite($fp, "mess_relay:0\n"); + fwrite($fp, "mess_sent:0\n"); + fwrite($fp, "mess_waiting:0\n"); + fwrite($fp, "spam:0\n"); + fwrite($fp, "virus:0\n"); + fclose($fp); + } + } - doSNMPv2($mailstats); - //clearStats($mailstats); + doSNMPv2($mailstats); + //clearStats($mailstats); ?> diff --git a/snmp/mdadm b/snmp/mdadm index b25629266..8565f8d69 100755 --- a/snmp/mdadm +++ b/snmp/mdadm @@ -19,10 +19,10 @@ OUTPUT_DATA='[' # use 'ls' command to check if md blocks exist if $LS /dev/md?* 1> /dev/null 2>&1 ; then for ARRAY_BLOCKDEVICE in $($LS -1 /dev/md?*) ; do - RAID="/sys/block/"$($BASENAME $($REALPATH $ARRAY_BLOCKDEVICE)) + RAID="/sys/block/"$($BASENAME $($REALPATH "$ARRAY_BLOCKDEVICE")) # ignore arrays with no slaves - if [ -z "$($LS -1 $RAID/slaves 2> /dev/null)" ] ; then + if [ -z "$($LS -1 "$RAID"/slaves 2> /dev/null)" ] ; then continue fi # ignore "non existing" arrays @@ -30,27 +30,27 @@ if $LS /dev/md?* 1> /dev/null 2>&1 ; then continue fi - if [[ $($BASENAME $ARRAY_BLOCKDEVICE) = [[:digit:]] ]] ; then - RAID_NAME=$($BASENAME $RAID) + if [[ $($BASENAME "$ARRAY_BLOCKDEVICE") = [[:digit:]] ]] ; then + RAID_NAME=$($BASENAME "$RAID") else - RAID_NAME=$($BASENAME $ARRAY_BLOCKDEVICE) + RAID_NAME=$($BASENAME "$ARRAY_BLOCKDEVICE") fi - RAID_DEV_LIST=$($LS $RAID/slaves/) - RAID_LEVEL=$($CAT $RAID/md/level) - RAID_DISC_COUNT=$($CAT $RAID/md/raid_disks| cut -d' ' -f1) - RAID_STATE=$($CAT $RAID/md/array_state) - 
RAID_ACTION=$($CAT $RAID/md/sync_action) - RAID_DEGRADED=$($CAT $RAID/md/degraded) + RAID_DEV_LIST=$($LS "$RAID"/slaves/) + RAID_LEVEL=$($CAT "$RAID"/md/level) + RAID_DISC_COUNT=$($CAT "$RAID"/md/raid_disks| cut -d' ' -f1) + RAID_STATE=$($CAT "$RAID"/md/array_state) + RAID_ACTION=$($CAT "$RAID"/md/sync_action) + RAID_DEGRADED=$($CAT "$RAID"/md/degraded) if [ "$RAID_SYNC_SPEED" = "none" ] ; then RAID_SYNC_SPEED=0 else - let "RAID_SYNC_SPEED=$($CAT $RAID/md/sync_speed)*1024" + let "RAID_SYNC_SPEED=$($CAT "$RAID"/md/sync_speed)*1024" fi - if [ "$($CAT $RAID/md/sync_completed)" != "none" ] ; then - let "RAID_SYNC_COMPLETED=100*$($CAT $RAID/md/sync_completed)" - elif [ $RAID_DEGRADED -eq 1 ] ; then + if [ "$($CAT "$RAID"/md/sync_completed)" != "none" ] ; then + let "RAID_SYNC_COMPLETED=100*$($CAT "$RAID"/md/sync_completed)" + elif [ "$RAID_DEGRADED" -eq 1 ] ; then RAID_SYNC_COMPLETED=0 else RAID_SYNC_COMPLETED=100 @@ -58,7 +58,7 @@ if $LS /dev/md?* 1> /dev/null 2>&1 ; then # divide with 2 to size like in /proc/mdstat # and multiply with 1024 to get size in bytes - let "RAID_SIZE=$($CAT $RAID/size)*1024/2" + let "RAID_SIZE=$($CAT "$RAID"/size)*1024/2" RAID_DEVICE_LIST='[' ALL_DEVICE_COUNT=0 @@ -73,7 +73,7 @@ if $LS /dev/md?* 1> /dev/null 2>&1 ; then RAID_MISSING_DEVICES='[' for D in $RAID_DEV_LIST ; do - if [ -L $RAID/slaves/$D ] && [ -f $RAID/slaves/$D ] ; then + if [ -L "$RAID"/slaves/"$D" ] && [ -f "$RAID"/slaves/"$D" ] ; then RAID_MISSING_DEVICES=$RAID_MISSING_DEVICES'"'$D'",' fi done @@ -83,7 +83,7 @@ if $LS /dev/md?* 1> /dev/null 2>&1 ; then RAID_MISSING_DEVICES=$RAID_MISSING_DEVICES']' let "RAID_HOTSPARE_COUNT=ALL_DEVICE_COUNT-RAID_DISC_COUNT" - if [ $RAID_HOTSPARE_COUNT -lt 0 ] ; then + if [ "$RAID_HOTSPARE_COUNT" -lt 0 ] ; then RAID_HOTSPARE_COUNT=0 fi @@ -115,5 +115,5 @@ OUTPUT='{"data":'$OUTPUT_DATA\ '","errorString":"'$ERROR_STRING\ '","version":"'$VERSION'"}' -echo $OUTPUT +echo "$OUTPUT" diff --git a/snmp/mysql-stats b/snmp/mysql-stats index 
d191bdbbc..c8f32cca2 100755 --- a/snmp/mysql-stats +++ b/snmp/mysql-stats @@ -1,52 +1,117 @@ #!/usr/bin/env python2 -import warnings import re -warnings.filterwarnings(action="ignore", message='the sets module is deprecated') -import sets -import MySQLdb +import warnings + +warnings.filterwarnings(action="ignore", message="the sets module is deprecated") import base64 -conn = MySQLdb.connect(host='', - user='', - passwd='', - db='') -cursor = conn.cursor () +import MySQLdb +import sets + +conn = MySQLdb.connect(host="", user="", passwd="", db="") +cursor = conn.cursor() -cursor.execute ("SHOW GLOBAL STATUS") + +cursor.execute("SHOW GLOBAL STATUS") rows = cursor.fetchall() datavariables = { - 'Command Counters': ['Com_delete','Com_insert','Com_insert_select','Com_load','Com_replace','Com_replace_select', 'Com_select', 'Com_update', 'Com_update_multi'], - 'Connections': ['max_connections', 'Max_used_connections', 'Aborted_clients', 'Aborted_connects','Threads_connected','Connections'], - 'Files and Tables': ['table_open_cache','Open_files','Open_tables','Opened_tables'], - 'InnoDB Buffer Pool': ['ib_bpool_size','ib_bpool_dbpages', 'ib_bpool_free','ib_bpool_modpages'], - 'InnoDB Buffer Pool Activity': ['ib_bpool_read','ib_bpool_created', 'ib_bpool_written'], - 'InnoDB Insert Buffer': ['ib_ibuf_inserts','ib_ibuf_merged_rec', 'ib_ibuf_merges'], - 'InnoDB IO': ['ib_io_read','ib_io_write','ib_io_log', 'ib_io_fsync'], - 'InnoDB IO Pending': ['ib_iop_log','ib_iop_sync', 'ib_iop_flush_log', 'ib_iop_flush_bpool', 'ib_iop_ibuf_aio','ib_iop_aioread','ib_iop_aiowrite'], - 'InnoDB Log': ['innodb_log_buffer_size','ib_log_flush','ib_log_written'], - 'InnoDB Row Operations': ['Innodb_rows_deleted','Innodb_rows_inserted','Innodb_rows_read','Innodb_rows_updated'], - 'InnoDB Semaphores': ['ib_spin_rounds','ib_spin_waits','ib_os_waits'], - 'InnoDB Transactions': ['ib_tnx'], - 'MyISAM Indexes': ['Key_read_requests','Key_reads','Key_write_requests','Key_writes'], - 'Network Traffic': 
['Bytes_received','Bytes_sent'], - 'Query Cache': ['Qcache_queries_in_cache','Qcache_hits','Qcache_inserts','Qcache_not_cached','Qcache_lowmem_prunes'], - 'Query Cache Memory': ['query_cache_size','Qcache_free_memory'], - 'Select Types': ['Select_full_join','Select_full_range_join','Select_range','Select_range_check','Select_scan'], - 'Slow Queries': ['Slow_queries'], - 'Sorts': ['Sort_rows','Sort_range','Sort_merge_passes','Sort_scan'], - 'Table Locks': ['Table_locks_immediate','Table_locks_waited'], - 'Temporary Objects': ['Created_tmp_disk_tables','Created_tmp_tables','Created_tmp_files'] - } + "Command Counters": [ + "Com_delete", + "Com_insert", + "Com_insert_select", + "Com_load", + "Com_replace", + "Com_replace_select", + "Com_select", + "Com_update", + "Com_update_multi", + ], + "Connections": [ + "max_connections", + "Max_used_connections", + "Aborted_clients", + "Aborted_connects", + "Threads_connected", + "Connections", + ], + "Files and Tables": [ + "table_open_cache", + "Open_files", + "Open_tables", + "Opened_tables", + ], + "InnoDB Buffer Pool": [ + "ib_bpool_size", + "ib_bpool_dbpages", + "ib_bpool_free", + "ib_bpool_modpages", + ], + "InnoDB Buffer Pool Activity": [ + "ib_bpool_read", + "ib_bpool_created", + "ib_bpool_written", + ], + "InnoDB Insert Buffer": ["ib_ibuf_inserts", "ib_ibuf_merged_rec", "ib_ibuf_merges"], + "InnoDB IO": ["ib_io_read", "ib_io_write", "ib_io_log", "ib_io_fsync"], + "InnoDB IO Pending": [ + "ib_iop_log", + "ib_iop_sync", + "ib_iop_flush_log", + "ib_iop_flush_bpool", + "ib_iop_ibuf_aio", + "ib_iop_aioread", + "ib_iop_aiowrite", + ], + "InnoDB Log": ["innodb_log_buffer_size", "ib_log_flush", "ib_log_written"], + "InnoDB Row Operations": [ + "Innodb_rows_deleted", + "Innodb_rows_inserted", + "Innodb_rows_read", + "Innodb_rows_updated", + ], + "InnoDB Semaphores": ["ib_spin_rounds", "ib_spin_waits", "ib_os_waits"], + "InnoDB Transactions": ["ib_tnx"], + "MyISAM Indexes": [ + "Key_read_requests", + "Key_reads", + 
"Key_write_requests", + "Key_writes", + ], + "Network Traffic": ["Bytes_received", "Bytes_sent"], + "Query Cache": [ + "Qcache_queries_in_cache", + "Qcache_hits", + "Qcache_inserts", + "Qcache_not_cached", + "Qcache_lowmem_prunes", + ], + "Query Cache Memory": ["query_cache_size", "Qcache_free_memory"], + "Select Types": [ + "Select_full_join", + "Select_full_range_join", + "Select_range", + "Select_range_check", + "Select_scan", + ], + "Slow Queries": ["Slow_queries"], + "Sorts": ["Sort_rows", "Sort_range", "Sort_merge_passes", "Sort_scan"], + "Table Locks": ["Table_locks_immediate", "Table_locks_waited"], + "Temporary Objects": [ + "Created_tmp_disk_tables", + "Created_tmp_tables", + "Created_tmp_files", + ], +} data = {} for row in rows: data[row[0]] = row[1] cursor = "" -cursor = conn.cursor () -cursor.execute ("SHOW VARIABLES") +cursor = conn.cursor() +cursor.execute("SHOW VARIABLES") rows = cursor.fetchall() for row in rows: @@ -59,73 +124,98 @@ rows = cursor.fetchall() for row in rows: for line in row[2].split("\n"): - ib_bpool_size = re.match(r"Buffer\spool\ssize\s+(\d+)", line) - ib_bpool_free = re.match(r"Free\sbuffers\s+(\d+)", line) - ib_bpool_dbpages = re.match(r"Database\spages\s+(\d+)", line) - ib_bpool_modpages = re.match(r"Modified\sdb\spages\s+(\d+)", line) - ib_b_reg = re.match(r"Pages\sread\s(\d+),\screated\s(\d+),\swritten (\d+)", line) - ib_insert_buffer = re.match(r"(\d+)\sinserts,\s(\d+)\smerged\srecs,\s(\d+)", line) - ib_io = re.match(r"(\d+)\sOS\sfile\sreads,\s(\d+)\sOS\sfile\swrites,\s(\d+)\sOS\sfsyncs", line) - ib_io_log = re.match(r"(\d+)\slog\si\/o's\sdone.*", line) - ib_io_p1 = re.match(r"Pending\snormal\saio\sreads:\s(\d+),\saio\swrites:\s(\d+),", line) - ib_io_p2 = re.match(r"\s?ibuf\saio\sreads:\s(\d+),\slog\si\/o's:\s(\d+),\ssync\si\/o's:\s(\d+)", line) - ib_io_p3 = re.match(r"\s?Pending\sflushes\s\(fsync\)\slog:\s(\d+);\sbuffer\spool:\s(\d+)\s?", line) - ib_log_p1 = re.match(r"\s?Log\ssequence\snumber\s([[a-fA-F\d]+)(?: (\d+))?", 
line) - ib_log_p2 = re.match(r"\s?Log\sflushed\sup\sto\s+([[a-fA-F\d]+)(?: (\d+))?", line) - ib_semaphore = re.match(r"\s?Mutex\sspin\swaits\s(\d+),\srounds\s(\d+),\sOS waits\s(\d+)", line) - ib_tnx = re.match(r"\s?Trx\sid\scounter\s([[a-fA-F\d]+)(?: (\d+))?", line) + ib_bpool_size = re.match(r"Buffer\spool\ssize\s+(\d+)", line) + ib_bpool_free = re.match(r"Free\sbuffers\s+(\d+)", line) + ib_bpool_dbpages = re.match(r"Database\spages\s+(\d+)", line) + ib_bpool_modpages = re.match(r"Modified\sdb\spages\s+(\d+)", line) + ib_b_reg = re.match( + r"Pages\sread\s(\d+),\screated\s(\d+),\swritten (\d+)", line + ) + ib_insert_buffer = re.match( + r"(\d+)\sinserts,\s(\d+)\smerged\srecs,\s(\d+)", line + ) + ib_io = re.match( + r"(\d+)\sOS\sfile\sreads,\s(\d+)\sOS\sfile\swrites,\s(\d+)\sOS\sfsyncs", + line, + ) + ib_io_log = re.match(r"(\d+)\slog\si\/o's\sdone.*", line) + ib_io_p1 = re.match( + r"Pending\snormal\saio\sreads:\s(\d+),\saio\swrites:\s(\d+),", line + ) + ib_io_p2 = re.match( + r"\s?ibuf\saio\sreads:\s(\d+),\slog\si\/o's:\s(\d+),\ssync\si\/o's:\s(\d+)", + line, + ) + ib_io_p3 = re.match( + r"\s?Pending\sflushes\s\(fsync\)\slog:\s(\d+);\sbuffer\spool:\s(\d+)\s?", + line, + ) + ib_log_p1 = re.match( + r"\s?Log\ssequence\snumber\s([[a-fA-F\d]+)(?: (\d+))?", line + ) + ib_log_p2 = re.match( + r"\s?Log\sflushed\sup\sto\s+([[a-fA-F\d]+)(?: (\d+))?", line + ) + ib_semaphore = re.match( + r"\s?Mutex\sspin\swaits\s(\d+),\srounds\s(\d+),\sOS waits\s(\d+)", line + ) + ib_tnx = re.match(r"\s?Trx\sid\scounter\s([[a-fA-F\d]+)(?: (\d+))?", line) - if ib_bpool_size: - data['ib_bpool_size'] = ib_bpool_size.group(1) - elif ib_bpool_free: - data['ib_bpool_free'] = ib_bpool_free.group(1) - elif ib_bpool_dbpages: - data['ib_bpool_dbpages'] = ib_bpool_dbpages.group(1) - elif ib_bpool_modpages: - data['ib_bpool_modpages'] = ib_bpool_modpages.group(1) - elif ib_insert_buffer: - data['ib_ibuf_inserts'] = ib_insert_buffer.group(1) - data['ib_ibuf_merged_rec'] = ib_insert_buffer.group(2) - 
data['ib_ibuf_merges'] = ib_insert_buffer.group(3) - elif ib_io: - data['ib_io_read'] = ib_io.group(1) - data['ib_io_write'] = ib_io.group(2) - data['ib_io_fsync'] = ib_io.group(3) - elif ib_io_log: - data['ib_io_log'] = ib_io_log.group(1) - elif ib_io_p1: - data['ib_iop_aioread'] = ib_io_p1.group(1) - data['ib_iop_aiowrite'] = ib_io_p1.group(2) - elif ib_io_p2: - data['ib_iop_ibuf_aio'] = ib_io_p2.group(1) - data['ib_iop_log'] = ib_io_p2.group(2) - data['ib_iop_sync'] = ib_io_p2.group(3) - elif ib_io_p3: - data['ib_iop_flush_log'] = ib_io_p3.group(1) - data['ib_iop_flush_bpool'] = ib_io_p3.group(2) - elif ib_log_p1: - data['ib_log_written'] = ib_log_p1.group(1) - if ib_log_p1.group(2): - data['ib_log_written'] = int(data['ib_log_written']) + int(ib_log_p1.group(2)) - elif ib_log_p2: - data['ib_log_flush'] = ib_log_p2.group(1) - if ib_log_p2.group(2): - data['ib_log_flush'] = int(data['ib_log_flush']) + int(ib_log_p2.group(2)) - elif ib_semaphore: - data['ib_spin_waits'] = ib_semaphore.group(1) - data['ib_spin_rounds'] = ib_semaphore.group(2) - data['ib_os_waits'] = ib_semaphore.group(3) - elif ib_tnx: - data['ib_tnx'] = ib_tnx.group(1) - if ib_tnx.group(2): - data['ib_tnx'] = int(data['ib_tnx']) + int(ib_tnx.group(2)) - elif ib_b_reg: - data['ib_bpool_read'] = ib_b_reg.group(1) - data['ib_bpool_created'] = ib_b_reg.group(2) - data['ib_bpool_written'] = ib_b_reg.group(3) + if ib_bpool_size: + data["ib_bpool_size"] = ib_bpool_size.group(1) + elif ib_bpool_free: + data["ib_bpool_free"] = ib_bpool_free.group(1) + elif ib_bpool_dbpages: + data["ib_bpool_dbpages"] = ib_bpool_dbpages.group(1) + elif ib_bpool_modpages: + data["ib_bpool_modpages"] = ib_bpool_modpages.group(1) + elif ib_insert_buffer: + data["ib_ibuf_inserts"] = ib_insert_buffer.group(1) + data["ib_ibuf_merged_rec"] = ib_insert_buffer.group(2) + data["ib_ibuf_merges"] = ib_insert_buffer.group(3) + elif ib_io: + data["ib_io_read"] = ib_io.group(1) + data["ib_io_write"] = ib_io.group(2) + data["ib_io_fsync"] 
= ib_io.group(3) + elif ib_io_log: + data["ib_io_log"] = ib_io_log.group(1) + elif ib_io_p1: + data["ib_iop_aioread"] = ib_io_p1.group(1) + data["ib_iop_aiowrite"] = ib_io_p1.group(2) + elif ib_io_p2: + data["ib_iop_ibuf_aio"] = ib_io_p2.group(1) + data["ib_iop_log"] = ib_io_p2.group(2) + data["ib_iop_sync"] = ib_io_p2.group(3) + elif ib_io_p3: + data["ib_iop_flush_log"] = ib_io_p3.group(1) + data["ib_iop_flush_bpool"] = ib_io_p3.group(2) + elif ib_log_p1: + data["ib_log_written"] = ib_log_p1.group(1) + if ib_log_p1.group(2): + data["ib_log_written"] = int(data["ib_log_written"]) + int( + ib_log_p1.group(2) + ) + elif ib_log_p2: + data["ib_log_flush"] = ib_log_p2.group(1) + if ib_log_p2.group(2): + data["ib_log_flush"] = int(data["ib_log_flush"]) + int( + ib_log_p2.group(2) + ) + elif ib_semaphore: + data["ib_spin_waits"] = ib_semaphore.group(1) + data["ib_spin_rounds"] = ib_semaphore.group(2) + data["ib_os_waits"] = ib_semaphore.group(3) + elif ib_tnx: + data["ib_tnx"] = ib_tnx.group(1) + if ib_tnx.group(2): + data["ib_tnx"] = int(data["ib_tnx"]) + int(ib_tnx.group(2)) + elif ib_b_reg: + data["ib_bpool_read"] = ib_b_reg.group(1) + data["ib_bpool_created"] = ib_b_reg.group(2) + data["ib_bpool_written"] = ib_b_reg.group(3) for category in datavariables: for variable in datavariables[category]: - if variable in data: - print data[variable] + if variable in data: + print data[variable] diff --git a/snmp/nginx b/snmp/nginx index e2a64118d..201da897c 100755 --- a/snmp/nginx +++ b/snmp/nginx @@ -1,8 +1,8 @@ #!/usr/bin/env python3 -from urllib.request import urlopen import re +from urllib.request import urlopen -data = urlopen('http://localhost/nginx-status').read() +data = urlopen("http://localhost/nginx-status").read() params = {} @@ -22,7 +22,9 @@ dataorder = ["Active", "Reading", "Writing", "Waiting", "Requests"] for param in dataorder: if param == "Active": - Active = int(params["Reading"]) + int(params["Writing"]) + int(params["Waiting"]) + Active = ( + 
int(params["Reading"]) + int(params["Writing"]) + int(params["Waiting"]) + ) print(Active) else: print(params[param]) diff --git a/snmp/nginx-python2 b/snmp/nginx-python2 index 06efab6e6..fd0c574b5 100755 --- a/snmp/nginx-python2 +++ b/snmp/nginx-python2 @@ -1,28 +1,31 @@ #!/usr/bin/env python2 -import urllib2 import re -data = urllib2.urlopen('http://localhost/nginx-status').read() +import urllib2 + +data = urllib2.urlopen("http://localhost/nginx-status").read() params = {} for line in data.split("\n"): - smallstat = re.match(r"\s?Reading:\s(.*)\sWriting:\s(.*)\sWaiting:\s(.*)$", line) - req = re.match(r"\s+(\d+)\s+(\d+)\s+(\d+)", line) - if smallstat: - params["Reading"] = smallstat.group(1) - params["Writing"] = smallstat.group(2) - params["Waiting"] = smallstat.group(3) - elif req: - params["Requests"] = req.group(3) - else: - pass + smallstat = re.match(r"\s?Reading:\s(.*)\sWriting:\s(.*)\sWaiting:\s(.*)$", line) + req = re.match(r"\s+(\d+)\s+(\d+)\s+(\d+)", line) + if smallstat: + params["Reading"] = smallstat.group(1) + params["Writing"] = smallstat.group(2) + params["Waiting"] = smallstat.group(3) + elif req: + params["Requests"] = req.group(3) + else: + pass dataorder = ["Active", "Reading", "Writing", "Waiting", "Requests"] for param in dataorder: - if param == "Active": - Active = int(params["Reading"]) + int(params["Writing"]) + int(params["Waiting"]) - print Active - else: - print params[param] + if param == "Active": + Active = ( + int(params["Reading"]) + int(params["Writing"]) + int(params["Waiting"]) + ) + print Active + else: + print params[param] diff --git a/snmp/ntp-client b/snmp/ntp-client index 925155abe..0df9ee07b 100755 --- a/snmp/ntp-client +++ b/snmp/ntp-client @@ -19,17 +19,17 @@ BIN_AWK='/usr/bin/env awk' BIN_HEAD='/usr/bin/env head' CONFIG=$0".conf" -if [ -f $CONFIG ]; then - . $CONFIG +if [ -f "$CONFIG" ]; then + . 
"$CONFIG" fi -NTP_OFFSET=`$BIN_NTPQ -c rv | $BIN_GREP "offset" | $BIN_AWK -Foffset= '{print $2}' | $BIN_AWK -F, '{print $1}'` -NTP_FREQUENCY=`$BIN_NTPQ -c rv | $BIN_GREP "frequency" | $BIN_AWK -Ffrequency= '{print $2}' | $BIN_AWK -F, '{print $1}'` -NTP_SYS_JITTER=`$BIN_NTPQ -c rv | $BIN_GREP "sys_jitter" | $BIN_AWK -Fsys_jitter= '{print $2}' | $BIN_AWK -F, '{print $1}'` -NTP_CLK_JITTER=`$BIN_NTPQ -c rv | $BIN_GREP "clk_jitter" | $BIN_AWK -Fclk_jitter= '{print $2}' | $BIN_AWK -F, '{print $1}'` -NTP_WANDER=`$BIN_NTPQ -c rv | $BIN_GREP "clk_wander" | $BIN_AWK -Fclk_wander= '{print $2}' | $BIN_AWK -F, '{print $1}'` -NTP_VERSION=`$BIN_NTPQ -c rv | $BIN_GREP "version" | $BIN_AWK -F'ntpd ' '{print $2}' | $BIN_AWK -F. '{print $1}'` +NTP_OFFSET=$($BIN_NTPQ -c rv | $BIN_GREP "offset" | $BIN_AWK -Foffset= '{print $2}' | $BIN_AWK -F, '{print $1}') +NTP_FREQUENCY=$($BIN_NTPQ -c rv | $BIN_GREP "frequency" | $BIN_AWK -Ffrequency= '{print $2}' | $BIN_AWK -F, '{print $1}') +NTP_SYS_JITTER=$($BIN_NTPQ -c rv | $BIN_GREP "sys_jitter" | $BIN_AWK -Fsys_jitter= '{print $2}' | $BIN_AWK -F, '{print $1}') +NTP_CLK_JITTER=$($BIN_NTPQ -c rv | $BIN_GREP "clk_jitter" | $BIN_AWK -Fclk_jitter= '{print $2}' | $BIN_AWK -F, '{print $1}') +NTP_WANDER=$($BIN_NTPQ -c rv | $BIN_GREP "clk_wander" | $BIN_AWK -Fclk_wander= '{print $2}' | $BIN_AWK -F, '{print $1}') +NTP_VERSION=$($BIN_NTPQ -c rv | $BIN_GREP "version" | $BIN_AWK -F'ntpd ' '{print $2}' | $BIN_AWK -F. 
'{print $1}') -echo '{"data":{"offset":"'$NTP_OFFSET'","frequency":"'$NTP_FREQUENCY'","sys_jitter":"'$NTP_SYS_JITTER'","clk_jitter":"'$NTP_CLK_JITTER'","clk_wander":"'$NTP_WANDER'"},"version":"'$NTP_VERSION'","error":"0","errorString":""}' +echo '{"data":{"offset":"'"$NTP_OFFSET"'","frequency":"'"$NTP_FREQUENCY"'","sys_jitter":"'"$NTP_SYS_JITTER"'","clk_jitter":"'"$NTP_CLK_JITTER"'","clk_wander":"'"$NTP_WANDER"'"},"version":"'"$NTP_VERSION"'","error":"0","errorString":""}' exit 0 diff --git a/snmp/ntp-server.sh b/snmp/ntp-server.sh index 5871d0377..6fa2f6908 100755 --- a/snmp/ntp-server.sh +++ b/snmp/ntp-server.sh @@ -33,67 +33,67 @@ NTPQV="p11" # Don't change anything unless you know what are you doing # ################################################################ CONFIG=$0".conf" -if [ -f $CONFIG ]; then - . $CONFIG +if [ -f "$CONFIG" ]; then + . "$CONFIG" fi VERSION=1 -STRATUM=`$BIN_NTPQ -c rv | $BIN_GREP -Eow "stratum=[0-9]+" | $BIN_CUT -d "=" -f 2` +STRATUM=$($BIN_NTPQ -c rv | $BIN_GREP -Eow "stratum=[0-9]+" | $BIN_CUT -d "=" -f 2) # parse the ntpq info that requires version specific info -NTPQ_RAW=`$BIN_NTPQ -c rv | $BIN_GREP jitter | $BIN_SED 's/[[:alpha:]=,_]/ /g'` +NTPQ_RAW=$($BIN_NTPQ -c rv | $BIN_GREP jitter | $BIN_SED 's/[[:alpha:]=,_]/ /g') if [ $NTPQV = "p11" ]; then - OFFSET=`echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $3}'` - FREQUENCY=`echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $4}'` - SYS_JITTER=`echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $5}'` - CLK_JITTER=`echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $6}'` - CLK_WANDER=`echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $7}'` + OFFSET=$(echo "$NTPQ_RAW" | $BIN_AWK -F ' ' '{print $3}') + FREQUENCY=$(echo "$NTPQ_RAW" | $BIN_AWK -F ' ' '{print $4}') + SYS_JITTER=$(echo "$NTPQ_RAW" | $BIN_AWK -F ' ' '{print $5}') + CLK_JITTER=$(echo "$NTPQ_RAW" | $BIN_AWK -F ' ' '{print $6}') + CLK_WANDER=$(echo "$NTPQ_RAW" | $BIN_AWK -F ' ' '{print $7}') fi if [ $NTPQV = "p1" ]; then - OFFSET=`echo $NTPQ_RAW | $BIN_AWK -F ' ' 
'{print $2}'` - FREQUENCY=`echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $3}'` - SYS_JITTER=`echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $4}'` - CLK_JITTER=`echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $5}'` - CLK_WANDER=`echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $6}'` + OFFSET=$(echo "$NTPQ_RAW" | $BIN_AWK -F ' ' '{print $2}') + FREQUENCY=$(echo "$NTPQ_RAW" | $BIN_AWK -F ' ' '{print $3}') + SYS_JITTER=$(echo "$NTPQ_RAW" | $BIN_AWK -F ' ' '{print $4}') + CLK_JITTER=$(echo "$NTPQ_RAW" | $BIN_AWK -F ' ' '{print $5}') + CLK_WANDER=$(echo "$NTPQ_RAW" | $BIN_AWK -F ' ' '{print $6}') fi -VER=`$BIN_NTPD --version` +VER=$($BIN_NTPD --version) if [ "$VER" = '4.2.6p5' ]; then - USECMD=`echo $BIN_NTPDC -c iostats` + USECMD=$(echo "$BIN_NTPDC" -c iostats) else - USECMD=`echo $BIN_NTPQ -c iostats localhost` + USECMD=$(echo "$BIN_NTPQ" -c iostats localhost) fi -CMD2=`$USECMD | $BIN_TR -d ' ' | $BIN_CUT -d : -f 2 | $BIN_TR '\n' ' '` +CMD2=$($USECMD | $BIN_TR -d ' ' | $BIN_CUT -d : -f 2 | $BIN_TR '\n' ' ') -TIMESINCERESET=`echo $CMD2 | $BIN_AWK -F ' ' '{print $1}'` -RECEIVEDBUFFERS=`echo $CMD2 | $BIN_AWK -F ' ' '{print $2}'` -FREERECEIVEBUFFERS=`echo $CMD2 | $BIN_AWK -F ' ' '{print $3}'` -USEDRECEIVEBUFFERS=`echo $CMD2 | $BIN_AWK -F ' ' '{print $4}'` -LOWWATERREFILLS=`echo $CMD2 | $BIN_AWK -F ' ' '{print $5}'` -DROPPEDPACKETS=`echo $CMD2 | $BIN_AWK -F ' ' '{print $6}'` -IGNOREDPACKETS=`echo $CMD2 | $BIN_AWK -F ' ' '{print $7}'` -RECEIVEDPACKETS=`echo $CMD2 | $BIN_AWK -F ' ' '{print $8}'` -PACKETSSENT=`echo $CMD2 | $BIN_AWK -F ' ' '{print $9}'` -PACKETSENDFAILURES=`echo $CMD2 | $BIN_AWK -F ' ' '{print $10}'` -INPUTWAKEUPS=`echo $CMD2 | $BIN_AWK -F ' ' '{print $11}'` -USEFULINPUTWAKEUPS=`echo $CMD2 | $BIN_AWK -F ' ' '{print $12}'` +TIMESINCERESET=$(echo "$CMD2" | $BIN_AWK -F ' ' '{print $1}') +RECEIVEDBUFFERS=$(echo "$CMD2" | $BIN_AWK -F ' ' '{print $2}') +FREERECEIVEBUFFERS=$(echo "$CMD2" | $BIN_AWK -F ' ' '{print $3}') +USEDRECEIVEBUFFERS=$(echo "$CMD2" | $BIN_AWK -F ' ' '{print $4}') 
+LOWWATERREFILLS=$(echo "$CMD2" | $BIN_AWK -F ' ' '{print $5}') +DROPPEDPACKETS=$(echo "$CMD2" | $BIN_AWK -F ' ' '{print $6}') +IGNOREDPACKETS=$(echo "$CMD2" | $BIN_AWK -F ' ' '{print $7}') +RECEIVEDPACKETS=$(echo "$CMD2" | $BIN_AWK -F ' ' '{print $8}') +PACKETSSENT=$(echo "$CMD2" | $BIN_AWK -F ' ' '{print $9}') +PACKETSENDFAILURES=$(echo "$CMD2" | $BIN_AWK -F ' ' '{print $10}') +INPUTWAKEUPS=$(echo "$CMD2" | $BIN_AWK -F ' ' '{print $11}') +USEFULINPUTWAKEUPS=$(echo "$CMD2" | $BIN_AWK -F ' ' '{print $12}') -echo '{"data":{"offset":"'$OFFSET\ -'","frequency":"'$FREQUENCY\ -'","sys_jitter":"'$SYS_JITTER\ -'","clk_jitter":"'$CLK_JITTER\ -'","clk_wander":"'$CLK_WANDER\ -'","stratum":"'$STRATUM\ -'","time_since_reset":"'$TIMESINCERESET\ -'","receive_buffers":"'$RECEIVEDBUFFERS\ -'","free_receive_buffers":"'$FREERECEIVEBUFFERS\ -'","used_receive_buffers":"'$USEDRECEIVEBUFFERS\ -'","low_water_refills":"'$LOWWATERREFILLS\ -'","dropped_packets":"'$DROPPEDPACKETS\ -'","ignored_packets":"'$IGNOREDPACKETS\ -'","received_packets":"'$RECEIVEDPACKETS\ -'","packets_sent":"'$PACKETSSENT\ -'","packet_send_failures":"'$PACKETSENDFAILURES\ -'","input_wakeups":"'$PACKETSENDFAILURES\ -'","useful_input_wakeups":"'$USEFULINPUTWAKEUPS\ +echo '{"data":{"offset":"'"$OFFSET"\ +'","frequency":"'"$FREQUENCY"\ +'","sys_jitter":"'"$SYS_JITTER"\ +'","clk_jitter":"'"$CLK_JITTER"\ +'","clk_wander":"'"$CLK_WANDER"\ +'","stratum":"'"$STRATUM"\ +'","time_since_reset":"'"$TIMESINCERESET"\ +'","receive_buffers":"'"$RECEIVEDBUFFERS"\ +'","free_receive_buffers":"'"$FREERECEIVEBUFFERS"\ +'","used_receive_buffers":"'"$USEDRECEIVEBUFFERS"\ +'","low_water_refills":"'"$LOWWATERREFILLS"\ +'","dropped_packets":"'"$DROPPEDPACKETS"\ +'","ignored_packets":"'"$IGNOREDPACKETS"\ +'","received_packets":"'"$RECEIVEDPACKETS"\ +'","packets_sent":"'"$PACKETSSENT"\ +'","packet_send_failures":"'"$PACKETSENDFAILURES"\ +'","input_wakeups":"'"$PACKETSENDFAILURES"\ +'","useful_input_wakeups":"'"$USEFULINPUTWAKEUPS"\ 
'"},"error":"0","errorString":"","version":"'$VERSION'"}' diff --git a/snmp/nvidia b/snmp/nvidia index d9d73a755..8bb900f35 100644 --- a/snmp/nvidia +++ b/snmp/nvidia @@ -17,10 +17,10 @@ sed='/usr/bin/env sed' # 0 1 43 3 2 0 0 2700 862 0 0 462 4 - - 0 26 3 $nvidiasmi dmon -c 1 -s pucvmet | $grep -v ^# | $sed 's/^ *//' | $sed 's/ */,/g' | $sed 's/-/0/g' -lines=`$nvidiasmi dmon -c 1 -s pucvmet | $grep -v ^# | $sed 's/^ *//' | $sed 's/ */,/g' | $sed 's/-/0/g' | wc -l` +lines=$($nvidiasmi dmon -c 1 -s pucvmet | $grep -v ^# | $sed 's/^ *//' | $sed 's/ */,/g' | $sed 's/-/0/g' | wc -l) # if we are less than 5 then all GPUs were printed -if [ $lines -lt 5 ]; then +if [ "$lines" -lt 5 ]; then exit 0; fi @@ -35,5 +35,5 @@ do loop=0 fi - gpu=`expr $gpu + 1` + gpu=$(expr $gpu + 1) done diff --git a/snmp/opensip3-stats.sh b/snmp/opensip3-stats.sh index fa85e023b..a3302c6bd 100644 --- a/snmp/opensip3-stats.sh +++ b/snmp/opensip3-stats.sh @@ -9,11 +9,11 @@ load_average=$(ps -C opensips -o %cpu | awk '{sum += $1} END {print "Load Averag total_files=$(lsof -c opensips | wc -l) -echo $total_memory -echo $used_memory -echo $free_memory -echo $load_average -echo "Open files="$total_files +echo "$total_memory" +echo "$used_memory" +echo "$free_memory" +echo "$load_average" +echo "Open files=""$total_files" exit diff --git a/snmp/opensips-stats.sh b/snmp/opensips-stats.sh index 7127ec5ea..b8f5260a2 100644 --- a/snmp/opensips-stats.sh +++ b/snmp/opensips-stats.sh @@ -9,10 +9,10 @@ load_average=$(ps -C opensips -o %cpu | awk '{sum += $1} END {print "Load Averag total_files=$(lsof -c opensips | wc -l) -echo $total_memory -echo $used_memory -echo $free_memory -echo $load_average -echo "Open files="$total_files +echo "$total_memory" +echo "$used_memory" +echo "$free_memory" +echo "$load_average" +echo "Open files=""$total_files" exit diff --git a/snmp/osupdate b/snmp/osupdate index 6e6f8f533..1f4f94852 100755 --- a/snmp/osupdate +++ b/snmp/osupdate @@ -34,56 +34,56 @@ CMD_APK=' version' 
################################################################ if command -v zypper &>/dev/null ; then # OpenSUSE - UPDATES=`$BIN_ZYPPER $CMD_ZYPPER | $BIN_WC $CMD_WC` - if [ $UPDATES -ge 2 ]; then + UPDATES=$($BIN_ZYPPER "$CMD_ZYPPER" | $BIN_WC $CMD_WC) + if [ "$UPDATES" -ge 2 ]; then echo $(($UPDATES-2)); else echo "0"; fi elif command -v dnf &>/dev/null ; then # Fedora - UPDATES=`$BIN_DNF $CMD_DNF | $BIN_WC $CMD_WC` - if [ $UPDATES -ge 1 ]; then + UPDATES=$($BIN_DNF "$CMD_DNF" | $BIN_WC $CMD_WC) + if [ "$UPDATES" -ge 1 ]; then echo $(($UPDATES-1)); else echo "0"; fi elif command -v pacman &>/dev/null ; then # Arch - UPDATES=`$BIN_PACMAN $CMD_PACMAN | $BIN_WC $CMD_WC` - if [ $UPDATES -ge 1 ]; then + UPDATES=$($BIN_PACMAN $CMD_PACMAN | $BIN_WC $CMD_WC) + if [ "$UPDATES" -ge 1 ]; then echo $(($UPDATES-1)); else echo "0"; fi elif command -v yum &>/dev/null ; then # CentOS / Redhat - UPDATES=`$BIN_YUM $CMD_YUM | $BIN_WC $CMD_WC` - if [ $UPDATES -ge 1 ]; then + UPDATES=$($BIN_YUM "$CMD_YUM" | $BIN_WC $CMD_WC) + if [ "$UPDATES" -ge 1 ]; then echo $(($UPDATES-1)); else echo "0"; fi elif command -v apt-get &>/dev/null ; then # Debian / Devuan / Ubuntu - UPDATES=`$BIN_APT $CMD_APT | $BIN_GREP $CMD_GREP 'Inst'` - if [ $UPDATES -ge 1 ]; then - echo $UPDATES; + UPDATES=$($BIN_APT "$CMD_APT" | $BIN_GREP $CMD_GREP 'Inst') + if [ "$UPDATES" -ge 1 ]; then + echo "$UPDATES"; else echo "0"; fi elif command -v pkg &>/dev/null ; then # FreeBSD - UPDATES=`$BIN_PKG $CMD_PKG | $BIN_WC $CMD_WC` - if [ $UPDATES -ge 1 ]; then - echo $UPDATES; + UPDATES=$($BIN_PKG "$CMD_PKG" | $BIN_WC $CMD_WC) + if [ "$UPDATES" -ge 1 ]; then + echo "$UPDATES"; else echo "0"; fi elif command -v apk &>/dev/null ; then # Alpine - UPDATES=`$BIN_APK $CMD_APK | $BIN_WC $CMD_WC` - if [ $UPDATES -ge 2 ]; then + UPDATES=$($BIN_APK "$CMD_APK" | $BIN_WC $CMD_WC) + if [ "$UPDATES" -ge 2 ]; then echo $(($UPDATES-1)); else echo "0"; diff --git a/snmp/phpfpmsp b/snmp/phpfpmsp index 3eb0e0c50..a4d7a4339 100644 --- 
a/snmp/phpfpmsp +++ b/snmp/phpfpmsp @@ -78,7 +78,7 @@ phpfpm_slow_requests=0 # local opts="${1}" url="${2}" - phpfpm_response=($(curl -Ss ${opts} "${url}")) + phpfpm_response=($(curl -Ss "${opts}" "${url}")) [ $? -ne 0 -o "${#phpfpm_response[@]}" -eq 0 ] && exit 1 if [[ "${phpfpm_response[0]}" != "pool:" \ @@ -131,16 +131,16 @@ phpfpm_slow_requests=0 exit 1 fi -echo $phpfpm_pool -echo $phpfpm_start_time -echo $phpfpm_start_since -echo $phpfpm_accepted_conn -echo $phpfpm_listen_queue -echo $phpfpm_max_listen_queue -echo $phpfpm_listen_queue_len -echo $phpfpm_idle_processes -echo $phpfpm_active_processes -echo $phpfpm_total_processes -echo $phpfpm_max_active_processes -echo $phpfpm_max_children_reached +echo "$phpfpm_pool" +echo "$phpfpm_start_time" +echo "$phpfpm_start_since" +echo "$phpfpm_accepted_conn" +echo "$phpfpm_listen_queue" +echo "$phpfpm_max_listen_queue" +echo "$phpfpm_listen_queue_len" +echo "$phpfpm_idle_processes" +echo "$phpfpm_active_processes" +echo "$phpfpm_total_processes" +echo "$phpfpm_max_active_processes" +echo "$phpfpm_max_children_reached" echo $phpfpm_slow_requests diff --git a/snmp/pi-hole b/snmp/pi-hole index f5cc5c9ab..342ef105b 100644 --- a/snmp/pi-hole +++ b/snmp/pi-hole @@ -55,19 +55,19 @@ debug() { echo '[ok] API_URL is set' fi - if [ -z $API_AUTH_KEY ]; then + if [ -z "$API_AUTH_KEY" ]; then echo '[warning] API_AUTH_KEY is not set, some values will not be available' else echo '[ok] API_AUTH_KEY is set' fi - if [ -z ${URL_READ_ONLY} ]; then + if [ -z "${URL_READ_ONLY}" ]; then echo '[error] URL_READ_ONLY is not set' else echo '[ok] URL_READ_ONLY is set' fi - if [ -z ${URL_QUERY_TYPE} ]; then + if [ -z "${URL_QUERY_TYPE}" ]; then echo '[error] URL_QUERY_TYPE is not set' else echo '[ok] URL_QUERY_TYPE not set' @@ -87,20 +87,20 @@ debug() { exportdata() { # domains_being_blocked / dns_query_total / ads_blocked_today / ads_percentage_today # unique_domains / queries_forwarded / queries_cached - GET_STATS=$(curl -s $API_URL$URL_READ_ONLY 
| jq '.domains_being_blocked, .dns_queries_today, .ads_blocked_today, .ads_percentage_today, .unique_domains, .queries_forwarded, .queries_cached') - echo $GET_STATS | tr " " "\n" + GET_STATS=$(curl -s $API_URL"$URL_READ_ONLY" | jq '.domains_being_blocked, .dns_queries_today, .ads_blocked_today, .ads_percentage_today, .unique_domains, .queries_forwarded, .queries_cached') + echo "$GET_STATS" | tr " " "\n" # A / AAAA / PTR / SRV - GET_QUERY_TYPE=$(curl -s $API_URL$URL_QUERY_TYPE$API_AUTH_KEY | jq '.[]["A (IPv4)", "AAAA (IPv6)", "PTR", "SRV"]') - echo $GET_QUERY_TYPE | tr " " "\n" + GET_QUERY_TYPE=$(curl -s $API_URL"$URL_QUERY_TYPE""$API_AUTH_KEY" | jq '.[]["A (IPv4)", "AAAA (IPv6)", "PTR", "SRV"]') + echo "$GET_QUERY_TYPE" | tr " " "\n" # Find number of DHCP address in scope and current lease count # case-insensitive compare, just in case :) if [ "${DHCP_ACTIVE,,}" = "true" ]; then # Max IP addresses in scope # Convert IPs to decimal and subtract - IFS="." read -r -a array <<< $DHCP_START + IFS="." read -r -a array <<< "$DHCP_START" DHCPSTARTDECIMAL=$(( (${array[0]}*256**3) + (${array[1]}*256**2) + (${array[2]}*256) + ${array[3]} )) - IFS="." read -r -a array <<< $DHCP_END + IFS="." 
read -r -a array <<< "$DHCP_END" DHCPENDDECIMAL=$(( (${array[0]}*256**3) + (${array[1]}*256**2) + (${array[2]}*256) + ${array[3]} )) expr $DHCPENDDECIMAL - $DHCPSTARTDECIMAL # Current lease count @@ -111,7 +111,7 @@ exportdata() { fi } -if [ -z $* ]; then +if [ -z "$*" ]; then exportdata fi expr "$*" : ".*--help" > /dev/null && usage diff --git a/snmp/postfix-queues b/snmp/postfix-queues index dc1951cc1..1d3491e61 100755 --- a/snmp/postfix-queues +++ b/snmp/postfix-queues @@ -8,6 +8,6 @@ QUEUES="incoming active deferred hold" for i in $QUEUES; do - COUNT=`qshape $i | grep TOTAL | awk '{print $2}'` + COUNT=$(qshape "$i" | grep TOTAL | awk '{print $2}') printf "$COUNT\n" done diff --git a/snmp/powerdns-dnsdist b/snmp/powerdns-dnsdist index 0572fb5cd..48e5d1ce4 100644 --- a/snmp/powerdns-dnsdist +++ b/snmp/powerdns-dnsdist @@ -6,7 +6,7 @@ API_AUTH_USER="admin" API_AUTH_PASS="" API_URL="" API_STATS="jsonstat?command=stats" -TMP_FILE=`/usr/bin/mktemp` +TMP_FILE=$(/usr/bin/mktemp) #/ Description: BASH script to get PowerDNS dnsdist stats #/ Examples: ./powerdns-dnsdist @@ -65,100 +65,100 @@ debug() { exportdata() { # get current data - curl -s -u$API_AUTH_USER:$API_AUTH_PASS $API_URL$API_STATS | jq '.' > $TMP_FILE + curl -s -u$API_AUTH_USER:"$API_AUTH_PASS" "$API_URL""$API_STATS" | jq '.' 
> "$TMP_FILE" # generate export values - JSON_VALUES=$(cat $TMP_FILE) + JSON_VALUES=$(cat "$TMP_FILE") - STAT_CACHE_HIT=$(echo $JSON_VALUES | jq '."cache-hits"') - echo $STAT_CACHE_HIT + STAT_CACHE_HIT=$(echo "$JSON_VALUES" | jq '."cache-hits"') + echo "$STAT_CACHE_HIT" - STAT_CACHE_MISS=$(echo $JSON_VALUES | jq '."cache-misses"') - echo $STAT_CACHE_MISS + STAT_CACHE_MISS=$(echo "$JSON_VALUES" | jq '."cache-misses"') + echo "$STAT_CACHE_MISS" - STAT_DOWNSTREAM_ERR=$(echo $JSON_VALUES | jq '."downstream-send-errors"') - echo $STAT_DOWNSTREAM_ERR + STAT_DOWNSTREAM_ERR=$(echo "$JSON_VALUES" | jq '."downstream-send-errors"') + echo "$STAT_DOWNSTREAM_ERR" - STAT_DOWNSTREAM_TIMEOUT=$(echo $JSON_VALUES | jq '."downstream-timeouts"') - echo $STAT_DOWNSTREAM_TIMEOUT + STAT_DOWNSTREAM_TIMEOUT=$(echo "$JSON_VALUES" | jq '."downstream-timeouts"') + echo "$STAT_DOWNSTREAM_TIMEOUT" - STAT_DYNAMIC_BLOCK_SIZE=$(echo $JSON_VALUES | jq '."dyn-block-nmg-size"') - echo $STAT_DYNAMIC_BLOCK_SIZE + STAT_DYNAMIC_BLOCK_SIZE=$(echo "$JSON_VALUES" | jq '."dyn-block-nmg-size"') + echo "$STAT_DYNAMIC_BLOCK_SIZE" - STAT_DYNAMIC_BLOCK=$(echo $JSON_VALUES | jq '."dyn-blocked"') - echo $STAT_DYNAMIC_BLOCK + STAT_DYNAMIC_BLOCK=$(echo "$JSON_VALUES" | jq '."dyn-blocked"') + echo "$STAT_DYNAMIC_BLOCK" - STAT_QUERIES_COUNT=$(echo $JSON_VALUES | jq '.queries') - echo $STAT_QUERIES_COUNT + STAT_QUERIES_COUNT=$(echo "$JSON_VALUES" | jq '.queries') + echo "$STAT_QUERIES_COUNT" - STAT_QUERIES_RECURSIVE=$(echo $JSON_VALUES | jq '.rdqueries') - echo $STAT_QUERIES_RECURSIVE + STAT_QUERIES_RECURSIVE=$(echo "$JSON_VALUES" | jq '.rdqueries') + echo "$STAT_QUERIES_RECURSIVE" - STAT_QUERIES_EMPTY=$(echo $JSON_VALUES | jq '."empty-queries"') - echo $STAT_QUERIES_EMPTY + STAT_QUERIES_EMPTY=$(echo "$JSON_VALUES" | jq '."empty-queries"') + echo "$STAT_QUERIES_EMPTY" - STAT_QUERIES_DROP_NO_POLICY=$(echo $JSON_VALUES | jq '."no-policy"') - echo $STAT_QUERIES_DROP_NO_POLICY + STAT_QUERIES_DROP_NO_POLICY=$(echo 
"$JSON_VALUES" | jq '."no-policy"') + echo "$STAT_QUERIES_DROP_NO_POLICY" - STAT_QUERIES_DROP_NC=$(echo $JSON_VALUES | jq '."noncompliant-queries"') - echo $STAT_QUERIES_DROP_NC + STAT_QUERIES_DROP_NC=$(echo "$JSON_VALUES" | jq '."noncompliant-queries"') + echo "$STAT_QUERIES_DROP_NC" - STAT_QUERIES_DROP_NC_ANSWER=$(echo $JSON_VALUES | jq '."noncompliant-responses"') - echo $STAT_QUERIES_DROP_NC_ANSWER + STAT_QUERIES_DROP_NC_ANSWER=$(echo "$JSON_VALUES" | jq '."noncompliant-responses"') + echo "$STAT_QUERIES_DROP_NC_ANSWER" - STAT_QUERIES_SELF_ANSWER=$(echo $JSON_VALUES | jq '."self-answered"') - echo $STAT_QUERIES_SELF_ANSWER + STAT_QUERIES_SELF_ANSWER=$(echo "$JSON_VALUES" | jq '."self-answered"') + echo "$STAT_QUERIES_SELF_ANSWER" - STAT_QUERIES_SERVFAIL=$(echo $JSON_VALUES | jq '."servfail-responses"') - echo $STAT_QUERIES_SERVFAIL + STAT_QUERIES_SERVFAIL=$(echo "$JSON_VALUES" | jq '."servfail-responses"') + echo "$STAT_QUERIES_SERVFAIL" - STAT_QUERIES_FAILURE=$(echo $JSON_VALUES | jq '."trunc-failures"') - echo $STAT_QUERIES_FAILURE + STAT_QUERIES_FAILURE=$(echo "$JSON_VALUES" | jq '."trunc-failures"') + echo "$STAT_QUERIES_FAILURE" - STAT_QUERIES_ACL_DROPS=$(echo $JSON_VALUES | jq '."acl-drops"') - echo $STAT_QUERIES_ACL_DROPS + STAT_QUERIES_ACL_DROPS=$(echo "$JSON_VALUES" | jq '."acl-drops"') + echo "$STAT_QUERIES_ACL_DROPS" - STAT_RULE_DROP=$(echo $JSON_VALUES | jq '."rule-drop"') - echo $STAT_RULE_DROP + STAT_RULE_DROP=$(echo "$JSON_VALUES" | jq '."rule-drop"') + echo "$STAT_RULE_DROP" - STAT_RULE_NXDOMAIN=$(echo $JSON_VALUES | jq '."rule-nxdomain"') - echo $STAT_RULE_NXDOMAIN + STAT_RULE_NXDOMAIN=$(echo "$JSON_VALUES" | jq '."rule-nxdomain"') + echo "$STAT_RULE_NXDOMAIN" - STAT_RULE_REFUSED=$(echo $JSON_VALUES | jq '."rule-refused"') - echo $STAT_RULE_REFUSED + STAT_RULE_REFUSED=$(echo "$JSON_VALUES" | jq '."rule-refused"') + echo "$STAT_RULE_REFUSED" - STAT_LATENCY_AVG_100=$(echo $JSON_VALUES | jq '."latency-avg100"') - echo $STAT_LATENCY_AVG_100 + 
STAT_LATENCY_AVG_100=$(echo "$JSON_VALUES" | jq '."latency-avg100"') + echo "$STAT_LATENCY_AVG_100" - STAT_LATENCY_AVG_1000=$(echo $JSON_VALUES | jq '."latency-avg1000"') - echo $STAT_LATENCY_AVG_1000 + STAT_LATENCY_AVG_1000=$(echo "$JSON_VALUES" | jq '."latency-avg1000"') + echo "$STAT_LATENCY_AVG_1000" - STAT_LATENCY_AVG_10000=$(echo $JSON_VALUES | jq '."latency-avg10000"') - echo $STAT_LATENCY_AVG_10000 + STAT_LATENCY_AVG_10000=$(echo "$JSON_VALUES" | jq '."latency-avg10000"') + echo "$STAT_LATENCY_AVG_10000" - STAT_LATENCY_AVG_1000000=$(echo $JSON_VALUES | jq '."latency-avg1000000"') - echo $STAT_LATENCY_AVG_1000000 + STAT_LATENCY_AVG_1000000=$(echo "$JSON_VALUES" | jq '."latency-avg1000000"') + echo "$STAT_LATENCY_AVG_1000000" - STAT_LATENCY_SLOW=$(echo $JSON_VALUES | jq '."latency-slow"') - echo $STAT_LATENCY_SLOW + STAT_LATENCY_SLOW=$(echo "$JSON_VALUES" | jq '."latency-slow"') + echo "$STAT_LATENCY_SLOW" - STAT_LATENCY_0_1=$(echo $JSON_VALUES | jq '."latency0-1"') - echo $STAT_LATENCY_0_1 + STAT_LATENCY_0_1=$(echo "$JSON_VALUES" | jq '."latency0-1"') + echo "$STAT_LATENCY_0_1" - STAT_LATENCY_1_10=$(echo $JSON_VALUES | jq '."latency1-10"') - echo $STAT_LATENCY_1_10 + STAT_LATENCY_1_10=$(echo "$JSON_VALUES" | jq '."latency1-10"') + echo "$STAT_LATENCY_1_10" - STAT_LATENCY_10_50=$(echo $JSON_VALUES | jq '."latency10-50"') - echo $STAT_LATENCY_10_50 + STAT_LATENCY_10_50=$(echo "$JSON_VALUES" | jq '."latency10-50"') + echo "$STAT_LATENCY_10_50" - STAT_LATENCY_50_100=$(echo $JSON_VALUES | jq '."latency50-100"') - echo $STAT_LATENCY_50_100 + STAT_LATENCY_50_100=$(echo "$JSON_VALUES" | jq '."latency50-100"') + echo "$STAT_LATENCY_50_100" - STAT_LATENCY_100_1000=$(echo $JSON_VALUES | jq '."latency100-1000"') - echo $STAT_LATENCY_100_1000 + STAT_LATENCY_100_1000=$(echo "$JSON_VALUES" | jq '."latency100-1000"') + echo "$STAT_LATENCY_100_1000" } -if [ -z $* ]; then +if [ -z "$*" ]; then exportdata fi expr "$*" : ".*--help" > /dev/null && usage diff --git 
a/snmp/powerdns-recursor b/snmp/powerdns-recursor index d673738bf..64c764c0a 100755 --- a/snmp/powerdns-recursor +++ b/snmp/powerdns-recursor @@ -1,12 +1,13 @@ #!/usr/bin/python -import json, subprocess -from subprocess import Popen, PIPE +import json +import subprocess +from subprocess import PIPE, Popen -input = Popen(['rec_control', 'get-all'], stdout=PIPE).communicate()[0] +input = Popen(["rec_control", "get-all"], stdout=PIPE).communicate()[0] data = [] for line in input.splitlines(): item = line.split() - data.append({'name': item[0].decode(), 'value': int(item[1].decode())}) + data.append({"name": item[0].decode(), "value": int(item[1].decode())}) print(json.dumps(data)) diff --git a/snmp/powerdns.py b/snmp/powerdns.py index 75cc1fae8..088273da7 100755 --- a/snmp/powerdns.py +++ b/snmp/powerdns.py @@ -3,24 +3,26 @@ import json import subprocess -pdnscontrol = '/usr/bin/pdns_control' +pdnscontrol = "/usr/bin/pdns_control" -process = subprocess.Popen([pdnscontrol, 'show', '*'], stdout=subprocess.PIPE, stderr=subprocess.PIPE) +process = subprocess.Popen( + [pdnscontrol, "show", "*"], stdout=subprocess.PIPE, stderr=subprocess.PIPE +) input = process.communicate() stdout = input[0].decode() stderr = input[1].decode() data = {} -for var in stdout.split(','): - if '=' in var: - key, value = var.split('=') +for var in stdout.split(","): + if "=" in var: + key, value = var.split("=") data[key] = value output = { - 'version': 1, - 'error': process.returncode, - 'errorString': stderr, - 'data': data + "version": 1, + "error": process.returncode, + "errorString": stderr, + "data": data, } print(json.dumps(output)) diff --git a/snmp/powermon-snmp.py b/snmp/powermon-snmp.py index 2b3ed74b6..e280fe710 100755 --- a/snmp/powermon-snmp.py +++ b/snmp/powermon-snmp.py @@ -67,22 +67,22 @@ ### Libraries -import os -import sys import getopt import json +import os import re import shutil import subprocess +import sys ### Option defaults -method = "" # must be one of methods array 
+method = "" # must be one of methods array verbose = False warnings = False -librenms = True # Return results in a JSON format suitable for Librenms - # Set to false to return JSON data only -pretty = False # Pretty printing +librenms = True # Return results in a JSON format suitable for Librenms +# Set to false to return JSON data only +pretty = False # Pretty printing ### Globals @@ -90,40 +90,51 @@ errorString = "" data = {} result = {} -usage = "USAGE: " + os.path.basename(__file__) + " [-h|--help] |" \ - + " [-m|--method ] [-N|--no-librenms] [-p|--pretty]" \ - + " [-v|--verbose] [-w|--warnings] | -l|--list-methods | -h|--help" +usage = ( + "USAGE: " + + os.path.basename(__file__) + + " [-h|--help] |" + + " [-m|--method ] [-N|--no-librenms] [-p|--pretty]" + + " [-v|--verbose] [-w|--warnings] | -l|--list-methods | -h|--help" +) methods = ["sensors", "hpasmcli"] -#costPerkWh = 0.15 # <<<< UNCOMMENT +# costPerkWh = 0.15 # <<<< UNCOMMENT ### General functions + def errorMsg(message): sys.stderr.write("ERROR: " + message + "\n") + def usageError(message="Invalid argument"): errorMsg(message) sys.stderr.write(usage + "\n") sys.exit(1) + def warningMsg(message): if verbose or warnings: sys.stderr.write("WARN: " + message + "\n") + def verboseMsg(message): if verbose: sys.stderr.write("INFO: " + message + "\n") + def listMethods(): global verbose verbose = True - verboseMsg("Available methods are: " + str(methods).strip('[]')) + verboseMsg("Available methods are: " + str(methods).strip("[]")) + ### Data functions + def getData(method): if method == "sensors": - data = getSensorData() + data = getSensorData() elif method == "hpasmcli": data = getHPASMData() @@ -132,6 +143,7 @@ def getData(method): return data + def getSensorData(): global error, errorString error = 2 @@ -139,6 +151,7 @@ def getSensorData(): try: import sensors + sensors.init() except ModuleNotFoundError as e: @@ -173,7 +186,7 @@ def getSensorData(): error = 0 errorString = "" - junk, meter_id = 
chip_name.split('acpi-', 1) + junk, meter_id = chip_name.split("acpi-", 1) sdata["meter"][meter_id] = {} for feature in chip: @@ -192,91 +205,105 @@ def getSensorData(): sdata[chip_name][feature_label] = feature.get_value() except: - es = sys.exc_info() - error = 1 - errorString = "Unable to get data: General exception: " + str(es) + es = sys.exc_info() + error = 1 + errorString = "Unable to get data: General exception: " + str(es) finally: sensors.cleanup() return sdata + def getHPASMData(): global error, errorString - exe = shutil.which('hpasmcli') - #if not os.access(candidate, os.W_OK): - cmd = [exe, '-s', 'show powermeter; show powersupply'] + exe = shutil.which("hpasmcli") + # if not os.access(candidate, os.W_OK): + cmd = [exe, "-s", "show powermeter; show powersupply"] warningMsg("hpasmcli only runs as root") try: - output = subprocess.run(cmd, capture_output=True, check=True, text=True, timeout=2) + output = subprocess.run( + cmd, capture_output=True, check=True, text=True, timeout=2 + ) except subprocess.CalledProcessError as e: - errorMsg(str(e) + ": " + str(e.stdout).strip('\n')) + errorMsg(str(e) + ": " + str(e.stdout).strip("\n")) sys.exit(1) - rawdata = str(output.stdout).replace('\t', ' ').replace('\n ', '\n').split('\n') + rawdata = str(output.stdout).replace("\t", " ").replace("\n ", "\n").split("\n") hdata = {} hdata["meter"] = {} hdata["psu"] = {} - re_meter = "^Power Meter #([0-9]+)" - re_meter_reading = "^Power Reading :" - re_psu = "^Power supply #[0-9]+" - re_psu_present = "^Present :" + re_meter = "^Power Meter #([0-9]+)" + re_meter_reading = "^Power Reading :" + re_psu = "^Power supply #[0-9]+" + re_psu_present = "^Present :" re_psu_redundant = "^Redundant:" re_psu_condition = "^Condition:" - re_psu_hotplug = "^Hotplug :" - re_psu_reading = "^Power :" + re_psu_hotplug = "^Hotplug :" + re_psu_reading = "^Power :" for line in rawdata: if re.match(re_meter, line): verboseMsg("found power meter: " + line) - junk, meter_id = line.split('#', 1) + 
junk, meter_id = line.split("#", 1) hdata["meter"][meter_id] = {} elif re.match(re_meter_reading, line): verboseMsg("found power meter reading: " + line) - junk, meter_reading = line.split(':', 1) + junk, meter_reading = line.split(":", 1) hdata["meter"][meter_id]["reading"] = meter_reading.strip() elif re.match(re_psu, line): verboseMsg("found power supply: " + line) - junk, psu_id = line.split('#', 1) + junk, psu_id = line.split("#", 1) hdata["psu"][psu_id] = {} elif re.match(re_psu_present, line): verboseMsg("found power supply present: " + line) - junk, psu_present = line.split(':', 1) + junk, psu_present = line.split(":", 1) hdata["psu"][psu_id]["present"] = psu_present.strip() elif re.match(re_psu_redundant, line): verboseMsg("found power supply redundant: " + line) - junk, psu_redundant = line.split(':', 1) + junk, psu_redundant = line.split(":", 1) hdata["psu"][psu_id]["redundant"] = psu_redundant.strip() elif re.match(re_psu_condition, line): verboseMsg("found power supply condition: " + line) - junk, psu_condition = line.split(':', 1) + junk, psu_condition = line.split(":", 1) hdata["psu"][psu_id]["condition"] = psu_condition.strip() elif re.match(re_psu_hotplug, line): verboseMsg("found power supply hotplug: " + line) - junk, psu_hotplug = line.split(':', 1) + junk, psu_hotplug = line.split(":", 1) hdata["psu"][psu_id]["hotplug"] = psu_hotplug.strip() elif re.match(re_psu_reading, line): verboseMsg("found power supply reading: " + line) - junk, psu_reading = line.split(':', 1) - hdata["psu"][psu_id]["reading"] = psu_reading.replace('Watts', '').strip() + junk, psu_reading = line.split(":", 1) + hdata["psu"][psu_id]["reading"] = psu_reading.replace("Watts", "").strip() return hdata + # Argument Parsing try: opts, args = getopt.gnu_getopt( - sys.argv[1:], 'm:hlNpvw', ['method', 'help', 'list-methods', 'no-librenms', 'pretty', 'verbose', 'warnings'] + sys.argv[1:], + "m:hlNpvw", + [ + "method", + "help", + "list-methods", + "no-librenms", + "pretty", + 
"verbose", + "warnings", + ], ) if len(args) != 0: usageError("Unknown argument") @@ -336,8 +363,8 @@ def getHPASMData(): data["reading"] = data["meter"]["1"]["reading"] # Example 2 - sum the two power supplies and apply a power factor - #pf = 0.95 - #data["reading"] = str( float(data["psu"]["1"]["reading"]) \ + # pf = 0.95 + # data["reading"] = str( float(data["psu"]["1"]["reading"]) \ # + float(data["psu"]["2"]["reading"]) / pf ) except: @@ -345,13 +372,13 @@ def getHPASMData(): # Build result if librenms: - result['version']=version - result['error']=error - result['errorString']=errorString - result['data']=data + result["version"] = version + result["error"] = error + result["errorString"] = errorString + result["data"] = data else: - result=data + result = data # Print result if pretty: @@ -359,4 +386,3 @@ def getHPASMData(): else: print(json.dumps(result)) - diff --git a/snmp/puppet_agent.py b/snmp/puppet_agent.py index 9d0f343cb..9cb64f17b 100755 --- a/snmp/puppet_agent.py +++ b/snmp/puppet_agent.py @@ -1,17 +1,17 @@ #!/usr/bin/env python3 import json -import yaml from os.path import isfile from time import time +import yaml output = {} -output['error'] = 0 -output['errorString'] = "" -output['version'] = 1 +output["error"] = 0 +output["errorString"] = "" +output["version"] = 1 -CONFIGFILE = '/etc/snmp/puppet.json' +CONFIGFILE = "/etc/snmp/puppet.json" # optional config file # { # "agent": { @@ -20,13 +20,15 @@ # } -summary_files = ['/var/cache/puppet/state/last_run_summary.yaml', - '/opt/puppetlabs/puppet/cache/state/last_run_summary.yaml'] +summary_files = [ + "/var/cache/puppet/state/last_run_summary.yaml", + "/opt/puppetlabs/puppet/cache/state/last_run_summary.yaml", +] def parse_yaml_file(filename): try: - yaml_data = yaml.load(open(filename, 'r')) + yaml_data = yaml.load(open(filename, "r")) msg = None except yaml.scanner.ScannerError as e: yaml_data = [] @@ -42,7 +44,7 @@ def time_processing(data): new_data = {} for k in data.keys(): - if k == 
'last_run': + if k == "last_run": # generate difference to last run (seconds) new_data[k] = round(time() - data[k]) continue @@ -53,36 +55,36 @@ def time_processing(data): def processing(data): new_data = {} - for k in ['changes', 'events', 'resources', 'version']: + for k in ["changes", "events", "resources", "version"]: new_data[k] = data[k] - new_data['time'] = time_processing(data['time']) + new_data["time"] = time_processing(data["time"]) return new_data # extend last_run_summary_file list with optional custom file if isfile(CONFIGFILE): - with open(CONFIGFILE, 'r') as json_file: + with open(CONFIGFILE, "r") as json_file: try: configfile = json.load(json_file) except json.decoder.JSONDecodeError as e: - output['error'] = 1 - output['errorString'] = "Configfile Error: '%s'" % e + output["error"] = 1 + output["errorString"] = "Configfile Error: '%s'" % e else: configfile = None -if not output['error'] and configfile: +if not output["error"] and configfile: try: - if 'agent' in configfile.keys(): - custom_summary_file = configfile['agent']['summary_file'] + if "agent" in configfile.keys(): + custom_summary_file = configfile["agent"]["summary_file"] summary_files.insert(0, custom_summary_file) - except KeyError: - output['error'] = 1 - output['errorString'] = "Configfile Error: '%s'" % e + except KeyError as e: + output["error"] = 1 + output["errorString"] = "Configfile Error: '%s'" % e # search existing summary file from list -if not output['error']: +if not output["error"]: summary_file = None for sum_file in summary_files: if isfile(sum_file): @@ -90,17 +92,17 @@ def processing(data): break if not summary_file: - output['error'] = 1 - output['errorString'] = "no puppet agent run summary file found" + output["error"] = 1 + output["errorString"] = "no puppet agent run summary file found" # open summary file -if not output['error']: +if not output["error"]: msg, data = parse_yaml_file(summary_file) if msg: - output['error'] = 1 - output['errorString'] = msg + 
output["error"] = 1 + output["errorString"] = msg -output['data'] = processing(data) +output["data"] = processing(data) -print (json.dumps(output)) +print(json.dumps(output)) diff --git a/snmp/pureftpd.py b/snmp/pureftpd.py index a2c75672e..f75ec4c7a 100755 --- a/snmp/pureftpd.py +++ b/snmp/pureftpd.py @@ -1,48 +1,61 @@ #!/usr/bin/env python3 -import os import json +import os -CONFIGFILE = '/etc/snmp/pureftpd.json' +CONFIGFILE = "/etc/snmp/pureftpd.json" -pureftpwho_cmd = '/usr/sbin/pure-ftpwho' -pureftpwho_args = '-v -s -n' +pureftpwho_cmd = "/usr/sbin/pure-ftpwho" +pureftpwho_args = "-v -s -n" output_data = {} -output_data['version'] = 1 -output_data['errorString'] = "" -output_data['error'] = 0 +output_data["version"] = 1 +output_data["errorString"] = "" +output_data["error"] = 0 if os.path.isfile(CONFIGFILE): - with open(CONFIGFILE, 'r') as json_file: + with open(CONFIGFILE, "r") as json_file: try: configfile = json.load(json_file) except json.decoder.JSONDecodeError as e: - output_data['error'] = 1 - output_data['errorString'] = "Configfile Error: '%s'" % e + output_data["error"] = 1 + output_data["errorString"] = "Configfile Error: '%s'" % e else: configfile = None -if not output_data['error'] and configfile: +if not output_data["error"] and configfile: try: - if 'pureftpwho_cmd' in configfile.keys(): - pureftpwho_cmd = configfile['pureftpwho_cmd'] - except KeyError: - output_data['error'] = 1 - output_data['errorString'] = "Configfile Error: '%s'" % e + if "pureftpwho_cmd" in configfile.keys(): + pureftpwho_cmd = configfile["pureftpwho_cmd"] + except KeyError as e: + output_data["error"] = 1 + output_data["errorString"] = "Configfile Error: '%s'" % e -output = os.popen(pureftpwho_cmd + ' ' + pureftpwho_args).read() +output = os.popen(pureftpwho_cmd + " " + pureftpwho_args).read() data = {} -for line in output.split('\n'): +for line in output.split("\n"): if not len(line): continue - pid, acct, time, state, file, peer, local, port, transfered, total, percent, 
bandwidth = line.split('|') + ( + pid, + acct, + time, + state, + file, + peer, + local, + port, + transfered, + total, + percent, + bandwidth, + ) = line.split("|") if "IDLE" in state: state = "IDLE" @@ -54,13 +67,11 @@ if acct not in data.keys(): data[acct] = {} if state not in data[acct]: - data[acct][state] = {'bitrate': 0, - 'connections': 0 - } + data[acct][state] = {"bitrate": 0, "connections": 0} bandwidth_bit = int(bandwidth) * 1024 * 8 - data[acct][state]['bitrate'] += bandwidth_bit - data[acct][state]['connections'] += 1 + data[acct][state]["bitrate"] += bandwidth_bit + data[acct][state]["connections"] += 1 -output_data['data'] = data +output_data["data"] = data -print (json.dumps(output_data)) +print(json.dumps(output_data)) diff --git a/snmp/raspberry.sh b/snmp/raspberry.sh index 41f2902a0..404e81528 100755 --- a/snmp/raspberry.sh +++ b/snmp/raspberry.sh @@ -20,27 +20,27 @@ getStatusMJPG='codec_enabled MJPG' getStatusWMV9='codec_enabled WMV9' $picmd $getTemp | $pised 's|[^0-9.]||g' -$picmd $getVoltsCore | $pised 's|[^0-9.]||g' -$picmd $getVoltsRamC | $pised 's|[^0-9.]||g' -$picmd $getVoltsRamI | $pised 's|[^0-9.]||g' -$picmd $getVoltsRamP | $pised 's|[^0-9.]||g' -$picmd $getFreqArm | $pised 's/frequency([0-9]*)=//g' -$picmd $getFreqCore | $pised 's/frequency([0-9]*)=//g' -$picmd $getStatusH264 | $pised 's/H264=//g' -$picmd $getStatusMPG2 | $pised 's/MPG2=//g' -$picmd $getStatusWVC1 | $pised 's/WVC1=//g' -$picmd $getStatusMPG4 | $pised 's/MPG4=//g' -$picmd $getStatusMJPG | $pised 's/MJPG=//g' -$picmd $getStatusWMV9 | $pised 's/WMV9=//g' -$picmd $getStatusH264 | $pised 's/enabled/2/g' -$picmd $getStatusMPG2 | $pised 's/enabled/2/g' -$picmd $getStatusWVC1 | $pised 's/enabled/2/g' -$picmd $getStatusMPG4 | $pised 's/enabled/2/g' -$picmd $getStatusMJPG | $pised 's/enabled/2/g' -$picmd $getStatusWMV9 | $pised 's/enabled/2/g' -$picmd $getStatusH264 | $pised 's/disabled/1/g' -$picmd $getStatusMPG2 | $pised 's/disabled/1/g' -$picmd $getStatusWVC1 | $pised 
's/disabled/1/g' -$picmd $getStatusMPG4 | $pised 's/disabled/1/g' -$picmd $getStatusMJPG | $pised 's/disabled/1/g' -$picmd $getStatusWMV9 | $pised 's/disabled/1/g' +$picmd "$getVoltsCore" | $pised 's|[^0-9.]||g' +$picmd "$getVoltsRamC" | $pised 's|[^0-9.]||g' +$picmd "$getVoltsRamI" | $pised 's|[^0-9.]||g' +$picmd "$getVoltsRamP" | $pised 's|[^0-9.]||g' +$picmd "$getFreqArm" | $pised 's/frequency([0-9]*)=//g' +$picmd "$getFreqCore" | $pised 's/frequency([0-9]*)=//g' +$picmd "$getStatusH264" | $pised 's/H264=//g' +$picmd "$getStatusMPG2" | $pised 's/MPG2=//g' +$picmd "$getStatusWVC1" | $pised 's/WVC1=//g' +$picmd "$getStatusMPG4" | $pised 's/MPG4=//g' +$picmd "$getStatusMJPG" | $pised 's/MJPG=//g' +$picmd "$getStatusWMV9" | $pised 's/WMV9=//g' +$picmd "$getStatusH264" | $pised 's/enabled/2/g' +$picmd "$getStatusMPG2" | $pised 's/enabled/2/g' +$picmd "$getStatusWVC1" | $pised 's/enabled/2/g' +$picmd "$getStatusMPG4" | $pised 's/enabled/2/g' +$picmd "$getStatusMJPG" | $pised 's/enabled/2/g' +$picmd "$getStatusWMV9" | $pised 's/enabled/2/g' +$picmd "$getStatusH264" | $pised 's/disabled/1/g' +$picmd "$getStatusMPG2" | $pised 's/disabled/1/g' +$picmd "$getStatusWVC1" | $pised 's/disabled/1/g' +$picmd "$getStatusMPG4" | $pised 's/disabled/1/g' +$picmd "$getStatusMJPG" | $pised 's/disabled/1/g' +$picmd "$getStatusWMV9" | $pised 's/disabled/1/g' diff --git a/snmp/redis.py b/snmp/redis.py index 097dda78c..cd861e1f1 100755 --- a/snmp/redis.py +++ b/snmp/redis.py @@ -1,10 +1,14 @@ #!/usr/bin/env python3 -import subprocess import json +import subprocess shell_cmd = "redis-cli info" -all_data = subprocess.Popen(shell_cmd, shell=True, stdout=subprocess.PIPE).stdout.read().split(b'\n') +all_data = ( + subprocess.Popen(shell_cmd, shell=True, stdout=subprocess.PIPE) + .stdout.read() + .split(b"\n") +) version = 1 error = 0 @@ -13,24 +17,24 @@ # stdout list to json try: - category = '' + category = "" for d in all_data: - d = d.replace(b'\r', b'') + d = d.replace(b"\r", b"") - if d 
in [b'']: + if d in [b""]: continue - if d.startswith(b'#'): - category = d.replace(b'# ', b'').decode("utf-8") + if d.startswith(b"#"): + category = d.replace(b"# ", b"").decode("utf-8") redis_data[category] = {} continue if not len(category): error = 2 - error_string = 'category not defined' + error_string = "category not defined" break - k, v = d.split(b':') + k, v = d.split(b":") k = k.decode("utf-8") v = v.decode("utf-8") @@ -38,11 +42,13 @@ except: error = 1 - error_string = 'data extracting error' + error_string = "data extracting error" -output = {'version': version, - 'error': error, - 'errorString': error_string, - 'data': redis_data} +output = { + "version": version, + "error": error, + "errorString": error_string, + "data": redis_data, +} -print (json.dumps(output)) +print(json.dumps(output)) diff --git a/snmp/sdfsinfo b/snmp/sdfsinfo index 56901ec97..6c83d241d 100644 --- a/snmp/sdfsinfo +++ b/snmp/sdfsinfo @@ -18,9 +18,9 @@ # ################################################################## -SDFSCLI_BIN=`which sdfscli` +SDFSCLI_BIN=$(which sdfscli) SDFSCLI_CMD=' --volume-info' -GREP_BIN=`which grep` +GREP_BIN=$(which grep) GREP_CMD=' -o -E ' -DATAPOINTS=`$SDFSCLI_BIN $SDFSCLI_CMD | $GREP_BIN $GREP_CMD "(([0-9]+)\.?([0-9]+)?)"` -echo $DATAPOINTS +DATAPOINTS=$($SDFSCLI_BIN "$SDFSCLI_CMD" | $GREP_BIN "$GREP_CMD" "(([0-9]+)\.?([0-9]+)?)") +echo "$DATAPOINTS" diff --git a/snmp/seafile.py b/snmp/seafile.py index c34cf6e6e..10834dcaa 100755 --- a/snmp/seafile.py +++ b/snmp/seafile.py @@ -14,9 +14,10 @@ # Clients -> plattform (count) # Clients -> version (count) -import requests import json +import requests + # Configfile content example: # {"url": "https://seafile.mydomain.org", # "username": "some_admin_login@mail.address", @@ -25,65 +26,65 @@ # "hide_monitoring_account": true # } -CONFIGFILE='/etc/snmp/seafile.json' +CONFIGFILE = "/etc/snmp/seafile.json" error = 0 -error_string = '' +error_string = "" version = 1 def get_data(url_path, data=None, 
token=None): - complete_url = "%s/%s" % (url, url_path) - headers = {'Accept': 'application/json'} - if token: - headers['Authorization'] = "Token %s" % token + complete_url = "%s/%s" % (url, url_path) + headers = {"Accept": "application/json"} + if token: + headers["Authorization"] = "Token %s" % token + try: + if token: + r = requests.get(complete_url, data=data, headers=headers) + else: + r = requests.post(complete_url, data=data, headers=headers) try: - if token: - r = requests.get(complete_url, data=data, headers=headers) - else: - r = requests.post(complete_url, data=data, headers=headers) - try: - return r.json() - except json.decoder.JSONDecodeError: - return 'no valid json returned - url correct?' - except requests.exceptions.RequestException as err: - return str(err) + return r.json() + except json.decoder.JSONDecodeError: + return "no valid json returned - url correct?" + except requests.exceptions.RequestException as err: + return str(err) def get_devices(): # get all devices - url_path = 'api/v2.1/admin/devices/' + url_path = "api/v2.1/admin/devices/" return get_data(url_path, token=token) def get_groups(): # get all groups - url_path = 'api/v2.1/admin/groups/' + url_path = "api/v2.1/admin/groups/" return get_data(url_path, token=token) def get_sysinfo(): # get all groups - url_path = 'api/v2.1/admin/sysinfo/' + url_path = "api/v2.1/admin/sysinfo/" return get_data(url_path, token=token) def get_account_information(): # get all accounts withs details account_list = [] - for account in get_data('api2/accounts/', token=token): + for account in get_data("api2/accounts/", token=token): # get account details - url_path = 'api2/accounts/%s/' % account['email'] + url_path = "api2/accounts/%s/" % account["email"] account_data = get_data(url_path, token=token) # get libraries by owner - url_path = 'api/v2.1/admin/libraries/?owner=%s' % account['email'] - account_data['repos'] = get_data(url_path, token=token)['repos'] + url_path = 
"api/v2.1/admin/libraries/?owner=%s" % account["email"] + account_data["repos"] = get_data(url_path, token=token)["repos"] # get deleted libraries by owner - url_path = 'api/v2.1/admin/trash-libraries/?owner=%s' % account['email'] - account_data['trash_repos'] = get_data(url_path, token=token)['repos'] + url_path = "api/v2.1/admin/trash-libraries/?owner=%s" % account["email"] + account_data["trash_repos"] = get_data(url_path, token=token)["repos"] account_list.append(account_data) return account_list @@ -96,55 +97,53 @@ def resort_devices(device_list): for device in device_list: # don't list information assigned to monitor account if hide_monitoring_account: - if device['user'] == configfile['username']: + if device["user"] == configfile["username"]: continue - if device['platform'] not in platform.keys(): - platform[device['platform']] = 1 + if device["platform"] not in platform.keys(): + platform[device["platform"]] = 1 else: - platform[device['platform']] += 1 + platform[device["platform"]] += 1 - if device['client_version'] not in client_version.keys(): - client_version[device['client_version']] = 1 + if device["client_version"] not in client_version.keys(): + client_version[device["client_version"]] = 1 else: - client_version[device['client_version']] += 1 + client_version[device["client_version"]] += 1 - data['platform'] = [] + data["platform"] = [] for k, v in platform.items(): - data['platform'].append({'os_name': k, - 'clients':v}) - data['client_version'] = [] + data["platform"].append({"os_name": k, "clients": v}) + data["client_version"] = [] for k, v in client_version.items(): - data['client_version'].append({'client_version': k, - 'clients':v}) + data["client_version"].append({"client_version": k, "clients": v}) return data def resort_groups(group_list): - data = {'count': len(group_list)} + data = {"count": len(group_list)} return data def resort_accounts(account_list): - if account_identifier in ['name', 'email']: + if account_identifier in ["name", 
"email"]: identifier = account_identifier else: - identifier = 'name' + identifier = "name" - accepted_key_list = ['is_active', 'usage'] + accepted_key_list = ["is_active", "usage"] data = [] for user_account in account_list: # don't list information assigned to monitor account if hide_monitoring_account: - if user_account['email'] == configfile['username']: + if user_account["email"] == configfile["username"]: continue new_account = {} - new_account['owner'] = user_account[identifier] - new_account['repos'] = len(user_account['repos']) - new_account['trash_repos'] = len(user_account['trash_repos']) + new_account["owner"] = user_account[identifier] + new_account["repos"] = len(user_account["repos"]) + new_account["trash_repos"] = len(user_account["trash_repos"]) for k in user_account.keys(): if k not in accepted_key_list: @@ -152,11 +151,11 @@ def resort_accounts(account_list): new_account[k] = user_account[k] data.append(new_account) - return sorted(data, key=lambda k: k['owner'].lower()) + return sorted(data, key=lambda k: k["owner"].lower()) # ------------------------ MAIN -------------------------------------------------------- -with open(CONFIGFILE, 'r') as json_file: +with open(CONFIGFILE, "r") as json_file: try: configfile = json.load(json_file) except json.decoder.JSONDecodeError as e: @@ -164,24 +163,24 @@ def resort_accounts(account_list): error_string = "Configfile Error: '%s'" % e if not error: - url = configfile['url'] - username = configfile['username'] - password = configfile['password'] + url = configfile["url"] + username = configfile["username"] + password = configfile["password"] try: - account_identifier = configfile['account_identifier'] + account_identifier = configfile["account_identifier"] except KeyError: account_identifier = None try: - hide_monitoring_account = configfile['hide_monitoring_account'] + hide_monitoring_account = configfile["hide_monitoring_account"] except KeyError: hide_monitoring_account = False # get token - login_data = 
{'username': username, 'password': password} - ret = get_data('api2/auth-token/', data=login_data) + login_data = {"username": username, "password": password} + ret = get_data("api2/auth-token/", data=login_data) if type(ret) != str: - if 'token' in ret.keys(): - token = ret['token'] + if "token" in ret.keys(): + token = ret["token"] else: error = 1 try: @@ -194,18 +193,13 @@ def resort_accounts(account_list): data = {} if not error: - ret= get_account_information() + ret = get_account_information() if not error: - data['accounts'] = resort_accounts(ret) - data['devices'] = resort_devices(get_devices()['devices']) - data['groups'] = resort_groups(get_groups()['groups']) - data['sysinfo'] = get_sysinfo() + data["accounts"] = resort_accounts(ret) + data["devices"] = resort_devices(get_devices()["devices"]) + data["groups"] = resort_groups(get_groups()["groups"]) + data["sysinfo"] = get_sysinfo() -output = {'error': error, - 'errorString': error_string, - 'version': version, - 'data': data - } +output = {"error": error, "errorString": error_string, "version": version, "data": data} print(json.dumps(output)) - diff --git a/snmp/shoutcast.php b/snmp/shoutcast.php index 4c588355a..637d3abe4 100755 --- a/snmp/shoutcast.php +++ b/snmp/shoutcast.php @@ -18,105 +18,113 @@ /// /////////////////////////////////////////////////////////////////////////////////////// - // START SETTINGS /// - $config = "/opt/librenms/scripts/shoutcast.conf"; - $cache = "/opt/librenms/scripts/shoutcast.cache"; + $config = '/opt/librenms/scripts/shoutcast.conf'; + $cache = '/opt/librenms/scripts/shoutcast.cache'; // END SETTINGS /// - /// // DO NOT EDIT BENETH THIS LINE /// /////////////////////////////////////////////////////////////////////////////////////// - /* Do NOT run this script through a web browser */ - if (!isset($_SERVER["argv"][0]) || isset($_SERVER['REQUEST_METHOD']) || isset($_SERVER['REMOTE_ADDR'])) { - die('This script is only meant to run at the command line.'); - } - - $cmd = 
(isset($_SERVER['argv'][1]) ? $_SERVER['argv'][1] : ""); - - function get_data($host, $port) { - $fp = @fsockopen($host, $port, $errno, $errstr, 5); - if(!$fp) { $connect = 0; } - if (!isset($connect)) { - fputs($fp, "GET /7.html HTTP/1.0\r\n" - . "User-Agent: All In One - SHOUTcast Stats Parser" - . " (Mozilla Compatible)\r\n\r\n"); - while (!feof($fp)) { - $rawdata = fgets($fp, 1024); - } - fclose($fp); - } - preg_match('/body>(.*)<\/body/', $rawdata, $matches); - $res = explode(',', $matches[1], 7); - $res[7] = $host; - $res[8] = $port; - return $res; - } - - function get_list($config) { - if (file_exists($config)) { - $servers = file($config); - $data = array(); - foreach ($servers as $item=>$server) { - list($host, $port) = explode(":", $server, 2); - array_push($data, get_data(trim($host), trim($port))); - } - return $data; - } - } - - function doSNMPv2($vars) { - $res = array(); - foreach ($vars as $items=>$server) { - $var = array(); - $var['bitrate'] = (isset($server['5']) ? (($server['5'] / 8) * 1000) : "0"); - //$var['bitrate'] = (isset($server['5']) ? ($server['5'] * 1024) : "0"); - $var['traf_in'] = (isset($server['1']) ? ($var['bitrate'] * $server['1']) : "0"); - $var['traf_out'] = (isset($server['0']) ? ($var['bitrate'] * $server['0']) : "0"); - $var['current'] = (isset($server['0']) ? $server['0'] : "0"); - $var['status'] = (isset($server['1']) ? $server['1'] : "0"); - $var['peak'] = (isset($server['2']) ? $server['2'] : "0"); - $var['max'] = (isset($server['3']) ? $server['3'] : "0"); - $var['unique'] = (isset($server['4']) ? $server['4'] : "0"); - $host = (isset($server['7']) ? $server['7'] : "unknown"); - $port = (isset($server['8']) ? 
$server['8'] : "unknown"); - $tmp = $host.":".$port; - foreach ($var as $item=>$value) { - $tmp .= ";".$value; - } - array_push($res, $tmp); - } - return $res; - } - - function makeCacheFile($data, $cache) { - $fp = fopen($cache, 'w'); - foreach ($data as $item=>$value) { - fwrite($fp, $value."\n"); - } - fclose($fp); - } - - function readCacheFile($cache) { - if (file_exists($cache)) { - $data = file($cache); - foreach ($data as $item=>$value) { - echo trim($value)."\n"; - } - } - } - - if ($cmd == "makeCache") { - $servers = get_list($config); - $data = doSNMPv2($servers); - makeCacheFile($data, $cache); - } else { - readCacheFile($cache); - } + /* Do NOT run this script through a web browser */ + if (!isset($_SERVER['argv'][0]) || isset($_SERVER['REQUEST_METHOD']) || isset($_SERVER['REMOTE_ADDR'])) { + exit('This script is only meant to run at the command line.'); + } + + $cmd = (isset($_SERVER['argv'][1]) ? $_SERVER['argv'][1] : ''); + + function get_data($host, $port) + { + $fp = @fsockopen($host, $port, $errno, $errstr, 5); + if (!$fp) { + $connect = 0; + } + if (!isset($connect)) { + fputs($fp, "GET /7.html HTTP/1.0\r\n" + .'User-Agent: All In One - SHOUTcast Stats Parser' + ." (Mozilla Compatible)\r\n\r\n"); + while (!feof($fp)) { + $rawdata = fgets($fp, 1024); + } + fclose($fp); + } + preg_match('/body>(.*)<\/body/', $rawdata, $matches); + $res = explode(',', $matches[1], 7); + $res[7] = $host; + $res[8] = $port; + + return $res; + } + + function get_list($config) + { + if (file_exists($config)) { + $servers = file($config); + $data = []; + foreach ($servers as $item=>$server) { + list($host, $port) = explode(':', $server, 2); + array_push($data, get_data(trim($host), trim($port))); + } + + return $data; + } + } + + function doSNMPv2($vars) + { + $res = []; + foreach ($vars as $items=>$server) { + $var = []; + $var['bitrate'] = (isset($server['5']) ? (($server['5'] / 8) * 1000) : '0'); + //$var['bitrate'] = (isset($server['5']) ? 
($server['5'] * 1024) : "0"); + $var['traf_in'] = (isset($server['1']) ? ($var['bitrate'] * $server['1']) : '0'); + $var['traf_out'] = (isset($server['0']) ? ($var['bitrate'] * $server['0']) : '0'); + $var['current'] = (isset($server['0']) ? $server['0'] : '0'); + $var['status'] = (isset($server['1']) ? $server['1'] : '0'); + $var['peak'] = (isset($server['2']) ? $server['2'] : '0'); + $var['max'] = (isset($server['3']) ? $server['3'] : '0'); + $var['unique'] = (isset($server['4']) ? $server['4'] : '0'); + $host = (isset($server['7']) ? $server['7'] : 'unknown'); + $port = (isset($server['8']) ? $server['8'] : 'unknown'); + $tmp = $host.':'.$port; + foreach ($var as $item=>$value) { + $tmp .= ';'.$value; + } + array_push($res, $tmp); + } + + return $res; + } + + function makeCacheFile($data, $cache) + { + $fp = fopen($cache, 'w'); + foreach ($data as $item=> $value) { + fwrite($fp, $value."\n"); + } + fclose($fp); + } + + function readCacheFile($cache) + { + if (file_exists($cache)) { + $data = file($cache); + foreach ($data as $item=>$value) { + echo trim($value)."\n"; + } + } + } + + if ($cmd == 'makeCache') { + $servers = get_list($config); + $data = doSNMPv2($servers); + makeCacheFile($data, $cache); + } else { + readCacheFile($cache); + } ?> diff --git a/snmp/ups-apcups.sh b/snmp/ups-apcups.sh index 0e41a14e1..64b55c30f 100755 --- a/snmp/ups-apcups.sh +++ b/snmp/ups-apcups.sh @@ -17,13 +17,13 @@ BIN_GREP='/usr/bin/grep' ################################################################ # Don't change anything unless you know what are you doing # ################################################################ -TMP=`$BIN_APCS 2>/dev/null` +TMP=$($BIN_APCS 2>/dev/null) for value in "LINEV:[0-9]+" "LOADPCT:[0-9.]+" "BCHARGE:[0-9.]+" "TIMELEFT:[0-9.]+" "^BATTV:[0-9.]+" "NOMINV:[0-9]+" "NOMBATTV:[0-9.]+" do - OUT=`echo "$TMP" | $BIN_TR -d ' ' | $BIN_GREP -Eo $value | $BIN_CUT -d ":" -f 2` + OUT=$(echo "$TMP" | $BIN_TR -d ' ' | $BIN_GREP -Eo "$value" | $BIN_CUT -d ":" 
-f 2) if [ -n "$OUT" ]; then - echo $OUT + echo "$OUT" else echo "Unknown" fi diff --git a/snmp/ups-nut.sh b/snmp/ups-nut.sh index 7e3d8a15a..7fa5a0ba3 100755 --- a/snmp/ups-nut.sh +++ b/snmp/ups-nut.sh @@ -16,9 +16,9 @@ TMP=$(upsc $UPS_NAME 2>/dev/null) for value in "battery\.charge: [0-9.]+" "battery\.(runtime\.)?low: [0-9]+" "battery\.runtime: [0-9]+" "battery\.voltage: [0-9.]+" "battery\.voltage\.nominal: [0-9]+" "input\.voltage\.nominal: [0-9.]+" "input\.voltage: [0-9.]+" "ups\.load: [0-9.]+" do - OUT=$(echo $TMP | grep -Eo "$value" | awk '{print $2}' | LANG=C sort | head -n 1) + OUT=$(echo "$TMP" | grep -Eo "$value" | awk '{print $2}' | LANG=C sort | head -n 1) if [ -n "$OUT" ]; then - echo $OUT + echo "$OUT" else echo "Unknown" fi @@ -26,11 +26,11 @@ done for value in "ups\.status:[A-Z ]{0,}OL" "ups\.status:[A-Z ]{0,}OB" "ups\.status:[A-Z ]{0,}LB" "ups\.status:[A-Z ]{0,}HB" "ups\.status:[A-Z ]{0,}RB" "ups\.status:[A-Z ]{0,}CHRG" "ups\.status:[A-Z ]{0,}DISCHRG" "ups\.status:[A-Z ]{0,}BYPASS" "ups\.status:[A-Z ]{0,}CAL" "ups\.status:[A-Z ]{0,}OFF" "ups\.status:[A-Z ]{0,}OVER" "ups\.status:[A-Z ]{0,}TRIM" "ups\.status:[A-Z ]{0,}BOOST" "ups\.status:[A-Z ]{0,}FSD" do - UNKNOWN=$(echo $TMP | grep -Eo "ups\.status:") + UNKNOWN=$(echo "$TMP" | grep -Eo "ups\.status:") if [ -z "$UNKNOWN" ]; then echo "Unknown" else - OUT=$(echo $TMP | grep -Eo "$value") + OUT=$(echo "$TMP" | grep -Eo "$value") if [ -n "$OUT" ]; then echo "1" else diff --git a/snmp/voipmon-stats.sh b/snmp/voipmon-stats.sh index 671a04af9..1dcab4d9f 100644 --- a/snmp/voipmon-stats.sh +++ b/snmp/voipmon-stats.sh @@ -5,9 +5,9 @@ used_memory=$(ps -C voipmonitor -o rsz | awk 'FNR==2 {print}') cpu_load=$(ps -C voipmonitor -o %cpu | awk 'FNR==2 {print}') pid=$(pidof voipmonitor) -total_files=$(ls -l /proc/${pid}/fd | wc -l) +total_files=$(ls -l /proc/"${pid}"/fd | wc -l) -echo "Used Memory="$used_memory -echo "CPU Load="$cpu_load -echo "Open files="$total_files +echo "Used Memory=""$used_memory" +echo "CPU 
Load=""$cpu_load" +echo "Open files=""$total_files" exit diff --git a/snmp/zfs-freebsd.py b/snmp/zfs-freebsd.py index d32e959a1..4ebd6d683 100644 --- a/snmp/zfs-freebsd.py +++ b/snmp/zfs-freebsd.py @@ -5,124 +5,187 @@ import json import subprocess -SYSCTL = '/sbin/sysctl' -ZPOOL = '/usr/local/sbin/zpool' +SYSCTL = "/sbin/sysctl" +ZPOOL = "/usr/local/sbin/zpool" + def percent(numerator, denominator, default=0): - try: - return numerator / denominator * 100 - except ZeroDivisionError: - return default + try: + return numerator / denominator * 100 + except ZeroDivisionError: + return default + def main(args): - p = subprocess.run([SYSCTL, '-q', 'kstat.zfs', 'vfs.zfs'], stdout=subprocess.PIPE, universal_newlines=True) - - if p.returncode != 0: - return p.returncode - - def chomp(line): - bits = [b.strip() for b in line.split(':')] - try: - return bits[0], int(bits[1]) - except ValueError: - return bits[0], bits[1] - - stats = dict(chomp(l) for l in p.stdout.splitlines() if l) - if 'kstat.zfs.misc.arcstats.recycle_miss' not in stats: - stats['kstat.zfs.misc.arcstats.recycle_miss'] = 0 - - output = dict() - - # ARC misc - output['deleted'] = stats['kstat.zfs.misc.arcstats.deleted'] - output['evict_skip'] = stats['kstat.zfs.misc.arcstats.evict_skip'] - output['mutex_skip'] = stats['kstat.zfs.misc.arcstats.mutex_miss'] - output['recycle_miss'] = stats['kstat.zfs.misc.arcstats.recycle_miss'] - - # ARC size - output['target_size_per'] = stats['kstat.zfs.misc.arcstats.c'] / stats['kstat.zfs.misc.arcstats.c_max'] * 100 - output['arc_size_per'] = stats['kstat.zfs.misc.arcstats.size'] / stats['kstat.zfs.misc.arcstats.c_max'] * 100 - output['target_size_arat'] = stats['kstat.zfs.misc.arcstats.c'] / stats['kstat.zfs.misc.arcstats.c_max'] - output['min_size_per'] = stats['kstat.zfs.misc.arcstats.c_min'] / stats['kstat.zfs.misc.arcstats.c_max'] * 100 - - output['arc_size'] = stats['kstat.zfs.misc.arcstats.size'] - output['target_size_max'] = stats['kstat.zfs.misc.arcstats.c_max'] - 
output['target_size_min'] = stats['kstat.zfs.misc.arcstats.c_min'] - output['target_size'] = stats['kstat.zfs.misc.arcstats.c'] - - # ARC size breakdown - output['mfu_size'] = stats['kstat.zfs.misc.arcstats.size'] - stats['kstat.zfs.misc.arcstats.p'] - output['p'] = stats['kstat.zfs.misc.arcstats.p'] - output['rec_used_per'] = stats['kstat.zfs.misc.arcstats.p'] / stats['kstat.zfs.misc.arcstats.size'] * 100 - output['freq_used_per'] = output['mfu_size'] / stats['kstat.zfs.misc.arcstats.size'] * 100 - - # ARC misc efficiency stats - output['arc_hits'] = stats['kstat.zfs.misc.arcstats.hits'] - output['arc_misses'] = stats['kstat.zfs.misc.arcstats.misses'] - output['demand_data_hits'] = stats['kstat.zfs.misc.arcstats.demand_data_hits'] - output['demand_data_misses'] = stats['kstat.zfs.misc.arcstats.demand_data_misses'] - output['demand_meta_hits'] = stats['kstat.zfs.misc.arcstats.demand_metadata_hits'] - output['demand_meta_misses'] = stats['kstat.zfs.misc.arcstats.demand_metadata_misses'] - output['mfu_ghost_hits'] = stats['kstat.zfs.misc.arcstats.mfu_ghost_hits'] - output['mfu_hits'] = stats['kstat.zfs.misc.arcstats.mfu_hits'] - output['mru_ghost_hits'] = stats['kstat.zfs.misc.arcstats.mru_ghost_hits'] - output['mru_hits'] = stats['kstat.zfs.misc.arcstats.mru_hits'] - output['pre_data_hits'] = stats['kstat.zfs.misc.arcstats.prefetch_data_hits'] - output['pre_data_misses'] = stats['kstat.zfs.misc.arcstats.prefetch_data_misses'] - output['pre_meta_hits'] = stats['kstat.zfs.misc.arcstats.prefetch_metadata_hits'] - output['pre_meta_misses'] = stats['kstat.zfs.misc.arcstats.prefetch_metadata_misses'] - - output['anon_hits'] = output['arc_hits'] - (output['mfu_hits'] + output['mru_hits'] + output['mfu_ghost_hits'] + output['mru_ghost_hits']) - output['arc_accesses_total'] = output['arc_hits'] + output['arc_misses'] - output['demand_data_total'] = output['demand_data_hits'] + output['demand_data_misses'] - output['pre_data_total'] = output['pre_data_hits'] + 
output['pre_data_misses'] - output['real_hits'] = output['mfu_hits'] + output['mru_hits'] - - # ARC efficiency percents - output['cache_hits_per'] = percent(output['arc_hits'], output['arc_accesses_total']) - output['cache_miss_per'] = percent(output['arc_misses'], output['arc_accesses_total']) - output['actual_hit_per'] = percent(output['real_hits'], output['arc_accesses_total']) - output['data_demand_per'] = percent(output['demand_data_hits'], output['demand_data_total']) - output['data_pre_per'] = percent(output['pre_data_hits'], output['pre_data_total']) - output['anon_hits_per'] = percent(output['anon_hits'], output['arc_hits']) - output['mru_per'] = percent(output['mru_hits'], output['arc_hits']) - output['mfu_per'] = percent(output['mfu_hits'], output['arc_hits']) - output['mru_ghost_per'] = percent(output['mru_ghost_hits'], output['arc_hits']) - output['mfu_ghost_per'] = percent(output['mfu_ghost_hits'], output['arc_hits']) - output['demand_hits_per'] = percent(output['demand_data_hits'], output['arc_hits']) - output['pre_hits_per'] = percent(output['pre_data_hits'], output['arc_hits']) - output['meta_hits_per'] = percent(output['demand_meta_hits'], output['arc_hits']) - output['pre_meta_hits_per'] = percent(output['pre_meta_hits'], output['arc_hits']) - output['demand_misses_per'] = percent(output['demand_data_misses'], output['arc_misses']) - output['pre_misses_per'] = percent(output['pre_data_misses'], output['arc_misses']) - output['meta_misses_per'] = percent(output['demand_meta_misses'], output['arc_misses']) - output['pre_meta_misses_per'] = percent(output['pre_meta_misses'], output['arc_misses']) - - # pools - p = subprocess.run([ZPOOL, 'list', '-pH'], stdout=subprocess.PIPE, universal_newlines=True) - if p.returncode != 0: - return p.returncode - output['pools'] = [] - fields = ['name', 'size', 'alloc', 'free', 'ckpoint', 'expandsz', 'frag', 'cap', 'dedup'] - for l in p.stdout.splitlines(): - p = dict(zip(fields, l.split('\t'))) - if p['ckpoint'] 
== '-': - p['ckpoint'] = 0 - if p['expandsz'] == '-': - p['expandsz'] = 0 - p['frag'] = p['frag'].rstrip('%') - if p['frag'] == '-': - p['frag'] = 0 - p['cap'] = p['cap'].rstrip('%') - if p['cap'] == '-': - p['cap'] = 0 - p['dedup'] = p['dedup'].rstrip('x') - output['pools'].append(p) - - print(json.dumps(output)) - - return 0 - -if __name__ == '__main__': - import sys - sys.exit(main(sys.argv[1:])) + p = subprocess.run( + [SYSCTL, "-q", "kstat.zfs", "vfs.zfs"], + stdout=subprocess.PIPE, + universal_newlines=True, + ) + + if p.returncode != 0: + return p.returncode + + def chomp(line): + bits = [b.strip() for b in line.split(":")] + try: + return bits[0], int(bits[1]) + except ValueError: + return bits[0], bits[1] + + stats = dict(chomp(line) for line in p.stdout.splitlines() if line) + if "kstat.zfs.misc.arcstats.recycle_miss" not in stats: + stats["kstat.zfs.misc.arcstats.recycle_miss"] = 0 + + output = dict() + + # ARC misc + output["deleted"] = stats["kstat.zfs.misc.arcstats.deleted"] + output["evict_skip"] = stats["kstat.zfs.misc.arcstats.evict_skip"] + output["mutex_skip"] = stats["kstat.zfs.misc.arcstats.mutex_miss"] + output["recycle_miss"] = stats["kstat.zfs.misc.arcstats.recycle_miss"] + + # ARC size + output["target_size_per"] = ( + stats["kstat.zfs.misc.arcstats.c"] + / stats["kstat.zfs.misc.arcstats.c_max"] + * 100 + ) + output["arc_size_per"] = ( + stats["kstat.zfs.misc.arcstats.size"] + / stats["kstat.zfs.misc.arcstats.c_max"] + * 100 + ) + output["target_size_arat"] = ( + stats["kstat.zfs.misc.arcstats.c"] / stats["kstat.zfs.misc.arcstats.c_max"] + ) + output["min_size_per"] = ( + stats["kstat.zfs.misc.arcstats.c_min"] + / stats["kstat.zfs.misc.arcstats.c_max"] + * 100 + ) + + output["arc_size"] = stats["kstat.zfs.misc.arcstats.size"] + output["target_size_max"] = stats["kstat.zfs.misc.arcstats.c_max"] + output["target_size_min"] = stats["kstat.zfs.misc.arcstats.c_min"] + output["target_size"] = stats["kstat.zfs.misc.arcstats.c"] + + # ARC size 
breakdown + output["mfu_size"] = ( + stats["kstat.zfs.misc.arcstats.size"] - stats["kstat.zfs.misc.arcstats.p"] + ) + output["p"] = stats["kstat.zfs.misc.arcstats.p"] + output["rec_used_per"] = ( + stats["kstat.zfs.misc.arcstats.p"] / stats["kstat.zfs.misc.arcstats.size"] * 100 + ) + output["freq_used_per"] = ( + output["mfu_size"] / stats["kstat.zfs.misc.arcstats.size"] * 100 + ) + + # ARC misc efficiency stats + output["arc_hits"] = stats["kstat.zfs.misc.arcstats.hits"] + output["arc_misses"] = stats["kstat.zfs.misc.arcstats.misses"] + output["demand_data_hits"] = stats["kstat.zfs.misc.arcstats.demand_data_hits"] + output["demand_data_misses"] = stats["kstat.zfs.misc.arcstats.demand_data_misses"] + output["demand_meta_hits"] = stats["kstat.zfs.misc.arcstats.demand_metadata_hits"] + output["demand_meta_misses"] = stats[ + "kstat.zfs.misc.arcstats.demand_metadata_misses" + ] + output["mfu_ghost_hits"] = stats["kstat.zfs.misc.arcstats.mfu_ghost_hits"] + output["mfu_hits"] = stats["kstat.zfs.misc.arcstats.mfu_hits"] + output["mru_ghost_hits"] = stats["kstat.zfs.misc.arcstats.mru_ghost_hits"] + output["mru_hits"] = stats["kstat.zfs.misc.arcstats.mru_hits"] + output["pre_data_hits"] = stats["kstat.zfs.misc.arcstats.prefetch_data_hits"] + output["pre_data_misses"] = stats["kstat.zfs.misc.arcstats.prefetch_data_misses"] + output["pre_meta_hits"] = stats["kstat.zfs.misc.arcstats.prefetch_metadata_hits"] + output["pre_meta_misses"] = stats[ + "kstat.zfs.misc.arcstats.prefetch_metadata_misses" + ] + + output["anon_hits"] = output["arc_hits"] - ( + output["mfu_hits"] + + output["mru_hits"] + + output["mfu_ghost_hits"] + + output["mru_ghost_hits"] + ) + output["arc_accesses_total"] = output["arc_hits"] + output["arc_misses"] + output["demand_data_total"] = ( + output["demand_data_hits"] + output["demand_data_misses"] + ) + output["pre_data_total"] = output["pre_data_hits"] + output["pre_data_misses"] + output["real_hits"] = output["mfu_hits"] + output["mru_hits"] + + # ARC 
efficiency percents + output["cache_hits_per"] = percent(output["arc_hits"], output["arc_accesses_total"]) + output["cache_miss_per"] = percent( + output["arc_misses"], output["arc_accesses_total"] + ) + output["actual_hit_per"] = percent( + output["real_hits"], output["arc_accesses_total"] + ) + output["data_demand_per"] = percent( + output["demand_data_hits"], output["demand_data_total"] + ) + output["data_pre_per"] = percent(output["pre_data_hits"], output["pre_data_total"]) + output["anon_hits_per"] = percent(output["anon_hits"], output["arc_hits"]) + output["mru_per"] = percent(output["mru_hits"], output["arc_hits"]) + output["mfu_per"] = percent(output["mfu_hits"], output["arc_hits"]) + output["mru_ghost_per"] = percent(output["mru_ghost_hits"], output["arc_hits"]) + output["mfu_ghost_per"] = percent(output["mfu_ghost_hits"], output["arc_hits"]) + output["demand_hits_per"] = percent(output["demand_data_hits"], output["arc_hits"]) + output["pre_hits_per"] = percent(output["pre_data_hits"], output["arc_hits"]) + output["meta_hits_per"] = percent(output["demand_meta_hits"], output["arc_hits"]) + output["pre_meta_hits_per"] = percent(output["pre_meta_hits"], output["arc_hits"]) + output["demand_misses_per"] = percent( + output["demand_data_misses"], output["arc_misses"] + ) + output["pre_misses_per"] = percent(output["pre_data_misses"], output["arc_misses"]) + output["meta_misses_per"] = percent( + output["demand_meta_misses"], output["arc_misses"] + ) + output["pre_meta_misses_per"] = percent( + output["pre_meta_misses"], output["arc_misses"] + ) + + # pools + p = subprocess.run( + [ZPOOL, "list", "-pH"], stdout=subprocess.PIPE, universal_newlines=True + ) + if p.returncode != 0: + return p.returncode + output["pools"] = [] + fields = [ + "name", + "size", + "alloc", + "free", + "ckpoint", + "expandsz", + "frag", + "cap", + "dedup", + ] + for l in p.stdout.splitlines(): + p = dict(zip(fields, l.split("\t"))) + if p["ckpoint"] == "-": + p["ckpoint"] = 0 + if 
p["expandsz"] == "-": + p["expandsz"] = 0 + p["frag"] = p["frag"].rstrip("%") + if p["frag"] == "-": + p["frag"] = 0 + p["cap"] = p["cap"].rstrip("%") + if p["cap"] == "-": + p["cap"] = 0 + p["dedup"] = p["dedup"].rstrip("x") + output["pools"].append(p) + + print(json.dumps(output)) + + return 0 + + +if __name__ == "__main__": + import sys + + sys.exit(main(sys.argv[1:])) diff --git a/snmp/zfs-linux b/snmp/zfs-linux index 638705c51..e1fb67126 100755 --- a/snmp/zfs-linux +++ b/snmp/zfs-linux @@ -2,64 +2,70 @@ import json import subprocess + def proc_err(cmd, proc): # output process error and first line of error code return "{}{}".format( subprocess.CalledProcessError(proc.returncode, cmd, proc.stderr), - " ({})".format(proc.stderr.splitlines()[0]) if proc.stderr.splitlines() else "" + " ({})".format(proc.stderr.splitlines()[0]) if proc.stderr.splitlines() else "", ) + def main(args): - LINUX = '/proc/spl/kstat/zfs/arcstats' - BSD1 = 'sysctl' - BSD2 = 'kstat.zfs.misc.arcstats' - ILLUMOS = 'kstat -n arcstats' + LINUX = "/proc/spl/kstat/zfs/arcstats" + BSD1 = "sysctl" + BSD2 = "kstat.zfs.misc.arcstats" + ILLUMOS = "kstat -n arcstats" COLUMN = 1 SPLIT = None res = {} try: - LINES = open(LINUX, 'r').readlines() + LINES = open(LINUX, "r").readlines() COLUMN = 2 except IOError as e1: try: - proc = subprocess.run([BSD1, BSD2], stdout=subprocess.PIPE, universal_newlines=True) + proc = subprocess.run( + [BSD1, BSD2], stdout=subprocess.PIPE, universal_newlines=True + ) LINES = proc.stdout.splitlines() - LINES = [x[len(BSD2)+1:] for x in LINES] - SPLIT = ':' + LINES = [x[len(BSD2) + 1 :] for x in LINES] + SPLIT = ":" except FileNotFoundError as e2: try: - proc = subprocess.run(ILLUMOS.split(), stdout=subprocess.PIPE, universal_newlines=True) + proc = subprocess.run( + ILLUMOS.split(), stdout=subprocess.PIPE, universal_newlines=True + ) LINES = proc.stdout.splitlines() except FileNotFoundError as e3: - print('Linux :', e1) - print('BSD :', e2) - print('Illumos:', e3) + 
print("Linux :", e1) + print("BSD :", e2) + print("Illumos:", e3) return 1 - + LINES = [x.strip() for x in LINES] - + STATS = {} for line in LINES[2:]: splitline = line.split(SPLIT) try: - STATS[splitline[0]] = int(splitline[COLUMN]) + STATS[splitline[0]] = int(splitline[COLUMN]) # Skip non int value like Illumos crtime, empty line at the end except: continue - + # ARC misc - DELETED = STATS['deleted'] - EVICT_SKIP = STATS['evict_skip'] - MUTEX_SKIP = STATS['mutex_miss'] - RECYCLE_MISS = STATS['recycle_miss'] if 'recycle_miss' in STATS else 0 + DELETED = STATS["deleted"] + EVICT_SKIP = STATS["evict_skip"] + MUTEX_SKIP = STATS["mutex_miss"] + RECYCLE_MISS = STATS["recycle_miss"] if "recycle_miss" in STATS else 0 # ARC size - ARC_SIZE = STATS['size'] - TARGET_SIZE_MAX = STATS['c_max'] - TARGET_SIZE_MIN = STATS['c_min'] - TARGET_SIZE = STATS['c'] + ARC_SIZE = STATS["size"] + TARGET_SIZE_MAX = STATS["c_max"] + TARGET_SIZE_MIN = STATS["c_min"] + TARGET_SIZE = STATS["c"] TARGET_SIZE_PERCENT = TARGET_SIZE / TARGET_SIZE_MAX * 100 ARC_SIZE_PERCENT = ARC_SIZE / TARGET_SIZE_MAX * 100 @@ -70,7 +76,7 @@ def main(args): MFU_SIZE = 0 RECENTLY_USED_PERCENT = 0 FREQUENTLY_USED_PERCENT = 0 - P = STATS['p'] + P = STATS["p"] if ARC_SIZE >= TARGET_SIZE: MFU_SIZE = ARC_SIZE - P @@ -81,22 +87,21 @@ def main(args): RECENTLY_USED_PERCENT = P / TARGET_SIZE * 100 FREQUENTLY_USED_PERCENT = MFU_SIZE / TARGET_SIZE * 100 - # ARC misc. 
efficient stats - ARC_HITS = STATS['hits'] - ARC_MISSES = STATS['misses'] - DEMAND_DATA_HITS = STATS['demand_data_hits'] - DEMAND_DATA_MISSES = STATS['demand_data_misses'] - DEMAND_METADATA_HITS = STATS['demand_metadata_hits'] - DEMAND_METADATA_MISSES = STATS['demand_metadata_misses'] - MFU_GHOST_HITS = STATS['mfu_ghost_hits'] - MFU_HITS = STATS['mfu_hits'] - MRU_GHOST_HITS = STATS['mru_ghost_hits'] - MRU_HITS = STATS['mru_hits'] - PREFETCH_DATA_HITS = STATS['prefetch_data_hits'] - PREFETCH_DATA_MISSES = STATS['prefetch_data_misses'] - PREFETCH_METADATA_HITS = STATS['prefetch_metadata_hits'] - PREFETCH_METADATA_MISSES = STATS['prefetch_metadata_misses'] + ARC_HITS = STATS["hits"] + ARC_MISSES = STATS["misses"] + DEMAND_DATA_HITS = STATS["demand_data_hits"] + DEMAND_DATA_MISSES = STATS["demand_data_misses"] + DEMAND_METADATA_HITS = STATS["demand_metadata_hits"] + DEMAND_METADATA_MISSES = STATS["demand_metadata_misses"] + MFU_GHOST_HITS = STATS["mfu_ghost_hits"] + MFU_HITS = STATS["mfu_hits"] + MRU_GHOST_HITS = STATS["mru_ghost_hits"] + MRU_HITS = STATS["mru_hits"] + PREFETCH_DATA_HITS = STATS["prefetch_data_hits"] + PREFETCH_DATA_MISSES = STATS["prefetch_data_misses"] + PREFETCH_METADATA_HITS = STATS["prefetch_metadata_hits"] + PREFETCH_METADATA_MISSES = STATS["prefetch_metadata_misses"] ANON_HITS = ARC_HITS - (MFU_HITS + MRU_HITS + MFU_GHOST_HITS + MRU_GHOST_HITS) ARC_ACCESSES_TOTAL = ARC_HITS + ARC_MISSES @@ -108,9 +113,15 @@ def main(args): CACHE_HIT_PERCENT = ARC_HITS / ARC_ACCESSES_TOTAL * 100 CACHE_MISS_PERCENT = ARC_MISSES / ARC_ACCESSES_TOTAL * 100 ACTUAL_HIT_PERCENT = REAL_HITS / ARC_ACCESSES_TOTAL * 100 - DATA_DEMAND_PERCENT = DEMAND_DATA_HITS / DEMAND_DATA_TOTAL * 100 if DEMAND_DATA_TOTAL != 0 else 0 + DATA_DEMAND_PERCENT = ( + DEMAND_DATA_HITS / DEMAND_DATA_TOTAL * 100 if DEMAND_DATA_TOTAL != 0 else 0 + ) - DATA_PREFETCH_PERCENT = PREFETCH_DATA_HITS / PREFETCH_DATA_TOTAL * 100 if PREFETCH_DATA_TOTAL != 0 else 0 + DATA_PREFETCH_PERCENT = ( + 
PREFETCH_DATA_HITS / PREFETCH_DATA_TOTAL * 100 + if PREFETCH_DATA_TOTAL != 0 + else 0 + ) ANON_HITS_PERCENT = ANON_HITS / ARC_HITS * 100 if ANON_HITS != 0 else 0 @@ -121,125 +132,157 @@ def main(args): DEMAND_HITS_PERCENT = DEMAND_DATA_HITS / ARC_HITS * 100 if ARC_HITS != 0 else 0 PREFETCH_HITS_PERCENT = PREFETCH_DATA_HITS / ARC_HITS * 100 if ARC_HITS != 0 else 0 - METADATA_HITS_PERCENT = DEMAND_METADATA_HITS / ARC_HITS * 100 if ARC_HITS != 0 else 0 - PREFETCH_METADATA_HITS_PERCENT = DEMAND_METADATA_HITS / ARC_HITS * 100 if ARC_HITS != 0 else 0 + METADATA_HITS_PERCENT = ( + DEMAND_METADATA_HITS / ARC_HITS * 100 if ARC_HITS != 0 else 0 + ) + PREFETCH_METADATA_HITS_PERCENT = ( + DEMAND_METADATA_HITS / ARC_HITS * 100 if ARC_HITS != 0 else 0 + ) - DEMAND_MISSES_PERCENT = DEMAND_DATA_MISSES / ARC_MISSES * 100 if ARC_MISSES != 0 else 0 - PREFETCH_MISSES_PERCENT = PREFETCH_DATA_MISSES / ARC_MISSES * 100 if ARC_MISSES != 0 else 0 - METADATA_MISSES_PERCENT = DEMAND_METADATA_MISSES / ARC_MISSES * 100 if ARC_MISSES != 0 else 0 - PREFETCH_METADATA_MISSES_PERCENT = PREFETCH_METADATA_MISSES / ARC_MISSES * 100 if ARC_MISSES != 0 else 0 + DEMAND_MISSES_PERCENT = ( + DEMAND_DATA_MISSES / ARC_MISSES * 100 if ARC_MISSES != 0 else 0 + ) + PREFETCH_MISSES_PERCENT = ( + PREFETCH_DATA_MISSES / ARC_MISSES * 100 if ARC_MISSES != 0 else 0 + ) + METADATA_MISSES_PERCENT = ( + DEMAND_METADATA_MISSES / ARC_MISSES * 100 if ARC_MISSES != 0 else 0 + ) + PREFETCH_METADATA_MISSES_PERCENT = ( + PREFETCH_METADATA_MISSES / ARC_MISSES * 100 if ARC_MISSES != 0 else 0 + ) # pools exact_size = True - zpool_cmd = ['/sbin/zpool'] - zpool_cmd_list = zpool_cmd + ['list', '-p', '-H'] - std = {'stdout': subprocess.PIPE, 'stderr': subprocess.PIPE, 'universal_newlines': True} + zpool_cmd = ["/sbin/zpool"] + zpool_cmd_list = zpool_cmd + ["list", "-p", "-H"] + std = { + "stdout": subprocess.PIPE, + "stderr": subprocess.PIPE, + "universal_newlines": True, + } ## account for variations between ZoL zfs versions proc = 
subprocess.run(zpool_cmd_list, **std) - if (proc.returncode == 2): + if proc.returncode == 2: # -p option is not present in older versions # edit snmpd.conf zfs extend section to the following: # extend zfs /usr/bin/sudo /etc/snmp/zfs-linux # make sure to edit your sudo users (usually visudo) and add at the bottom: # snmp ALL=(ALL) NOPASSWD: /etc/snmp/zfs-linux - del zpool_cmd_list[zpool_cmd_list.index('-p')] # try removing -p to fix the issue + del zpool_cmd_list[ + zpool_cmd_list.index("-p") + ] # try removing -p to fix the issue proc = subprocess.run(zpool_cmd_list, **std) exact_size = False - if (proc.returncode != 0): + if proc.returncode != 0: return proc_err(zpool_cmd_list, proc) pools = [] - FIELDS = ['name', 'size', 'alloc', 'free', 'ckpoint', 'expandsz', 'frag', 'cap', 'dedup', 'health', 'altroot'] - if len(proc.stdout.splitlines()[0].split('\t')) == 10: - FIELDS.remove('ckpoint') - + FIELDS = [ + "name", + "size", + "alloc", + "free", + "ckpoint", + "expandsz", + "frag", + "cap", + "dedup", + "health", + "altroot", + ] + if len(proc.stdout.splitlines()[0].split("\t")) == 10: + FIELDS.remove("ckpoint") + for line in proc.stdout.splitlines(): - info = dict(zip(FIELDS, line.split('\t'))) + info = dict(zip(FIELDS, line.split("\t"))) - info['expandsz'] = 0 if info['expandsz'] == '-' else info['expandsz'] - info['frag'] = info['frag'].rstrip('%') - info['frag'] = 0 if info['frag'] == '-' else info['frag'] - info['dedup'] = info['dedup'].rstrip('x') - info['cap'] = info['cap'].rstrip('%') - if 'ckpoint' in info: - info['ckpoint'] = 0 if info['ckpoint'] == '-' else info['ckpoint'] + info["expandsz"] = 0 if info["expandsz"] == "-" else info["expandsz"] + info["frag"] = info["frag"].rstrip("%") + info["frag"] = 0 if info["frag"] == "-" else info["frag"] + info["dedup"] = info["dedup"].rstrip("x") + info["cap"] = info["cap"].rstrip("%") + if "ckpoint" in info: + info["ckpoint"] = 0 if info["ckpoint"] == "-" else info["ckpoint"] # zfs-06.5.11 fix if not exact_size: 
- zpool_cmd_get = zpool_cmd + ['get', '-pH', 'size,alloc,free', info['name']] + zpool_cmd_get = zpool_cmd + ["get", "-pH", "size,alloc,free", info["name"]] proc2 = subprocess.run(zpool_cmd_get, **std) - if (proc2.returncode != 0): + if proc2.returncode != 0: return proc_err(zpool_cmd_get, proc2) - info2 = dict([tuple(s.split('\t')[1:3]) for s in proc2.stdout.splitlines()]) - info['size'] = info2['size'] - info['alloc'] = info2['allocated'] - info['free'] = info2['free'] + info2 = dict([tuple(s.split("\t")[1:3]) for s in proc2.stdout.splitlines()]) + info["size"] = info2["size"] + info["alloc"] = info2["allocated"] + info["free"] = info2["free"] pools.append(info) res = { - 'deleted': DELETED, # ARC misc - 'evict_skip': EVICT_SKIP, - 'mutex_skip': MUTEX_SKIP, - 'recycle_miss': RECYCLE_MISS, - 'arc_size': ARC_SIZE, # ARC size - 'target_size_max': TARGET_SIZE_MAX, - 'target_size_min': TARGET_SIZE_MIN, - 'target_size': TARGET_SIZE, - 'target_size_per': TARGET_SIZE_PERCENT, - 'arc_size_per': ARC_SIZE_PERCENT, - 'target_size_arat': TARGET_SIZE_ADAPTIVE_RATIO, - 'min_size_per': MIN_SIZE_PERCENT, - 'mfu_size': MFU_SIZE, # ARC size breakdown - 'p': P, - 'rec_used_per': RECENTLY_USED_PERCENT, - 'freq_used_per': FREQUENTLY_USED_PERCENT, - 'arc_hits': ARC_HITS, # ARC efficiency - 'arc_misses': ARC_MISSES, - 'demand_data_hits': DEMAND_DATA_HITS, - 'demand_data_misses': DEMAND_DATA_MISSES, - 'demand_meta_hits': DEMAND_METADATA_HITS, - 'demand_meta_misses': DEMAND_METADATA_MISSES, - 'mfu_ghost_hits': MFU_GHOST_HITS, - 'mfu_hits': MFU_HITS, - 'mru_ghost_hits': MRU_GHOST_HITS, - 'mru_hits': MRU_HITS, - 'pre_data_hits': PREFETCH_DATA_HITS, - 'pre_data_misses': PREFETCH_DATA_MISSES, - 'pre_meta_hits': PREFETCH_METADATA_HITS, - 'pre_meta_misses': PREFETCH_METADATA_HITS, - 'anon_hits': ANON_HITS, - 'arc_accesses_total': ARC_ACCESSES_TOTAL, - 'demand_data_total': DEMAND_DATA_TOTAL, - 'pre_data_total': PREFETCH_DATA_TOTAL, - 'real_hits': REAL_HITS, - 'cache_hits_per': CACHE_HIT_PERCENT, 
# ARC efficiency percentages - 'cache_miss_per': CACHE_MISS_PERCENT, - 'actual_hit_per': ACTUAL_HIT_PERCENT, - 'data_demand_per': DATA_DEMAND_PERCENT, - 'data_pre_per': DATA_PREFETCH_PERCENT, - 'anon_hits_per': ANON_HITS_PERCENT, - 'mru_per': MRU_PERCENT, - 'mfu_per': MFU_PERCENT, - 'mru_ghost_per': MRU_GHOST_PERCENT, - 'mfu_ghost_per': MFU_GHOST_PERCENT, - 'demand_hits_per': DEMAND_HITS_PERCENT, - 'pre_hits_per': PREFETCH_HITS_PERCENT, - 'meta_hits_per': METADATA_HITS_PERCENT, - 'pre_meta_hits_per': PREFETCH_METADATA_HITS_PERCENT, - 'demand_misses_per': DEMAND_MISSES_PERCENT, - 'pre_misses_per': PREFETCH_MISSES_PERCENT, - 'meta_misses_per': METADATA_MISSES_PERCENT, - 'pre_meta_misses_per': PREFETCH_METADATA_MISSES_PERCENT, - 'pools': pools + "deleted": DELETED, # ARC misc + "evict_skip": EVICT_SKIP, + "mutex_skip": MUTEX_SKIP, + "recycle_miss": RECYCLE_MISS, + "arc_size": ARC_SIZE, # ARC size + "target_size_max": TARGET_SIZE_MAX, + "target_size_min": TARGET_SIZE_MIN, + "target_size": TARGET_SIZE, + "target_size_per": TARGET_SIZE_PERCENT, + "arc_size_per": ARC_SIZE_PERCENT, + "target_size_arat": TARGET_SIZE_ADAPTIVE_RATIO, + "min_size_per": MIN_SIZE_PERCENT, + "mfu_size": MFU_SIZE, # ARC size breakdown + "p": P, + "rec_used_per": RECENTLY_USED_PERCENT, + "freq_used_per": FREQUENTLY_USED_PERCENT, + "arc_hits": ARC_HITS, # ARC efficiency + "arc_misses": ARC_MISSES, + "demand_data_hits": DEMAND_DATA_HITS, + "demand_data_misses": DEMAND_DATA_MISSES, + "demand_meta_hits": DEMAND_METADATA_HITS, + "demand_meta_misses": DEMAND_METADATA_MISSES, + "mfu_ghost_hits": MFU_GHOST_HITS, + "mfu_hits": MFU_HITS, + "mru_ghost_hits": MRU_GHOST_HITS, + "mru_hits": MRU_HITS, + "pre_data_hits": PREFETCH_DATA_HITS, + "pre_data_misses": PREFETCH_DATA_MISSES, + "pre_meta_hits": PREFETCH_METADATA_HITS, + "pre_meta_misses": PREFETCH_METADATA_HITS, + "anon_hits": ANON_HITS, + "arc_accesses_total": ARC_ACCESSES_TOTAL, + "demand_data_total": DEMAND_DATA_TOTAL, + "pre_data_total": 
PREFETCH_DATA_TOTAL, + "real_hits": REAL_HITS, + "cache_hits_per": CACHE_HIT_PERCENT, # ARC efficiency percentages + "cache_miss_per": CACHE_MISS_PERCENT, + "actual_hit_per": ACTUAL_HIT_PERCENT, + "data_demand_per": DATA_DEMAND_PERCENT, + "data_pre_per": DATA_PREFETCH_PERCENT, + "anon_hits_per": ANON_HITS_PERCENT, + "mru_per": MRU_PERCENT, + "mfu_per": MFU_PERCENT, + "mru_ghost_per": MRU_GHOST_PERCENT, + "mfu_ghost_per": MFU_GHOST_PERCENT, + "demand_hits_per": DEMAND_HITS_PERCENT, + "pre_hits_per": PREFETCH_HITS_PERCENT, + "meta_hits_per": METADATA_HITS_PERCENT, + "pre_meta_hits_per": PREFETCH_METADATA_HITS_PERCENT, + "demand_misses_per": DEMAND_MISSES_PERCENT, + "pre_misses_per": PREFETCH_MISSES_PERCENT, + "meta_misses_per": METADATA_MISSES_PERCENT, + "pre_meta_misses_per": PREFETCH_METADATA_MISSES_PERCENT, + "pools": pools, } print(json.dumps(res)) return 0 -if __name__ == '__main__': + +if __name__ == "__main__": import sys + sys.exit(main(sys.argv[1:])) From d4129607c83d6d0ba0349aa1c8aa35919334cd6f Mon Sep 17 00:00:00 2001 From: Jellyfrog Date: Thu, 18 Mar 2021 20:07:25 +0100 Subject: [PATCH 295/497] Lint more files (#356) --- agent-local/bind | 6 +-- agent-local/check_mrpe | 7 ++- agent-local/dmi | 2 +- agent-local/nfsstats | 2 +- agent-local/rocks.sh | 1 + snmp/Openwrt/wlClients.sh | 9 ++-- snmp/Openwrt/wlFrequency.sh | 5 +-- snmp/Openwrt/wlNoiseFloor.sh | 5 +-- snmp/Openwrt/wlRate.sh | 13 +++--- snmp/Openwrt/wlSNR.sh | 13 +++--- snmp/apache-stats.sh | 3 +- snmp/backupninja.py | 26 +++++------ snmp/chip.sh | 6 +-- snmp/distro | 68 ++++++++++++++--------------- snmp/freeradius.sh | 83 ++++++++++++++++++------------------ snmp/gpsd | 3 +- snmp/icecast-stats.sh | 2 +- snmp/mdadm | 5 ++- snmp/ntp-client | 3 +- snmp/ntp-server.sh | 10 +++-- snmp/phpfpmsp | 12 +++--- snmp/pi-hole | 4 +- snmp/powermon-snmp.py | 4 +- snmp/shoutcast.php | 1 + snmp/voipmon-stats.sh | 2 +- 25 files changed, 148 insertions(+), 147 deletions(-) diff --git a/agent-local/bind 
b/agent-local/bind index ed294e3e2..f30597c0d 100755 --- a/agent-local/bind +++ b/agent-local/bind @@ -4,18 +4,18 @@ # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. -# +# # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. -# +# # You should have received a copy of the GNU General Public License # along with this program. If not, see . stats="/etc/bind/named.stats" echo "<<>>" -> $stats +true > $stats rndc stats && cat $stats diff --git a/agent-local/check_mrpe b/agent-local/check_mrpe index d21b6d9a1..ab6e809ab 100644 --- a/agent-local/check_mrpe +++ b/agent-local/check_mrpe @@ -14,7 +14,7 @@ EXITCODE=3 while getopts "Vha:H:p:" opt; do case $opt in - V) printf "check_mrpe v$VERSION\n" + V) printf 'check_mrpe v%s\n' "$VERSION" exit 0 ;; h) hflag=1 @@ -25,8 +25,7 @@ while getopts "Vha:H:p:" opt; do H) Hflag=1 Hval="$OPTARG" ;; - p) pflag=1 - pval="$OPTARG" + p) pval="$OPTARG" ;; \?) 
hflag=1 EXITCODE=0 @@ -80,4 +79,4 @@ for i in $($BIN_NC -w 1 "$Hval" "$pval" 2>&1 | $BIN_SED '/^<<>>/,/^<<>>' # requires dmidecode for FIELD in bios-vendor bios-version bios-release-date system-manufacturer system-product-name system-version system-serial-number system-uuid baseboard-manufacturer baseboard-product-name baseboard-version baseboard-serial-number baseboard-asset-tag chassis-manufacturer chassis-type chassis-version chassis-serial-number chassis-asset-tag processor-family processor-manufacturer processor-version processor-frequency do - echo $FIELD=$(dmidecode -s $FIELD | grep -v '^#') + echo $FIELD="$(dmidecode -s $FIELD | grep -v '^#')" done diff --git a/agent-local/nfsstats b/agent-local/nfsstats index 404e2fd06..a43afe786 100755 --- a/agent-local/nfsstats +++ b/agent-local/nfsstats @@ -21,7 +21,7 @@ LOG_NEW='/var/cache/librenms/nfsstats_new' $BIN_NFSSTAT -$CFG_NFSVER -n -l | $BIN_TR -s " " | $BIN_CUT -d ' ' -f 5 | $BIN_GREP -v '^$' > $LOG_NEW 2>&1 $BIN_PASTE $LOG_NEW $LOG_OLD | while read a b ; do - echo $(($a - $b)) + echo $(($a - $b)) done $BIN_RM $LOG_OLD 2>&1 diff --git a/agent-local/rocks.sh b/agent-local/rocks.sh index 9ff1ec35e..7636a0274 100755 --- a/agent-local/rocks.sh +++ b/agent-local/rocks.sh @@ -7,6 +7,7 @@ # @author SvennD # required +# shellcheck disable=SC1091 source /etc/profile.d/sge-binaries.sh; QSTAT="/opt/gridengine/bin/linux-x64/qstat" diff --git a/snmp/Openwrt/wlClients.sh b/snmp/Openwrt/wlClients.sh index 5becad170..f454e592f 100755 --- a/snmp/Openwrt/wlClients.sh +++ b/snmp/Openwrt/wlClients.sh @@ -1,5 +1,5 @@ #!/bin/sh - + # wlClients.sh # Counts connected (associated) Wi-Fi devices # Arguments: targed interface. Assumes all interfaces if no argument @@ -12,16 +12,13 @@ if [ $# -gt 1 ]; then fi # Get path to this script -scriptdir=$(dirname $(readlink -f -- "$0")) +scriptdir=$(dirname "$(readlink -f -- "$0")") -# Get hostname, interface list. 
Set target, which is name returned for interface -hostname=$(/bin/uname -n) +# Get interface list. Set target, which is name returned for interface if [ "$1" ]; then interfaces=$1 - target=$1 else interfaces=$(cat "$scriptdir"/wlInterfaces.txt | cut -f 1 -d",") - target=wlan fi # Count associated devices diff --git a/snmp/Openwrt/wlFrequency.sh b/snmp/Openwrt/wlFrequency.sh index 83e68b1d1..658459ab5 100755 --- a/snmp/Openwrt/wlFrequency.sh +++ b/snmp/Openwrt/wlFrequency.sh @@ -1,5 +1,5 @@ #!/bin/sh - + # wlFrequency.sh # Returns wlFrequency, in MHz (not channel number) # Arguments: targed interface @@ -11,8 +11,7 @@ if [ $# -ne 1 ]; then exit 1 fi -# Get hostname, extract frequency -hostname=$(/bin/uname -n) +# Extract frequency frequency=$(/usr/sbin/iw dev "$1" info | /bin/grep channel | /usr/bin/cut -f 2- -s -d" " | /usr/bin/cut -f 2- -s -d"(" | /usr/bin/cut -f 1 -s -d" ") # Return snmp result diff --git a/snmp/Openwrt/wlNoiseFloor.sh b/snmp/Openwrt/wlNoiseFloor.sh index 47d4b4ec2..a3880cf34 100755 --- a/snmp/Openwrt/wlNoiseFloor.sh +++ b/snmp/Openwrt/wlNoiseFloor.sh @@ -1,5 +1,5 @@ #!/bin/sh - + # wlNoiseFloor.sh # Returns wlNoiseFloor, in dBm # Arguments: targed interface @@ -11,9 +11,8 @@ if [ $# -ne 1 ]; then exit 1 fi -# Get hostname, extract noise floor. Note, all associated stations have the same value, so just grab the first one +# Extract noise floor. Note, all associated stations have the same value, so just grab the first one # Use tail, not head (i.e. last line, not first), as head exits immediately, breaks the pipe to cut! 
-hostname=$(/bin/uname -n) noise=$(/usr/bin/iwinfo "$1" assoclist | /usr/bin/cut -s -d "/" -f 2 | /usr/bin/cut -s -d "(" -f 1 | /usr/bin/cut -s -d " " -f 2 | /usr/bin/tail -1) # Return snmp result diff --git a/snmp/Openwrt/wlRate.sh b/snmp/Openwrt/wlRate.sh index 08b68b1bd..6b9072435 100755 --- a/snmp/Openwrt/wlRate.sh +++ b/snmp/Openwrt/wlRate.sh @@ -1,5 +1,5 @@ #!/bin/sh - + # wlRate.sh # Returns wlRate, bit rate in Mbit/s # Arguments: @@ -14,17 +14,16 @@ if [ $# -ne 3 ]; then exit 1 fi -# Get hostname, calculate result. Sum just for debug, and have to return integer +# Calculate result. Sum just for debug, and have to return integer # => If not integer (e.g. 2.67e+07), LibreNMS will drop the exponent (result, 2.67 bits/sec!) -hostname=$(/bin/uname -n) ratelist=$(/usr/sbin/iw dev "$1" station dump | /bin/grep "$2 bitrate" | /usr/bin/cut -f 2 -s -d" ") -if [ "$3" == "sum" ]; then +if [ "$3" = "sum" ]; then result=$(/bin/echo "$ratelist" | /usr/bin/awk -F ':' '{sum += $2} END {printf "%d\n", 1000000*sum}') -elif [ "$3" == "avg" ]; then +elif [ "$3" = "avg" ]; then result=$(/bin/echo "$ratelist" | /usr/bin/awk -F ':' '{sum += $2} END {printf "%d\n", 1000000*sum/NR}') -elif [ "$3" == "min" ]; then +elif [ "$3" = "min" ]; then result=$(/bin/echo "$ratelist" | /usr/bin/awk -F ':' 'NR == 1 || $2 < min {min = $2} END {printf "%d\n", 1000000*min}') -elif [ "$3" == "max" ]; then +elif [ "$3" = "max" ]; then result=$(/bin/echo "$ratelist" | /usr/bin/awk -F ':' 'NR == 1 || $2 > max {max = $2} END {printf "%d\n", 1000000*max}') fi diff --git a/snmp/Openwrt/wlSNR.sh b/snmp/Openwrt/wlSNR.sh index d19283d82..2378c1aac 100755 --- a/snmp/Openwrt/wlSNR.sh +++ b/snmp/Openwrt/wlSNR.sh @@ -1,5 +1,5 @@ #!/bin/sh - + # wlSNR.sh # Returns wlSNR, Signal-to-Noise ratio in dB # Arguments: @@ -13,16 +13,15 @@ if [ $# -ne 2 ]; then exit 1 fi -# Get hostname, calculate result. Sum just for debug, and return integer (safest / easiest) -hostname=$(/bin/uname -n) +# Calculate result. 
Sum just for debug, and return integer (safest / easiest) snrlist=$(/usr/bin/iwinfo "$1" assoclist | /usr/bin/cut -s -d "/" -f 2 | /usr/bin/cut -s -d "(" -f 2 | /usr/bin/cut -s -d " " -f 2 | /usr/bin/cut -s -d ")" -f 1) -if [ "$2" == "sum" ]; then +if [ "$2" = "sum" ]; then result=$(/bin/echo "$snrlist" | /usr/bin/awk -F ':' '{sum += $1} END {printf "%d\n", sum}') -elif [ "$2" == "avg" ]; then +elif [ "$2" = "avg" ]; then result=$(/bin/echo "$snrlist" | /usr/bin/awk -F ':' '{sum += $1} END {printf "%d\n", sum/NR}') -elif [ "$2" == "min" ]; then +elif [ "$2" = "min" ]; then result=$(/bin/echo "$snrlist" | /usr/bin/awk -F ':' 'NR == 1 || $1 < min {min = $1} END {printf "%d\n", min}') -elif [ "$2" == "max" ]; then +elif [ "$2" = "max" ]; then result=$(/bin/echo "$snrlist" | /usr/bin/awk -F ':' 'NR == 1 || $1 > max {max = $1} END {printf "%d\n", max}') fi diff --git a/snmp/apache-stats.sh b/snmp/apache-stats.sh index 9b677fd3a..be14a6917 100755 --- a/snmp/apache-stats.sh +++ b/snmp/apache-stats.sh @@ -12,7 +12,7 @@ PATH=/sbin:/bin:/usr/sbin:/usr/bin function debugecho() { if [ ${#Debug} -gt 0 ]; then - echo debug: $@ + echo debug: "$@" fi } @@ -23,6 +23,7 @@ function debugecho() { Tmp_File=/tmp/apache_status # Debug=on; use environment, i.e. Debug=on apache-stats.sh +# shellcheck disable=SC2153 if [ "${DEBUG}" != "" ]; then Debug=${DEBUG} else diff --git a/snmp/backupninja.py b/snmp/backupninja.py index 80cf55f7f..7ae1b46a9 100644 --- a/snmp/backupninja.py +++ b/snmp/backupninja.py @@ -20,20 +20,20 @@ if not os.path.isfile(logfile): error_string = "file unavailable" error = 1 - break +else: + with io.open(logfile, "r") as f: + for line in reversed(list(f)): + match = re.search( + "^(.*) [a-zA-Z]*: FINISHED: ([0-9]+) actions run. ([0-9]+) fatal. ([0-9]+) error. 
([0-9]+) warning.$", + line, + ) + if match: + backupninja_datas["last_actions"] = int(match.group(2)) + backupninja_datas["last_fatal"] = int(match.group(3)) + backupninja_datas["last_error"] = int(match.group(4)) + backupninja_datas["last_warning"] = int(match.group(5)) + break -with io.open(logfile, "r") as f: - for line in reversed(list(f)): - match = re.search( - "^(.*) [a-zA-Z]*: FINISHED: ([0-9]+) actions run. ([0-9]+) fatal. ([0-9]+) error. ([0-9]+) warning.$", - line, - ) - if match: - backupninja_datas["last_actions"] = int(match.group(2)) - backupninja_datas["last_fatal"] = int(match.group(3)) - backupninja_datas["last_error"] = int(match.group(4)) - backupninja_datas["last_warning"] = int(match.group(5)) - break output = { "version": version, diff --git a/snmp/chip.sh b/snmp/chip.sh index 4dc2fac05..ff2cebbb4 100644 --- a/snmp/chip.sh +++ b/snmp/chip.sh @@ -1,5 +1,5 @@ #!/bin/bash -# Based on https://github.com/Photonicsguy/CHIP +# Based on https://github.com/Photonicsguy/CHIP # Enable ADC registers i2cset -y -f 0 0x34 0x82 0xff @@ -67,7 +67,7 @@ if [ $STATUS_BATCON == 1 ]; then BAT_PERCENT=$(printf "%d" "$REG") else VBAT=0 - BATT_CUR=0 + #BATT_CUR=0 BAT_PERCENT=0 fi @@ -82,6 +82,6 @@ echo $ACIN_C echo $VBUS echo $VBUS_C echo $VBAT -echo $(echo "$BAT_C-$BAT_D"|bc) +echo "$(echo "$BAT_C-$BAT_D"|bc)" echo $BAT_PERCENT echo $STATUS_CHARGING diff --git a/snmp/distro b/snmp/distro index 69cd452a9..f481bbee6 100755 --- a/snmp/distro +++ b/snmp/distro @@ -1,24 +1,24 @@ #!/usr/bin/env sh # Detects which OS and if it is Linux then it will detect which Linux Distribution. 
-OS=`uname -s` -REV=`uname -r` -MACH=`uname -m` +OS=$(uname -s) +REV=$(uname -r) +#MACH=$(uname -m) if [ "${OS}" = "SunOS" ] ; then OS=Solaris - ARCH=`uname -p` - OSSTR="${OS} ${REV}(${ARCH} `uname -v`)" + ARCH=$(uname -p) + OSSTR="${OS} ${REV}(${ARCH} $(uname -v))" elif [ "${OS}" = "AIX" ] ; then - OSSTR="${OS} `oslevel` (`oslevel -r`)" + OSSTR="${OS} $(oslevel) ($(oslevel -r))" elif [ "${OS}" = "Linux" ] ; then - KERNEL=`uname -r` + #KERNEL=$(uname -r) if [ -f /etc/fedora-release ]; then DIST=$(cat /etc/fedora-release | awk '{print $1}') - REV=`cat /etc/fedora-release | sed s/.*release\ // | sed s/\ .*//` + REV=$(cat /etc/fedora-release | sed s/.*release\ // | sed s/\ .*//) elif [ -f /etc/redhat-release ] ; then DIST=$(cat /etc/redhat-release | awk '{print $1}') @@ -29,8 +29,8 @@ elif [ "${OS}" = "Linux" ] ; then DIST="CloudLinux" elif [ "${DIST}" = "Mandriva" ]; then DIST="Mandriva" - PSEUDONAME=`cat /etc/mandriva-release | sed s/.*\(// | sed s/\)//` - REV=`cat /etc/mandriva-release | sed s/.*release\ // | sed s/\ .*//` + #PSEUDONAME=$(cat /etc/mandriva-release | sed s/.*\(// | sed s/\)//) + REV=$(cat /etc/mandriva-release | sed s/.*release\ // | sed s/\ .*//) elif [ -f /etc/oracle-release ]; then DIST="Oracle" elif [ -f /etc/rockstor-release ]; then @@ -39,39 +39,39 @@ elif [ "${OS}" = "Linux" ] ; then DIST="RedHat" fi - PSEUDONAME=`cat /etc/redhat-release | sed s/.*\(// | sed s/\)//` - REV=`cat /etc/redhat-release | sed s/.*release\ // | sed s/\ .*//` + #PSEUDONAME=$(cat /etc/redhat-release | sed s/.*\(// | sed s/\)//) + REV=$(cat /etc/redhat-release | sed s/.*release\ // | sed s/\ .*//) elif [ -f /etc/mandrake-release ] ; then DIST='Mandrake' - PSEUDONAME=`cat /etc/mandrake-release | sed s/.*\(// | sed s/\)//` - REV=`cat /etc/mandrake-release | sed s/.*release\ // | sed s/\ .*//` + #PSEUDONAME=$(cat /etc/mandrake-release | sed s/.*\(// | sed s/\)//) + REV=$(cat /etc/mandrake-release | sed s/.*release\ // | sed s/\ .*//) elif [ -f /etc/devuan_version ] ; then 
- DIST="Devuan `cat /etc/devuan_version`" + DIST="Devuan $(cat /etc/devuan_version)" REV="" elif [ -f /etc/debian_version ] ; then - DIST="Debian `cat /etc/debian_version`" + DIST="Debian $(cat /etc/debian_version)" REV="" IGNORE_OS_RELEASE=1 if [ -f /usr/bin/lsb_release ] ; then - ID=`lsb_release -i | awk -F ':' '{print $2}' | sed 's/ //g'` + ID=$(lsb_release -i | awk -F ':' '{print $2}' | sed 's/ //g') fi if [ "${ID}" = "Raspbian" ] ; then - DIST="Raspbian `cat /etc/debian_version`" + DIST="Raspbian $(cat /etc/debian_version)" fi if [ -f /usr/bin/pveversion ]; then - DIST="${DIST}/PVE `/usr/bin/pveversion | cut -d '/' -f 2`" + DIST="${DIST}/PVE $(/usr/bin/pveversion | cut -d '/' -f 2)" fi if [ -f /usr/bin/pmgversion ]; then # pmgversion requires root permissions to run, please add NOPASSWD setting to visudo. - DIST="${DIST}/PMG `sudo /usr/bin/pmgversion | cut -d '/' -f 2`" + DIST="${DIST}/PMG $(sudo /usr/bin/pmgversion | cut -d '/' -f 2)" fi if [ -f /etc/dogtag ]; then - DIST=`cat /etc/dogtag` + DIST=$(cat /etc/dogtag) fi - + elif [ -f /etc/gentoo-release ] ; then DIST="Gentoo" REV=$(tr -d '[[:alpha:]]' /dev/null 2>&1 + REV=$(nvram show | grep buildno= | egrep -o '[0-9].[0-9].[0-9]') > /dev/null 2>&1 fi fi # try standardized os version methods - if [ -f /etc/os-release -a "${IGNORE_OS_RELEASE}" != 1 ] ; then + if [ -f /etc/os-release ] && [ "${IGNORE_OS_RELEASE}" != 1 ] ; then . 
/etc/os-release STD_DIST="$NAME" STD_REV="$VERSION_ID" - elif [ -f /etc/lsb-release -a "${IGNORE_LSB}" != 1 ] ; then + elif [ -f /etc/lsb-release ] && [ "${IGNORE_LSB}" != 1 ] ; then STD_DIST=$(lsb_release -si) STD_REV=$(lsb_release -sr) fi @@ -133,18 +133,18 @@ elif [ "${OS}" = "Linux" ] ; then elif [ "${OS}" = "Darwin" ] ; then if [ -f /usr/bin/sw_vers ] ; then - OSSTR=`/usr/bin/sw_vers|grep -v Build|sed 's/^.*:.//'| tr "\n" ' '` + OSSTR=$(/usr/bin/sw_vers|grep -v Build|sed 's/^.*:.//'| tr "\n" ' ') fi elif [ "${OS}" = "FreeBSD" ] ; then if [ -f /etc/version ] ; then DIST=$(cat /etc/version | cut -d'-' -f 1) if [ "${DIST}" = "FreeNAS" ]; then - OSSTR=`cat /etc/version | cut -d' ' -f 1` + OSSTR=$(cat /etc/version | cut -d' ' -f 1) fi else - OSSTR=`/usr/bin/uname -mior` + OSSTR=$(/usr/bin/uname -mior) fi fi -echo ${OSSTR} +echo "${OSSTR}" diff --git a/snmp/freeradius.sh b/snmp/freeradius.sh index 8a9423b38..6a0a29fb6 100644 --- a/snmp/freeradius.sh +++ b/snmp/freeradius.sh @@ -11,6 +11,7 @@ RADIUS_PORT='18121' RADIUS_KEY='adminsecret' if [ -f $CONFIGFILE ]; then + # shellcheck disable=SC1090 . 
$CONFIGFILE fi @@ -27,44 +28,44 @@ fi RESULT=$(echo "$RADIUS_STATUS_CMD" | $BIN_RADCLIENT -x $RADIUS_SERVER:$RADIUS_PORT status $RADIUS_KEY) -echo "$RESULT" | grep -o 'FreeRADIUS-Total-Access-Requests = [[:digit:]]*' -echo "$RESULT" | grep -o 'FreeRADIUS-Total-Access-Accepts = [[:digit:]]*' -echo "$RESULT" | grep -o 'FreeRADIUS-Total-Access-Rejects = [[:digit:]]*' -echo "$RESULT" | grep -o 'FreeRADIUS-Total-Access-Challenges = [[:digit:]]*' -echo "$RESULT" | grep -o 'FreeRADIUS-Total-Auth-Responses = [[:digit:]]*' -echo "$RESULT" | grep -o 'FreeRADIUS-Total-Auth-Duplicate-Requests = [[:digit:]]*' -echo "$RESULT" | grep -o 'FreeRADIUS-Total-Auth-Malformed-Requests = [[:digit:]]*' -echo "$RESULT" | grep -o 'FreeRADIUS-Total-Auth-Invalid-Requests = [[:digit:]]*' -echo "$RESULT" | grep -o 'FreeRADIUS-Total-Auth-Dropped-Requests = [[:digit:]]*' -echo "$RESULT" | grep -o 'FreeRADIUS-Total-Auth-Unknown-Types = [[:digit:]]*' -echo "$RESULT" | grep -o 'FreeRADIUS-Total-Accounting-Requests = [[:digit:]]*' -echo "$RESULT" | grep -o 'FreeRADIUS-Total-Accounting-Responses = [[:digit:]]*' -echo "$RESULT" | grep -o 'FreeRADIUS-Total-Acct-Duplicate-Requests = [[:digit:]]*' -echo "$RESULT" | grep -o 'FreeRADIUS-Total-Acct-Malformed-Requests = [[:digit:]]*' -echo "$RESULT" | grep -o 'FreeRADIUS-Total-Acct-Invalid-Requests = [[:digit:]]*' -echo "$RESULT" | grep -o 'FreeRADIUS-Total-Acct-Dropped-Requests = [[:digit:]]*' -echo "$RESULT" | grep -o 'FreeRADIUS-Total-Acct-Unknown-Types = [[:digit:]]*' -echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Access-Requests = [[:digit:]]*' -echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Access-Accepts = [[:digit:]]*' -echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Access-Rejects = [[:digit:]]*' -echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Access-Challenges = [[:digit:]]*' -echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Auth-Responses = [[:digit:]]*' -echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Auth-Duplicate-Requests = 
[[:digit:]]*' -echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Auth-Malformed-Requests = [[:digit:]]*' -echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Auth-Invalid-Requests = [[:digit:]]*' -echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Auth-Dropped-Requests = [[:digit:]]*' -echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Auth-Unknown-Types = [[:digit:]]*' -echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Accounting-Requests = [[:digit:]]*' -echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Accounting-Responses = [[:digit:]]*' -echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Acct-Duplicate-Requests = [[:digit:]]*' -echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Acct-Malformed-Requests = [[:digit:]]*' -echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Acct-Invalid-Requests = [[:digit:]]*' -echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Acct-Dropped-Requests = [[:digit:]]*' -echo "$RESULT" | grep -o 'FreeRADIUS-Total-Proxy-Acct-Unknown-Types = [[:digit:]]*' -echo "$RESULT" | grep -o 'FreeRADIUS-Queue-Len-Internal = [[:digit:]]*' -echo "$RESULT" | grep -o 'FreeRADIUS-Queue-Len-Proxy = [[:digit:]]*' -echo "$RESULT" | grep -o 'FreeRADIUS-Queue-Len-Auth = [[:digit:]]*' -echo "$RESULT" | grep -o 'FreeRADIUS-Queue-Len-Acct = [[:digit:]]*' -echo "$RESULT" | grep -o 'FreeRADIUS-Queue-Len-Detail = [[:digit:]]*' -echo "$RESULT" | grep -o 'FreeRADIUS-Queue-PPS-In = [[:digit:]]*' -echo "$RESULT" | grep -o 'FreeRADIUS-Queue-PPS-Out = [[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Access-Requests = [[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Access-Accepts = [[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Access-Rejects = [[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Access-Challenges = [[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Auth-Responses = [[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Auth-Duplicate-Requests = [[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 
'FreeRADIUS-Total-Auth-Malformed-Requests = [[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Auth-Invalid-Requests = [[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Auth-Dropped-Requests = [[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Auth-Unknown-Types = [[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Accounting-Requests = [[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Accounting-Responses = [[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Acct-Duplicate-Requests = [[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Acct-Malformed-Requests = [[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Acct-Invalid-Requests = [[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Acct-Dropped-Requests = [[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Acct-Unknown-Types = [[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Proxy-Access-Requests = [[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Proxy-Access-Accepts = [[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Proxy-Access-Rejects = [[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Proxy-Access-Challenges = [[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Proxy-Auth-Responses = [[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Proxy-Auth-Duplicate-Requests = [[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Proxy-Auth-Malformed-Requests = [[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Proxy-Auth-Invalid-Requests = [[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Proxy-Auth-Dropped-Requests = [[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Proxy-Auth-Unknown-Types = [[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Proxy-Accounting-Requests = [[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Proxy-Accounting-Responses = 
[[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Proxy-Acct-Duplicate-Requests = [[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Proxy-Acct-Malformed-Requests = [[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Proxy-Acct-Invalid-Requests = [[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Proxy-Acct-Dropped-Requests = [[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Proxy-Acct-Unknown-Types = [[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Queue-Len-Internal = [[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Queue-Len-Proxy = [[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Queue-Len-Auth = [[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Queue-Len-Acct = [[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Queue-Len-Detail = [[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Queue-PPS-In = [[:digit:]]*' +echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Queue-PPS-Out = [[:digit:]]*' diff --git a/snmp/gpsd b/snmp/gpsd index eed38c4bc..913f43d95 100755 --- a/snmp/gpsd +++ b/snmp/gpsd @@ -18,12 +18,13 @@ BIN_PYTHON='/usr/bin/env python' # Check for config file CONFIG=$0".conf" if [ -f "$CONFIG" ]; then + # shellcheck disable=SC1090 . 
"$CONFIG" fi # Create Temp File TMPFILE=$(mktemp) -trap "rm -f $TMPFILE" 0 2 3 15 +trap 'rm -f $TMPFILE' 0 2 3 15 # Write GPSPIPE Data to Temp File $BIN_GPIPE -w -n 20 > "$TMPFILE" diff --git a/snmp/icecast-stats.sh b/snmp/icecast-stats.sh index 541c174c8..e373f6f97 100644 --- a/snmp/icecast-stats.sh +++ b/snmp/icecast-stats.sh @@ -5,7 +5,7 @@ used_memory=$(ps -U icecast -o rsz | awk 'FNR==2{print}') cpu_load=$(ps -U icecast -o %cpu | awk 'FNR==2{print}') pid=$(pidof icecast) -total_files=$(ls -l /proc/"${pid}"/fd | wc -l) +total_files=$(find /proc/"${pid}"/fd | wc -l) echo "Used Memory=""$used_memory" echo "CPU Load=""$cpu_load" diff --git a/snmp/mdadm b/snmp/mdadm index 8565f8d69..5e820c808 100755 --- a/snmp/mdadm +++ b/snmp/mdadm @@ -7,7 +7,8 @@ REALPATH=/usr/bin/realpath CONFIGFILE=/etc/snmp/mdadm.conf if [ -f $CONFIGFILE ] ; then - . $CONFIGFILE + # shellcheck disable=SC1090 + . $CONFIGFILE fi VERSION=1 @@ -19,7 +20,7 @@ OUTPUT_DATA='[' # use 'ls' command to check if md blocks exist if $LS /dev/md?* 1> /dev/null 2>&1 ; then for ARRAY_BLOCKDEVICE in $($LS -1 /dev/md?*) ; do - RAID="/sys/block/"$($BASENAME $($REALPATH "$ARRAY_BLOCKDEVICE")) + RAID="/sys/block/"$($BASENAME "$($REALPATH "$ARRAY_BLOCKDEVICE")") # ignore arrays with no slaves if [ -z "$($LS -1 "$RAID"/slaves 2> /dev/null)" ] ; then diff --git a/snmp/ntp-client b/snmp/ntp-client index 0df9ee07b..eccb5e50c 100755 --- a/snmp/ntp-client +++ b/snmp/ntp-client @@ -13,13 +13,12 @@ # Don't change anything unless you know what are you doing # ################################################################ BIN_NTPQ='/usr/bin/env ntpq' -BIN_NTPD='/usr/bin/env ntpd' BIN_GREP='/usr/bin/env grep' BIN_AWK='/usr/bin/env awk' -BIN_HEAD='/usr/bin/env head' CONFIG=$0".conf" if [ -f "$CONFIG" ]; then + # shellcheck disable=SC1090 . 
"$CONFIG" fi diff --git a/snmp/ntp-server.sh b/snmp/ntp-server.sh index 6fa2f6908..ba1af1593 100755 --- a/snmp/ntp-server.sh +++ b/snmp/ntp-server.sh @@ -3,7 +3,7 @@ # Alternatively you can put them in $0.conf, meaning if you've named # this script ntp-client.sh then it must go in ntp-client.sh.conf . # -# NTPQV output version of "ntpq -c rv" +# NTPQV output version of "ntpq -c rv" # p1 DD-WRT and some other outdated linux distros # p11 FreeBSD 11 and any linux distro that is up to date # @@ -16,7 +16,8 @@ CONFIGFILE=/etc/snmp/ntp-server.conf BIN_ENV='/usr/bin/env' if [ -f $CONFIGFILE ] ; then - . $CONFIGFILE + # shellcheck disable=SC1090 + . $CONFIGFILE fi BIN_NTPD="$BIN_ENV ntpd" @@ -34,7 +35,8 @@ NTPQV="p11" ################################################################ CONFIG=$0".conf" if [ -f "$CONFIG" ]; then - . "$CONFIG" + # shellcheck disable=SC1090 + . "$CONFIG" fi VERSION=1 @@ -75,7 +77,7 @@ IGNOREDPACKETS=$(echo "$CMD2" | $BIN_AWK -F ' ' '{print $7}') RECEIVEDPACKETS=$(echo "$CMD2" | $BIN_AWK -F ' ' '{print $8}') PACKETSSENT=$(echo "$CMD2" | $BIN_AWK -F ' ' '{print $9}') PACKETSENDFAILURES=$(echo "$CMD2" | $BIN_AWK -F ' ' '{print $10}') -INPUTWAKEUPS=$(echo "$CMD2" | $BIN_AWK -F ' ' '{print $11}') +#INPUTWAKEUPS=$(echo "$CMD2" | $BIN_AWK -F ' ' '{print $11}') USEFULINPUTWAKEUPS=$(echo "$CMD2" | $BIN_AWK -F ' ' '{print $12}') echo '{"data":{"offset":"'"$OFFSET"\ diff --git a/snmp/phpfpmsp b/snmp/phpfpmsp index a4d7a4339..6fbf22c57 100644 --- a/snmp/phpfpmsp +++ b/snmp/phpfpmsp @@ -52,13 +52,13 @@ opts="" # Contributed by @safeie with PR #276 # Modified to work as a SNMP extend by Zane C. 
Bowers-Hadley -declare -A phpfpm_urls=() -declare -A phpfpm_curl_opts=() +#declare -A phpfpm_urls=() +#declare -A phpfpm_curl_opts=() # _update_every is a special variable - it holds the number of seconds # between the calls of the _update() function -phpfpm_update_every= -phpfpm_priority=60000 +#phpfpm_update_every= +#phpfpm_priority=60000 declare -a phpfpm_response=() phpfpm_pool="" @@ -78,8 +78,8 @@ phpfpm_slow_requests=0 # local opts="${1}" url="${2}" - phpfpm_response=($(curl -Ss "${opts}" "${url}")) - [ $? -ne 0 -o "${#phpfpm_response[@]}" -eq 0 ] && exit 1 + phpfpm_response=("$(curl -Ss "${opts}" "${url}")") + [ $? -ne 0 ] || [ "${#phpfpm_response[@]}" -eq 0 ] && exit 1 if [[ "${phpfpm_response[0]}" != "pool:" \ || "${phpfpm_response[2]}" != "process" \ diff --git a/snmp/pi-hole b/snmp/pi-hole index 342ef105b..2a7682f9f 100644 --- a/snmp/pi-hole +++ b/snmp/pi-hole @@ -12,11 +12,13 @@ PICONFIGFILE='/etc/pihole/setupVars.conf' DHCPLEASEFILE='/etc/pihole/dhcp.leases' if [ -f $CONFIGFILE ]; then + # shellcheck disable=SC1090 . $CONFIGFILE fi # read in pi-hole variables for DHCP range if [ -f $PICONFIGFILE ]; then + # shellcheck disable=SC1090 . 
$PICONFIGFILE fi @@ -74,7 +76,7 @@ debug() { fi if [ -f $PICONFIGFILE ]; then echo '[ok] Pi-Hole config file exists, DHCP stats will be captured if scope active' - else + else echo '[error] Pi-Hole config file does not exist, DHCP stats will not be captured if used' fi if [ -f $DHCPLEASEFILE ]; then diff --git a/snmp/powermon-snmp.py b/snmp/powermon-snmp.py index e280fe710..d9f179c1b 100755 --- a/snmp/powermon-snmp.py +++ b/snmp/powermon-snmp.py @@ -98,7 +98,7 @@ + " [-v|--verbose] [-w|--warnings] | -l|--list-methods | -h|--help" ) methods = ["sensors", "hpasmcli"] -# costPerkWh = 0.15 # <<<< UNCOMMENT +# costPerkWh = 0.15 # <<<< CHANGE ### General functions @@ -352,7 +352,7 @@ def getHPASMData(): # Get data data = getData(method) data["supply"] = {} -data["supply"]["rate"] = costPerkWh +data["supply"]["rate"] = costPerkWh # pylint: disable=E0602 # Top-level reading # CUSTOMISE THIS FOR YOUR HOST diff --git a/snmp/shoutcast.php b/snmp/shoutcast.php index 637d3abe4..05ee52d47 100755 --- a/snmp/shoutcast.php +++ b/snmp/shoutcast.php @@ -39,6 +39,7 @@ function get_data($host, $port) { + $rawdata = null; $fp = @fsockopen($host, $port, $errno, $errstr, 5); if (!$fp) { $connect = 0; diff --git a/snmp/voipmon-stats.sh b/snmp/voipmon-stats.sh index 1dcab4d9f..66f943941 100644 --- a/snmp/voipmon-stats.sh +++ b/snmp/voipmon-stats.sh @@ -5,7 +5,7 @@ used_memory=$(ps -C voipmonitor -o rsz | awk 'FNR==2 {print}') cpu_load=$(ps -C voipmonitor -o %cpu | awk 'FNR==2 {print}') pid=$(pidof voipmonitor) -total_files=$(ls -l /proc/"${pid}"/fd | wc -l) +total_files=$(find /proc/"${pid}"/fd | wc -l) echo "Used Memory=""$used_memory" echo "CPU Load=""$cpu_load" From eb74a35026278672d17738f0047c304bdaf33aae Mon Sep 17 00:00:00 2001 From: Serphentas Date: Thu, 18 Mar 2021 20:11:19 +0100 Subject: [PATCH 296/497] Add chrony support (#345) * initial chronyc json sampler * make use of standard data format * fix last_rx json key --- snmp/chrony | 111 
++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 111 insertions(+) create mode 100644 snmp/chrony diff --git a/snmp/chrony b/snmp/chrony new file mode 100644 index 000000000..08cca7139 --- /dev/null +++ b/snmp/chrony @@ -0,0 +1,111 @@ +#!/usr/bin/env python3 +import json +import shlex +import subprocess + +VERSION = 1 + +def proc_err(cmd, proc): + # output process error and first line of error code + return "{}{}".format( + subprocess.CalledProcessError(proc.returncode, cmd, proc.stderr), + " ({})".format(proc.stderr.splitlines()[0]) if proc.stderr.splitlines() else "" + ) + +def print_data(data, error, error_msg): + print(json.dumps({ + 'data': data, + 'error': error, + 'errorString': error_msg, + 'version': VERSION + })) + +def main(args): + CSV_HEADERS = { + 'tracking': [ + 'reference_name', + 'reference_type', + 'stratum', + 'reference_time', + 'system_time', + 'last_offset', + 'rms_offset', + 'frequency', + 'residual_frequency', + 'skew', + 'root_delay', + 'root_dispersion', + 'update_interval', + 'leap_status', + ], + 'sources': [ + 'source_mode', + 'source_state', + 'source_name', + 'stratum', + 'polling_rate', + 'reachability', + 'last_rx', + 'adjusted_offset', + 'measured_offset', + 'estimated_error' + ], + 'sourcestats': [ + 'source_name', + 'number_samplepoints', + 'number_runs', + 'span', + 'frequency', + 'frequency_skew', + 'offset', + 'stddev', + ] + } + DATA = { + 'tracking': {}, + 'sources': [] + } + ERROR = False + ERROR_MSG = '' + + # get and set tracking data + rc, tracking = subprocess.getstatusoutput('chronyc -c tracking') + if rc != 0: + print_data(DATA, rc, tracking) + return 1 + tracking = tracking.split(',') + for i in range(0, len(CSV_HEADERS['tracking'])): + DATA['tracking'][CSV_HEADERS['tracking'][i]] = tracking[i] + + # get sources + sourcestats data + rc, sources = subprocess.getstatusoutput('chronyc -c sources') + if rc != 0: + print_data(DATA, rc, sources) + return 1 + sources = sources.split('\n') + rc, 
sourcestats = subprocess.getstatusoutput('chronyc -c sourcestats') + if rc != 0: + print_data(DATA, rc, sourcestats) + return 1 + sourcestats = sourcestats.split('\n') + + # mix sources and sourcestats + for i in range(0, len(sources)): + source = sources[i].split(',') + stats = sourcestats[i].split(',') + data = {} + + for j in range(0, len(CSV_HEADERS['sources'])): + data[CSV_HEADERS['sources'][j]] = source[j] + for j in range(0, len(CSV_HEADERS['sourcestats'])): + data[CSV_HEADERS['sourcestats'][j]] = stats[j] + + DATA['sources'].append(data) + + print_data(DATA, ERROR, ERROR_MSG) + + return 0 + +if __name__ == '__main__': + import sys + sys.exit(main(sys.argv[1:])) From 5841c2eee801d6d9844f5f757b54f6031d509944 Mon Sep 17 00:00:00 2001 From: Jellyfrog Date: Thu, 18 Mar 2021 20:12:42 +0100 Subject: [PATCH 297/497] Add linter (#357) --- .github/workflows/linter.yml | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) create mode 100644 .github/workflows/linter.yml diff --git a/.github/workflows/linter.yml b/.github/workflows/linter.yml new file mode 100644 index 000000000..5c2015aef --- /dev/null +++ b/.github/workflows/linter.yml @@ -0,0 +1,34 @@ +name: Lint Code Base + +on: + push: + pull_request: + branches: [master] + +jobs: + build: + name: Lint Code Base + runs-on: ubuntu-latest + + steps: + - name: Checkout Code + uses: actions/checkout@v2 + with: + # Full git history is needed to get a proper list of changed files within `super-linter` + fetch-depth: 0 + + - name: Lint Code Base + uses: github/super-linter@v3.15.3 + env: + FILTER_REGEX_INCLUDE: .*(agent-local|snmp)/.* + SUPPRESS_POSSUM: true + + VALIDATE_BASH_EXEC: false + VALIDATE_PYTHON_FLAKE8: false + VALIDATE_PHP_PHPCS: false + VALIDATE_PHP_PSALM: false + + SHELLCHECK_OPTS: --severity=warning + + DEFAULT_BRANCH: master + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} From 086af1f1c36f6fa36f486fbff54486a4e311f923 Mon Sep 17 00:00:00 2001 From: Jellyfrog Date: Fri, 19 Mar 2021 00:27:48 
+0100 Subject: [PATCH 298/497] Update linter.yml --- .github/workflows/linter.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/linter.yml b/.github/workflows/linter.yml index 5c2015aef..0a776871b 100644 --- a/.github/workflows/linter.yml +++ b/.github/workflows/linter.yml @@ -20,7 +20,7 @@ jobs: - name: Lint Code Base uses: github/super-linter@v3.15.3 env: - FILTER_REGEX_INCLUDE: .*(agent-local|snmp)/.* + FILTER_REGEX_EXCLUDE: check_mk_agent.* SUPPRESS_POSSUM: true VALIDATE_BASH_EXEC: false From 6e3e50194db81b9f86da181e3285dff7b2b6f049 Mon Sep 17 00:00:00 2001 From: Jellyfrog Date: Mon, 22 Mar 2021 12:59:04 +0100 Subject: [PATCH 299/497] Fix CI --- .github/workflows/linter.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/linter.yml b/.github/workflows/linter.yml index 0a776871b..1e65ec1a1 100644 --- a/.github/workflows/linter.yml +++ b/.github/workflows/linter.yml @@ -20,7 +20,8 @@ jobs: - name: Lint Code Base uses: github/super-linter@v3.15.3 env: - FILTER_REGEX_EXCLUDE: check_mk_agent.* + FILTER_REGEX_INCLUDE: .*(agent-local|snmp)/.* + VALIDATE_ALL_CODEBASE: false SUPPRESS_POSSUM: true VALIDATE_BASH_EXEC: false From 8bd6d1dfae881fea5c795be71f9aab87d98fac87 Mon Sep 17 00:00:00 2001 From: priiduonu Date: Mon, 22 Mar 2021 15:09:20 +0200 Subject: [PATCH 300/497] Update ups-apcups.sh (#361) Filter out `LINEV` value as some APC models also return `MAXLINEV` and `MINLINEV` values in `apcaccess` output, therefore ruining the final output. 
--- snmp/ups-apcups.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/snmp/ups-apcups.sh b/snmp/ups-apcups.sh index 64b55c30f..378f2d6f3 100755 --- a/snmp/ups-apcups.sh +++ b/snmp/ups-apcups.sh @@ -19,7 +19,7 @@ BIN_GREP='/usr/bin/grep' ################################################################ TMP=$($BIN_APCS 2>/dev/null) -for value in "LINEV:[0-9]+" "LOADPCT:[0-9.]+" "BCHARGE:[0-9.]+" "TIMELEFT:[0-9.]+" "^BATTV:[0-9.]+" "NOMINV:[0-9]+" "NOMBATTV:[0-9.]+" +for value in "^LINEV:[0-9]+" "LOADPCT:[0-9.]+" "BCHARGE:[0-9.]+" "TIMELEFT:[0-9.]+" "^BATTV:[0-9.]+" "NOMINV:[0-9]+" "NOMBATTV:[0-9.]+" do OUT=$(echo "$TMP" | $BIN_TR -d ' ' | $BIN_GREP -Eo "$value" | $BIN_CUT -d ":" -f 2) if [ -n "$OUT" ]; then @@ -27,4 +27,4 @@ do else echo "Unknown" fi -done \ No newline at end of file +done From 1222c3c2ce4ab1296930904ec24f394cdec2cb96 Mon Sep 17 00:00:00 2001 From: Karl Shea Date: Mon, 22 Mar 2021 08:10:13 -0500 Subject: [PATCH 301/497] Allow configuring the number of lines read (#358) --- snmp/gpsd | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/snmp/gpsd b/snmp/gpsd index 913f43d95..8844bc0a4 100755 --- a/snmp/gpsd +++ b/snmp/gpsd @@ -14,6 +14,7 @@ BIN_GPIPE='/usr/bin/env gpspipe' BIN_GREP='/usr/bin/env grep' BIN_PYTHON='/usr/bin/env python' +LINES=20 # Check for config file CONFIG=$0".conf" @@ -27,7 +28,7 @@ TMPFILE=$(mktemp) trap 'rm -f $TMPFILE' 0 2 3 15 # Write GPSPIPE Data to Temp File -$BIN_GPIPE -w -n 20 > "$TMPFILE" +$BIN_GPIPE -w -n $LINES > "$TMPFILE" # Parse Temp file for GPSD Data VERSION=$(cat "$TMPFILE" | $BIN_GREP -m 1 "VERSION" | $BIN_PYTHON -c 'import sys,json;print json.load(sys.stdin)["rev"]') From 84b74856d86ccf1058e8c0c38b1e3370b204e913 Mon Sep 17 00:00:00 2001 From: Wheel Date: Thu, 25 Mar 2021 21:14:43 -0400 Subject: [PATCH 302/497] Minor script reverts (osupdate, phpfpm) (#362) * Revert changes made in #355 that broke the script. 
* Revert partial changes made in #356 By just removing the quotes in line 81 i could make the script work again * osupdate yum revert Realized i had a centos to test * Fix curl error `curl: (3) URL using bad/illegal format or missing URL` Not sure how to properly fix it but moving the hardcode flags to the variable so its not empty did fix it. The curl error caused to push down all values 1 line which made them mismatch in librenms. * Update osupdate * Update phpfpmsp Co-authored-by: Jellyfrog --- snmp/osupdate | 18 ++++++++++++------ snmp/phpfpmsp | 6 +++--- 2 files changed, 15 insertions(+), 9 deletions(-) diff --git a/snmp/osupdate b/snmp/osupdate index 1f4f94852..11a6d9a9b 100755 --- a/snmp/osupdate +++ b/snmp/osupdate @@ -34,7 +34,8 @@ CMD_APK=' version' ################################################################ if command -v zypper &>/dev/null ; then # OpenSUSE - UPDATES=$($BIN_ZYPPER "$CMD_ZYPPER" | $BIN_WC $CMD_WC) + # shellcheck disable=SC2086 + UPDATES=$($BIN_ZYPPER $CMD_ZYPPER | $BIN_WC $CMD_WC) if [ "$UPDATES" -ge 2 ]; then echo $(($UPDATES-2)); else @@ -42,7 +43,8 @@ if command -v zypper &>/dev/null ; then fi elif command -v dnf &>/dev/null ; then # Fedora - UPDATES=$($BIN_DNF "$CMD_DNF" | $BIN_WC $CMD_WC) + # shellcheck disable=SC2086 + UPDATES=$($BIN_DNF $CMD_DNF | $BIN_WC $CMD_WC) if [ "$UPDATES" -ge 1 ]; then echo $(($UPDATES-1)); else @@ -58,7 +60,8 @@ elif command -v pacman &>/dev/null ; then fi elif command -v yum &>/dev/null ; then # CentOS / Redhat - UPDATES=$($BIN_YUM "$CMD_YUM" | $BIN_WC $CMD_WC) + # shellcheck disable=SC2086 + UPDATES=$($BIN_YUM $CMD_YUM | $BIN_WC $CMD_WC) if [ "$UPDATES" -ge 1 ]; then echo $(($UPDATES-1)); else @@ -66,7 +69,8 @@ elif command -v yum &>/dev/null ; then fi elif command -v apt-get &>/dev/null ; then # Debian / Devuan / Ubuntu - UPDATES=$($BIN_APT "$CMD_APT" | $BIN_GREP $CMD_GREP 'Inst') + # shellcheck disable=SC2086 + UPDATES=$($BIN_APT $CMD_APT | $BIN_GREP $CMD_GREP 'Inst') if [ "$UPDATES" -ge 1 ]; then 
echo "$UPDATES"; else @@ -74,7 +78,8 @@ elif command -v apt-get &>/dev/null ; then fi elif command -v pkg &>/dev/null ; then # FreeBSD - UPDATES=$($BIN_PKG "$CMD_PKG" | $BIN_WC $CMD_WC) + # shellcheck disable=SC2086 + UPDATES=$($BIN_PKG $CMD_PKG | $BIN_WC $CMD_WC) if [ "$UPDATES" -ge 1 ]; then echo "$UPDATES"; else @@ -82,7 +87,8 @@ elif command -v pkg &>/dev/null ; then fi elif command -v apk &>/dev/null ; then # Alpine - UPDATES=$($BIN_APK "$CMD_APK" | $BIN_WC $CMD_WC) + # shellcheck disable=SC2086 + UPDATES=$($BIN_APK $CMD_APK | $BIN_WC $CMD_WC) if [ "$UPDATES" -ge 2 ]; then echo $(($UPDATES-1)); else diff --git a/snmp/phpfpmsp b/snmp/phpfpmsp index 6fbf22c57..481dc5d2d 100644 --- a/snmp/phpfpmsp +++ b/snmp/phpfpmsp @@ -42,7 +42,7 @@ # the URL to fetch, change as needed url="http://localhost/status?full" -opts="" +opts="-Ss" # netdata # real-time performance and health monitoring, done right! @@ -77,8 +77,8 @@ phpfpm_slow_requests=0 # local opts="${1}" url="${2}" - - phpfpm_response=("$(curl -Ss "${opts}" "${url}")") + # shellcheck disable=SC2207 + phpfpm_response=($(curl "${opts}" "${url}")) [ $? 
-ne 0 ] || [ "${#phpfpm_response[@]}" -eq 0 ] && exit 1 if [[ "${phpfpm_response[0]}" != "pool:" \ From bfe18fe32b278eebce4f66949060b213adaa16a9 Mon Sep 17 00:00:00 2001 From: Jellyfrog Date: Fri, 26 Mar 2021 11:02:01 +0100 Subject: [PATCH 303/497] Fix regressions after linting (#363) --- snmp/distro | 2 +- snmp/ntp-server.sh | 65 ++++++++++++++++++++++++++++++---------------- snmp/sdfsinfo | 3 ++- 3 files changed, 46 insertions(+), 24 deletions(-) diff --git a/snmp/distro b/snmp/distro index f481bbee6..56ae5e940 100755 --- a/snmp/distro +++ b/snmp/distro @@ -56,7 +56,7 @@ elif [ "${OS}" = "Linux" ] ; then REV="" IGNORE_OS_RELEASE=1 if [ -f /usr/bin/lsb_release ] ; then - ID=$(lsb_release -i | awk -F ':' '{print $2}' | sed 's/ //g') + ID=$(lsb_release -i | awk -F ':' '{print $2}' | sed 's/\s//g') fi if [ "${ID}" = "Raspbian" ] ; then DIST="Raspbian $(cat /etc/debian_version)" diff --git a/snmp/ntp-server.sh b/snmp/ntp-server.sh index ba1af1593..30c722041 100755 --- a/snmp/ntp-server.sh +++ b/snmp/ntp-server.sh @@ -45,18 +45,28 @@ STRATUM=$($BIN_NTPQ -c rv | $BIN_GREP -Eow "stratum=[0-9]+" | $BIN_CUT -d "=" -f # parse the ntpq info that requires version specific info NTPQ_RAW=$($BIN_NTPQ -c rv | $BIN_GREP jitter | $BIN_SED 's/[[:alpha:]=,_]/ /g') if [ $NTPQV = "p11" ]; then - OFFSET=$(echo "$NTPQ_RAW" | $BIN_AWK -F ' ' '{print $3}') - FREQUENCY=$(echo "$NTPQ_RAW" | $BIN_AWK -F ' ' '{print $4}') - SYS_JITTER=$(echo "$NTPQ_RAW" | $BIN_AWK -F ' ' '{print $5}') - CLK_JITTER=$(echo "$NTPQ_RAW" | $BIN_AWK -F ' ' '{print $6}') - CLK_WANDER=$(echo "$NTPQ_RAW" | $BIN_AWK -F ' ' '{print $7}') + # shellcheck disable=SC2086 + OFFSET=$(echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $3}') + # shellcheck disable=SC2086 + FREQUENCY=$(echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $4}') + # shellcheck disable=SC2086 + SYS_JITTER=$(echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $5}') + # shellcheck disable=SC2086 + CLK_JITTER=$(echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $6}') + # shellcheck 
disable=SC2086 + CLK_WANDER=$(echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $7}') fi if [ $NTPQV = "p1" ]; then - OFFSET=$(echo "$NTPQ_RAW" | $BIN_AWK -F ' ' '{print $2}') - FREQUENCY=$(echo "$NTPQ_RAW" | $BIN_AWK -F ' ' '{print $3}') - SYS_JITTER=$(echo "$NTPQ_RAW" | $BIN_AWK -F ' ' '{print $4}') - CLK_JITTER=$(echo "$NTPQ_RAW" | $BIN_AWK -F ' ' '{print $5}') - CLK_WANDER=$(echo "$NTPQ_RAW" | $BIN_AWK -F ' ' '{print $6}') + # shellcheck disable=SC2086 + OFFSET=$(echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $2}') + # shellcheck disable=SC2086 + FREQUENCY=$(echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $3}') + # shellcheck disable=SC2086 + SYS_JITTER=$(echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $4}') + # shellcheck disable=SC2086 + CLK_JITTER=$(echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $5}') + # shellcheck disable=SC2086 + CLK_WANDER=$(echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $6}') fi VER=$($BIN_NTPD --version) @@ -67,18 +77,29 @@ else fi CMD2=$($USECMD | $BIN_TR -d ' ' | $BIN_CUT -d : -f 2 | $BIN_TR '\n' ' ') -TIMESINCERESET=$(echo "$CMD2" | $BIN_AWK -F ' ' '{print $1}') -RECEIVEDBUFFERS=$(echo "$CMD2" | $BIN_AWK -F ' ' '{print $2}') -FREERECEIVEBUFFERS=$(echo "$CMD2" | $BIN_AWK -F ' ' '{print $3}') -USEDRECEIVEBUFFERS=$(echo "$CMD2" | $BIN_AWK -F ' ' '{print $4}') -LOWWATERREFILLS=$(echo "$CMD2" | $BIN_AWK -F ' ' '{print $5}') -DROPPEDPACKETS=$(echo "$CMD2" | $BIN_AWK -F ' ' '{print $6}') -IGNOREDPACKETS=$(echo "$CMD2" | $BIN_AWK -F ' ' '{print $7}') -RECEIVEDPACKETS=$(echo "$CMD2" | $BIN_AWK -F ' ' '{print $8}') -PACKETSSENT=$(echo "$CMD2" | $BIN_AWK -F ' ' '{print $9}') -PACKETSENDFAILURES=$(echo "$CMD2" | $BIN_AWK -F ' ' '{print $10}') -#INPUTWAKEUPS=$(echo "$CMD2" | $BIN_AWK -F ' ' '{print $11}') -USEFULINPUTWAKEUPS=$(echo "$CMD2" | $BIN_AWK -F ' ' '{print $12}') +# shellcheck disable=SC2086 +TIMESINCERESET=$(echo $CMD2 | $BIN_AWK -F ' ' '{print $1}') +# shellcheck disable=SC2086 +RECEIVEDBUFFERS=$(echo $CMD2 | $BIN_AWK -F ' ' '{print $2}') +# shellcheck disable=SC2086 
+FREERECEIVEBUFFERS=$(echo $CMD2 | $BIN_AWK -F ' ' '{print $3}') +# shellcheck disable=SC2086 +USEDRECEIVEBUFFERS=$(echo $CMD2 | $BIN_AWK -F ' ' '{print $4}') +# shellcheck disable=SC2086 +LOWWATERREFILLS=$(echo $CMD2 | $BIN_AWK -F ' ' '{print $5}') +# shellcheck disable=SC2086 +DROPPEDPACKETS=$(echo $CMD2 | $BIN_AWK -F ' ' '{print $6}') +# shellcheck disable=SC2086 +IGNOREDPACKETS=$(echo $CMD2 | $BIN_AWK -F ' ' '{print $7}') +# shellcheck disable=SC2086 +RECEIVEDPACKETS=$(echo $CMD2 | $BIN_AWK -F ' ' '{print $8}') +# shellcheck disable=SC2086 +PACKETSSENT=$(echo $CMD2 | $BIN_AWK -F ' ' '{print $9}') +# shellcheck disable=SC2086 +PACKETSENDFAILURES=$(echo $CMD2 | $BIN_AWK -F ' ' '{print $10}') +#INPUTWAKEUPS=$(echo $CMD2 | $BIN_AWK -F ' ' '{print $11}') +# shellcheck disable=SC2086 +USEFULINPUTWAKEUPS=$(echo $CMD2 | $BIN_AWK -F ' ' '{print $12}') echo '{"data":{"offset":"'"$OFFSET"\ '","frequency":"'"$FREQUENCY"\ diff --git a/snmp/sdfsinfo b/snmp/sdfsinfo index 6c83d241d..f65a4a631 100644 --- a/snmp/sdfsinfo +++ b/snmp/sdfsinfo @@ -22,5 +22,6 @@ SDFSCLI_BIN=$(which sdfscli) SDFSCLI_CMD=' --volume-info' GREP_BIN=$(which grep) GREP_CMD=' -o -E ' -DATAPOINTS=$($SDFSCLI_BIN "$SDFSCLI_CMD" | $GREP_BIN "$GREP_CMD" "(([0-9]+)\.?([0-9]+)?)") +# shellcheck disable=SC2086 +DATAPOINTS=$($SDFSCLI_BIN $SDFSCLI_CMD | $GREP_BIN $GREP_CMD "(([0-9]+)\.?([0-9]+)?)") echo "$DATAPOINTS" From 9ca59f93ec71d2c29c7c70fa76876df611d2358e Mon Sep 17 00:00:00 2001 From: Denny Friebe Date: Tue, 20 Apr 2021 23:32:22 +0200 Subject: [PATCH 304/497] Raspberry Pi: Add SNMP extend to monitor IO pins or sensor modules connected to the GPIO header (#364) * Raspberry Pi: Add SNMP extend to monitor IO pins or sensor modules connected to the GPIO header * Raspberry Pi: Add missing sensor types --- snmp/rpigpiomonitor.ini | 92 +++++++++++++ snmp/rpigpiomonitor.php | 290 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 382 insertions(+) create mode 100644 snmp/rpigpiomonitor.ini create mode 100755 
snmp/rpigpiomonitor.php diff --git a/snmp/rpigpiomonitor.ini b/snmp/rpigpiomonitor.ini new file mode 100644 index 000000000..777ee5415 --- /dev/null +++ b/snmp/rpigpiomonitor.ini @@ -0,0 +1,92 @@ +; Note +; +; If a configured sensor does not appear in librenms, this may be due to a faulty configuration. +; After the configuration has been changed, it can be checked for possible errors using "rpigpiomonitor.php -validate". +; +; Any change to your configuration requires a manual rediscover of your device. Otherwise, your changes will only be visible once librenms performs an automatic rediscover. + +; Sensor section +; +; Multiple use of the identical name can lead to overwriting of the state configuration of the first sensor with the same name when using states. +; This should therefore only be used once per configuration if possible. When using multiple Raspberry's, this can be used once per device because the serial number of the Raspberry is added to each sensor. +[sensorexample1] + +; Sensor type +; +; This defines the sensor type. +; When reading a normal IO contact, the use of "state" is recommended. +; The following types are possible: +; airflow, ber, charge, chromatic_dispersion, cooling, count, current, dbm, delay, eer, fanspeed +; frequency, humidity, load, loss, power, power_consumed, power_factor, pressure, quality_factor +; runtime, signal, snr, state, temperature, tv_signal, voltage, waterflow, percent +type = state + +; Sensor description +; +; This defines the sensor description which can be seen later in the respective device overview or in the graph. +description = Cabinet door + +; Sensor limits +; +; This defines the limits from when the respective alarm should be triggered. +; Only numeric values are valid! +; If a certain alarm or even all of them should not be used, these values can be omitted or commented out. 
+;lowlimit = 0 +;lowwarnlimit = 5 +;warnlimit = 30 +;highlimit = 35 + +; Sensor states +; +; This defines the sensor states to be used. If no sensor state is desired it can be omitted or commented out. +; A state sensor is configured in the following format: +; states.state_description.state_variable = state_value +; state_description should be replaced with the desired sensor state description which is displayed in librenms. +; state_variable should be replaced with the variable to be configured. The 2 variables "value" and "generic" must be configured for each state. +; The variable "value" specifies the value when the respective state is to be displayed. +; The variable "generic" can be used to define the background color of the respective state. 0 = green, 1 = orange, 2 = red. +; In the following example, the "Cabinet door" sensor will display "Open" in red for value 1 and "Closed" in green for value 0: +states.Open.value = 1 +states.Open.generic = 2 +states.Closed.value = 0 +states.Closed.generic = 0 + +; GPIO readout of an IO contact +; +; This defines the respective GPIO PIN which is to be read out. +; With a pullup or pulldown circuit the state of a door contact can be determined. +; It is important that the pin numbering scheme of WiringPi is used! +io_gpio_pin = 21 + +; GPIO readout by external program +; +; This defines the external program to be used to read out the respective GPIO values. +; Hereby it is possible to read sensor modules like temperature sensors, air pressure sensors, humidity sensors etc.. +; The program to be used must output a pure numerical value. If this is not the case, the value can be extracted from the output of the program using a helper script, for example. +;external_gpio_reader = /etc/snmp/tempreader.sh + + + +; +; A few more example configurations +; + +; Example configuration which reads an IO contact (pin 25) to determine whether the light of a technical cabinet is switched on or off. 
+[sensorexample2] +type = state +description = Cabinet lighting +states.Switched on.value = 1 +states.Switched on.generic = 2 +states.Switched off.value = 0 +states.Switched off.generic = 0 +io_gpio_pin = 25 + +; Example configuration which reads a temperature sensor +;[sensorexample3] +;type = temperature +;description = Cabinet temperature +;lowlimit = 0 +;lowwarnlimit = 5 +;warnlimit = 35 +;highlimit = 40 +;external_gpio_reader = /etc/snmp/tempreader.sh diff --git a/snmp/rpigpiomonitor.php b/snmp/rpigpiomonitor.php new file mode 100755 index 000000000..b6614e182 --- /dev/null +++ b/snmp/rpigpiomonitor.php @@ -0,0 +1,290 @@ +#!/usr/bin/env php +. + * + * @link https://librenms.org + * @copyright 2021 Denny Friebe + * @author Denny Friebe + */ + +function parseConfigFile($file, $process_sections = false, $scanner_mode = INI_SCANNER_NORMAL) { + $explode_str = '.'; + $escape_char = "'"; + + // load ini file the normal way + $data = parse_ini_file($file, $process_sections, $scanner_mode); + + if (!$process_sections) { + $data = array($data); + } + + foreach ($data as $section_key => $section) { + // loop inside the section + foreach ($section as $key => $value) { + if (strpos($key, $explode_str)) { + if (substr($key, 0, 1) !== $escape_char) { + // key has a dot. 
Explode on it, then parse each subkeys + // and set value at the right place thanks to references + $sub_keys = explode($explode_str, $key); + $subs =& $data[$section_key]; + foreach ($sub_keys as $sub_key) { + if (!isset($subs[$sub_key])) { + $subs[$sub_key] = []; + } + $subs =& $subs[$sub_key]; + } + // set the value at the right place + $subs = $value; + // unset the dotted key, we don't need it anymore + unset($data[$section_key][$key]); + } + // we have escaped the key, so we keep dots as they are + else { + $new_key = trim($key, $escape_char); + $data[$section_key][$new_key] = $value; + unset($data[$section_key][$key]); + } + } + } + } + if (!$process_sections) { + $data = $data[0]; + } + return $data; +} + +function validate_sensor_type($type) { + switch ($type) { + case "airflow": + case "ber": + case "charge": + case "chromatic_dispersion": + case "cooling": + case "count": + case "current": + case "dbm": + case "delay": + case "eer": + case "fanspeed": + case "frequency": + case "humidity": + case "load": + case "loss": + case "power": + case "power_consumed": + case "power_factor": + case "pressure": + case "quality_factor": + case "runtime": + case "signal": + case "snr": + case "state": + case "temperature": + case "tv_signal": + case "voltage": + case "waterflow": + case "percent": + return true; + default: + return false; + } +} + +function validate_sensor_states($states) { + if (is_array($states)) { + foreach($states as $state_index => $state) { + if (!isset($state["value"]) || !isset($state["generic"])) { + continue; + } + + if (!is_numeric($state["value"]) || !is_numeric($state["generic"])) { + return false; + } + } + return true; + } + return false; +} + +function validate_sensor_limit($limit) { + if (isset($limit) && is_numeric($limit)) { + return true; + } + return false; +} + +function get_rpi_serial() { + if (file_exists("/proc/device-tree/serial-number")) { + $rpi_serial = file_get_contents("/proc/device-tree/serial-number"); + //During the 
readout of serial-number additional characters are passed. (at this point I am not sure why) + //To prevent these characters from being output and messing up the whole snmp string we only cut out the needed characters. + $rpi_serial = substr($rpi_serial, 0, 16); + return $rpi_serial; + } + return; +} + +function get_sensor_current_value($sensor_data) { + if (isset($sensor_data["io_gpio_pin"])) { + $sensor_current_value = exec("gpio read " .$sensor_data["io_gpio_pin"]. " 2>&1", $tt, $retcode); + } else { + $sensor_current_value = exec($sensor_data["external_gpio_reader"]. " 2>&1", $tt, $retcode); + } + + if (is_numeric($sensor_current_value)) { + return $sensor_current_value; + } + + return; +} + +function validate_config($config, $rpi_serial) { + if(!$rpi_serial) { + echo "The serial number of your raspberry pi could not be read. Please check if you are using a DT enabled kernel and the file /proc/device-tree/serial-number is present. \n"; + echo "The serial number is required for creating a state sensor so that no sensor with the same name from another RPI overwrites it. \n"; + } + + foreach($config as $sensor_name => $sensor_data) { + $valid = false; + $gpio_reader_valid = true; + + if (!isset($sensor_data["type"]) || validate_sensor_type($sensor_data["type"]) == false) { + echo "No valid type is configured for sensor ".$sensor_name."! \n"; + } + + if (isset($sensor_data["states"]) && validate_sensor_states($sensor_data["states"]) == false) { + echo "No valid states is configured for sensor ".$sensor_name."! \n"; + } + + if (!$sensor_data["description"]) { + echo "No valid description is configured for sensor ".$sensor_name."! \n"; + } + + if (isset($sensor_data["lowlimit"]) && validate_sensor_limit($sensor_data["lowlimit"]) == false) { + echo "No valid lowlimit is configured for sensor ".$sensor_name."! 
\n"; + } + + if (isset($sensor_data["lowwarnlimit"]) && validate_sensor_limit($sensor_data["lowwarnlimit"]) == false) { + echo "No valid lowwarnlimit is configured for sensor ".$sensor_name."! \n"; + } + + if (isset($sensor_data["warnlimit"]) && validate_sensor_limit($sensor_data["warnlimit"]) == false) { + echo "No valid warnlimit is configured for sensor ".$sensor_name."! \n"; + } + + if (isset($sensor_data["highlimit"]) && validate_sensor_limit($sensor_data["highlimit"]) == false) { + echo "No valid highlimit is configured for sensor ".$sensor_name."! \n"; + } + + if (!isset($sensor_data["io_gpio_pin"]) && !isset($sensor_data["external_gpio_reader"])) { + echo "No IO GPIO pin or external GPIO readout program is configured for sensor ".$sensor_name."! \n"; + $gpio_reader_valid = false; + } + + if (isset($sensor_data["external_gpio_reader"]) && !file_exists($sensor_data["external_gpio_reader"])) { + echo "The external GPIO program for sensor ".$sensor_name." could not be found! Please check if the specified path is correct and the file exists. \n"; + $gpio_reader_valid = false; + } + + if ($gpio_reader_valid) { + $sensor_current_value = get_sensor_current_value($sensor_data); + if (isset($sensor_current_value)) { + echo "Current sensor value for ".$sensor_name.": " . $sensor_current_value . "\n"; + $valid = true; + } else { + echo "The current sensor value for ".$sensor_name." does not seem to be numeric! \n"; + if (isset($sensor_data["io_gpio_pin"])) { + echo "Please check if wiringpi is installed on this device! \n"; + } else { + echo "Please check if the external GPIO program outputs pure numeric values and if the required access rights are available to execute this program. \n"; + } + } + } + + if ($valid) { + echo "The sensor ".$sensor_name." are configured correctly. \n\n"; + } else { + echo "Please check your configuration for sensor ".$sensor_name.". 
\n\n"; + } + } +} + +function read_sensors($config, $rpi_serial) { + if ($rpi_serial) { + foreach($config as $sensor_name => $sensor_data) { + if ((!isset($sensor_data["type"]) || validate_sensor_type($sensor_data["type"]) == false) + || (isset($sensor_data["states"]) && validate_sensor_states($sensor_data["states"]) == false) + || !$sensor_data["description"] + || (isset($sensor_data["lowlimit"]) && validate_sensor_limit($sensor_data["lowlimit"]) == false) + || (isset($sensor_data["lowwarnlimit"]) && validate_sensor_limit($sensor_data["lowwarnlimit"]) == false) + || (isset($sensor_data["warnlimit"]) && validate_sensor_limit($sensor_data["warnlimit"]) == false) + || (isset($sensor_data["highlimit"]) && validate_sensor_limit($sensor_data["highlimit"]) == false) + || (!isset($sensor_data["io_gpio_pin"]) && !isset($sensor_data["external_gpio_reader"])) + || (isset($sensor_data["external_gpio_reader"]) && !file_exists($sensor_data["external_gpio_reader"]))) { + continue; //The configuration of this sensor is not correct. Skip this one. + } + + $sensor_current_value = get_sensor_current_value($sensor_data); + if (!isset($sensor_current_value)) { + continue; //The value read from the sensor does not correspond to a numerical value. Skip this one. + } + + //If limit is not configured, we initialize the respective key to prevent "Undefined index" notes. + if (!isset($sensor_data["lowlimit"])) { + $sensor_data["lowlimit"] = null; + } + + if (!isset($sensor_data["lowwarnlimit"])) { + $sensor_data["lowwarnlimit"] = null; + } + + if (!isset($sensor_data["warnlimit"])) { + $sensor_data["warnlimit"] = null; + } + + if (!isset($sensor_data["highlimit"])) { + $sensor_data["highlimit"] = null; + } + + echo $sensor_name."_".$rpi_serial.",".$sensor_data["type"].",".$sensor_data["description"].",".$sensor_data["lowlimit"].",".$sensor_data["lowwarnlimit"].",".$sensor_data["warnlimit"].",".$sensor_data["highlimit"]. 
";"; + + if(isset($sensor_data["states"])) { + foreach($sensor_data["states"] as $state_descr => $state) { + echo $state["value"].",".$state["generic"].",".$state_descr.";"; + } + } + + echo "\n" . $sensor_current_value . "\n"; + } + } +} + +$config = parseConfigFile('rpigpiomonitor.ini', true); +$rpi_serial = get_rpi_serial(); + +for ($i=0; $i < $argc; $i++) { + if ($argv[$i] == "-validate") { + validate_config($config, $rpi_serial); + return; + } +} + +read_sensors($config, $rpi_serial); +?> + From 39c845eb0a00a38172e6bfb927f0bdb2ab3dbadc Mon Sep 17 00:00:00 2001 From: Tim Pozar Date: Thu, 27 May 2021 12:36:59 -0700 Subject: [PATCH 305/497] Error checking with two line stats (#365) * Error checking with two line stats * Fixed some spacing problems --- snmp/zfs-freebsd | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/snmp/zfs-freebsd b/snmp/zfs-freebsd index e4d27cf80..c1654f47a 100644 --- a/snmp/zfs-freebsd +++ b/snmp/zfs-freebsd @@ -74,9 +74,12 @@ my @sysctls_pull = `/sbin/sysctl -q @to_pull`; foreach my $stat (@sysctls_pull) { chomp( $stat ); my ( $var, $val ) = split(/:/, $stat, 2); - - $val =~ s/^ //; - $sysctls->{$var}=$val; + # If $val is empty, skip it. Likely a var with a newline before + # the data so it is trying to "split" the data. + if( length $val ) { + $val =~ s/^ //; + $sysctls->{$var}=$val; + } } # does not seem to exist for me, but some of these don't seem to be created till needed From 64e13a38acb7e05250965a460add93d553746e8f Mon Sep 17 00:00:00 2001 From: adamus1red Date: Tue, 27 Jul 2021 00:01:03 +0100 Subject: [PATCH 306/497] Change UPS to pull from arg[1] for UPS name (#371) Will still fallback to APCUPS --- snmp/ups-nut.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/ups-nut.sh b/snmp/ups-nut.sh index 7fa5a0ba3..b5ba04fe4 100755 --- a/snmp/ups-nut.sh +++ b/snmp/ups-nut.sh @@ -9,7 +9,7 @@ # 4. restart snmpd on the host # # 5. 
activate the app for the desired host in LibreNMS # ################################################################ -UPS_NAME='APCUPS' +UPS_NAME="${1:-APCUPS}" PATH=$PATH:/usr/bin:/bin TMP=$(upsc $UPS_NAME 2>/dev/null) From 01122b64586a76975fffdb882a0fe690ac298196 Mon Sep 17 00:00:00 2001 From: Jellyfrog Date: Tue, 27 Jul 2021 10:26:09 +0200 Subject: [PATCH 307/497] Bump Super-linter (#372) * Bump Super-linter * Update linter.yml * Update linter.yml * Update linter.yml --- .github/workflows/linter.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/linter.yml b/.github/workflows/linter.yml index 1e65ec1a1..797b4f20f 100644 --- a/.github/workflows/linter.yml +++ b/.github/workflows/linter.yml @@ -18,7 +18,7 @@ jobs: fetch-depth: 0 - name: Lint Code Base - uses: github/super-linter@v3.15.3 + uses: github/super-linter@v4 env: FILTER_REGEX_INCLUDE: .*(agent-local|snmp)/.* VALIDATE_ALL_CODEBASE: false @@ -26,6 +26,7 @@ jobs: VALIDATE_BASH_EXEC: false VALIDATE_PYTHON_FLAKE8: false + VALIDATE_PYTHON_MYPY: false VALIDATE_PHP_PHPCS: false VALIDATE_PHP_PSALM: false From a654b9fb01ee51cbf45431d0baf5ed1ca9f6a818 Mon Sep 17 00:00:00 2001 From: Pim van Pelt Date: Wed, 15 Sep 2021 17:50:57 +0200 Subject: [PATCH 308/497] Tag VPP enabled machines to allow for a custom icon in LibreNMS device view (#374) --- snmp/distro | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/snmp/distro b/snmp/distro index 56ae5e940..d29d3e805 100755 --- a/snmp/distro +++ b/snmp/distro @@ -147,4 +147,8 @@ elif [ "${OS}" = "FreeBSD" ] ; then fi fi +if [ -f /etc/vpp/startup.conf ]; then + OSSTR="VPP ${OSSTR}" +fi + echo "${OSSTR}" From dd1843284d5526142d9a5ff9e886c6d7d3baec1b Mon Sep 17 00:00:00 2001 From: "Zane C. 
Bowers-Hadley" Date: Wed, 20 Oct 2021 14:35:52 -0500 Subject: [PATCH 309/497] fix for ntp-server.sh from #376 --- snmp/ntp-server.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/snmp/ntp-server.sh b/snmp/ntp-server.sh index 30c722041..fba25a211 100755 --- a/snmp/ntp-server.sh +++ b/snmp/ntp-server.sh @@ -69,11 +69,11 @@ if [ $NTPQV = "p1" ]; then CLK_WANDER=$(echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $6}') fi -VER=$($BIN_NTPD --version) -if [ "$VER" = '4.2.6p5' ]; then - USECMD=$(echo "$BIN_NTPDC" -c iostats) +VER=$($BIN_NTPD --version 2>&1 | head -n 1) +if [[ "$VER" == *"4.2.6p5"* ]]; then + USECMD=$(echo "$BIN_NTPDC" -c iostats 127.0.0.1) else - USECMD=$(echo "$BIN_NTPQ" -c iostats localhost) + USECMD=$(echo "$BIN_NTPQ" -c iostats 127.0.0.1) fi CMD2=$($USECMD | $BIN_TR -d ' ' | $BIN_CUT -d : -f 2 | $BIN_TR '\n' ' ') From 132de9f5acbfc72ba1fc045b2b42a36ade555829 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Wed, 20 Oct 2021 14:44:07 -0500 Subject: [PATCH 310/497] [[ -> [ tested on FreeBSD and works --- snmp/ntp-server.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/ntp-server.sh b/snmp/ntp-server.sh index fba25a211..b3b78fbd6 100755 --- a/snmp/ntp-server.sh +++ b/snmp/ntp-server.sh @@ -70,7 +70,7 @@ if [ $NTPQV = "p1" ]; then fi VER=$($BIN_NTPD --version 2>&1 | head -n 1) -if [[ "$VER" == *"4.2.6p5"* ]]; then +if [ "$VER" == *"4.2.6p5"* ]; then USECMD=$(echo "$BIN_NTPDC" -c iostats 127.0.0.1) else USECMD=$(echo "$BIN_NTPQ" -c iostats 127.0.0.1) From 4d6aa3c5d19b0040d94539d4fe377fd8387481dc Mon Sep 17 00:00:00 2001 From: "Zane C. 
Bowers-Hadley" Date: Wed, 20 Oct 2021 14:49:26 -0500 Subject: [PATCH 311/497] now happy on both linux and freebsd for ntpd --version output --- snmp/ntp-server.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/snmp/ntp-server.sh b/snmp/ntp-server.sh index b3b78fbd6..b59f6c714 100755 --- a/snmp/ntp-server.sh +++ b/snmp/ntp-server.sh @@ -69,8 +69,8 @@ if [ $NTPQV = "p1" ]; then CLK_WANDER=$(echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $6}') fi -VER=$($BIN_NTPD --version 2>&1 | head -n 1) -if [ "$VER" == *"4.2.6p5"* ]; then +VER=$($BIN_NTPD --version 2>&1 | cut -d\ -f 2 | head -n 1) +if [ "$VER" == "4.2.6p5" ]; then USECMD=$(echo "$BIN_NTPDC" -c iostats 127.0.0.1) else USECMD=$(echo "$BIN_NTPQ" -c iostats 127.0.0.1) From 0b1ff8db545e7ae62dde58e840d170807adb3755 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Wed, 20 Oct 2021 14:53:40 -0500 Subject: [PATCH 312/497] pet the linter and hope everything is POSIX happy now.... works on freebsd and linux though --- snmp/ntp-server.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/ntp-server.sh b/snmp/ntp-server.sh index b59f6c714..4fb02e8c5 100755 --- a/snmp/ntp-server.sh +++ b/snmp/ntp-server.sh @@ -70,7 +70,7 @@ if [ $NTPQV = "p1" ]; then fi VER=$($BIN_NTPD --version 2>&1 | cut -d\ -f 2 | head -n 1) -if [ "$VER" == "4.2.6p5" ]; then +if [ "$VER" = "4.2.6p5" ]; then USECMD=$(echo "$BIN_NTPDC" -c iostats 127.0.0.1) else USECMD=$(echo "$BIN_NTPQ" -c iostats 127.0.0.1) From 9f99a607780054a300633cff966ff57261309cca Mon Sep 17 00:00:00 2001 From: SourceDoctor Date: Sun, 24 Oct 2021 21:01:40 +0200 Subject: [PATCH 313/497] update yaml loader (#381) --- snmp/puppet_agent.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/puppet_agent.py b/snmp/puppet_agent.py index 9cb64f17b..cc9b36343 100755 --- a/snmp/puppet_agent.py +++ b/snmp/puppet_agent.py @@ -28,7 +28,7 @@ def parse_yaml_file(filename): try: - yaml_data = yaml.load(open(filename, "r")) + yaml_data 
= yaml.load(open(filename, "r"), Loader=yaml.FullLoader) msg = None except yaml.scanner.ScannerError as e: yaml_data = [] From 0d36f8aff23dd39f73351a6009cbfc5ee194b2cc Mon Sep 17 00:00:00 2001 From: David Simpson <31688862+ds-04@users.noreply.github.com> Date: Sun, 24 Oct 2021 20:02:34 +0100 Subject: [PATCH 314/497] Add almalinux to distro (#378) --- snmp/distro | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/snmp/distro b/snmp/distro index d29d3e805..a8e9eb0d5 100755 --- a/snmp/distro +++ b/snmp/distro @@ -42,6 +42,11 @@ elif [ "${OS}" = "Linux" ] ; then #PSEUDONAME=$(cat /etc/redhat-release | sed s/.*\(// | sed s/\)//) REV=$(cat /etc/redhat-release | sed s/.*release\ // | sed s/\ .*//) + elif [ -f /etc/almalinux-release ] ; then + DIST='AlmaLinux' + #PSEUDONAME=$(cat /etc/almalinux-release | sed s/.*\(// | sed s/\)//) + REV=$(cat /etc/almalinux-release | sed s/.*release\ // | sed s/\ .*//) + elif [ -f /etc/mandrake-release ] ; then DIST='Mandrake' #PSEUDONAME=$(cat /etc/mandrake-release | sed s/.*\(// | sed s/\)//) From 9f6861bc2369d2581762c4508558909543cd07e5 Mon Sep 17 00:00:00 2001 From: Peca Nesovanovic <59750439+Npeca75@users.noreply.github.com> Date: Mon, 1 Nov 2021 20:27:02 +0100 Subject: [PATCH 315/497] Mikrotik vlans discovery script (#382) --- snmp/Routeros/LMNS_vlans.scr | 58 ++++++++++++++++++++++++++++++++++++ 1 file changed, 58 insertions(+) create mode 100644 snmp/Routeros/LMNS_vlans.scr diff --git a/snmp/Routeros/LMNS_vlans.scr b/snmp/Routeros/LMNS_vlans.scr new file mode 100644 index 000000000..3ac920ed5 --- /dev/null +++ b/snmp/Routeros/LMNS_vlans.scr @@ -0,0 +1,58 @@ +### +### LibreNMS "glue" script for routeros vlans discovery +### https://github.com/librenms/librenms/pull/13427 +### + +:global vlanst [:toarray ""] +:global vlansu [:toarray ""] + +:foreach i in [/interface bridge vlan find] do={ + :local intf [/interface bridge vlan get $i bridge] + :local vlid [/interface bridge vlan get $i vlan-ids] + + :foreach t in [/interface 
bridge vlan get $i tagged] do={ + :set $vlanst ($vlanst, "$vlid,$t") + } + + :foreach u in [/interface bridge vlan get $i current-untagged] do={ + :set $vlansu ($vlansu, "$vlid,$u") + } + + :foreach u in [/interface bridge port find where bridge=$intf and pvid=$vlid] do={ + :local iu [/interface bridge port get $u interface] + :local fl 0 + :foreach tmp in $vlansu do={ + :local ar [:toarray $tmp] + :if ((($ar->0) = $vlid) && (($ar->1) = $iu)) do={ + :set fl 1 + } + } + :if ( $fl != 1 ) do={ + :set $vlansu ($vlansu, "$vlid,$iu") + } + } +} + +:foreach vl in [/interface vlan find ] do={ + :local intf [/interface vlan get $vl interface] + :local vlid [/interface vlan get $vl vlan-id] + :local fl 0 + + :foreach tmp in $vlanst do={ + :local ar [:toarray $tmp] + :if ((($ar->0) = $vlid) && (($ar->1) = $intf)) do={ + :set fl 1 + } + } + :if ( $fl != 1 ) do={ + :set $vlanst ($vlanst, "$vlid,$intf") + } +} + +:foreach tmp in $vlanst do={ + :put "T,$tmp" +} + +:foreach tmp in $vlansu do={ + :put "U,$tmp" +} From 12a9cff3c96a042da7a6b43a764d535b478d4957 Mon Sep 17 00:00:00 2001 From: Deltawings <77517677+Deltawings@users.noreply.github.com> Date: Wed, 10 Nov 2021 22:25:51 +0100 Subject: [PATCH 316/497] Constants definition logic modification Correction avoiding script exit when configuration file doesn't exist because MySQL connection constants can also be set at the beginning of this script. --- agent-local/mysql | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/agent-local/mysql b/agent-local/mysql index 0b9419fd0..c56e4e7e1 100755 --- a/agent-local/mysql +++ b/agent-local/mysql @@ -27,9 +27,9 @@ if (!array_key_exists('SCRIPT_FILENAME', $_SERVER) # ============================================================================ # CONFIGURATION # ============================================================================ -# Define MySQL connection constants in config.php. 
Instead of defining -# parameters here, you can define them in another file named the same as this -# file, with a .cnf extension. +# Define MySQL connection constants. Instead of defining parameters here, +# you can also define them in another file named the same as this +# file with a .cnf extension. # ============================================================================ $mysql_user = ''; @@ -77,9 +77,6 @@ echo("<<>>\n"); if (file_exists(__FILE__ . '.cnf' ) ) { require(__FILE__ . '.cnf'); debug('Found configuration file ' . __FILE__ . '.cnf'); -} else { - echo("No ".__FILE__ . ".cnf found!\n"); - exit(); } # Make this a happy little script even when there are errors. From de12ccaff1d68631340c4691fd1bfa84beb78603 Mon Sep 17 00:00:00 2001 From: Alex R Date: Thu, 16 Dec 2021 18:24:29 +0100 Subject: [PATCH 317/497] - added equivalent wear level for nvme ssd (#383) * - added equivalent wear level for nvme ssd - remove touched cache file to avoid no data if config is guessed - take only 1st raw response to avoid taking strings instead of int (eg. adacom devices like supermicro sata dom moule) * fix identation --- snmp/smart | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/snmp/smart b/snmp/smart index 5793b90db..ef1304b0d 100755 --- a/snmp/smart +++ b/snmp/smart @@ -12,11 +12,11 @@ # and/or other materials provided with the distribution. # #THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -#ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +#ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED #WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
#IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, -#INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, -#BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +#INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +#BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, #DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF #LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR #OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF @@ -112,6 +112,7 @@ if ( defined( $opts{g} ) ){ $cache='#Could not touch '.$cache. "You will need to manually set it\n". "cache=?\n"; }else{ + system('rm -f '.$cache.'>/dev/null'); $cache='cache='.$cache."\n"; } @@ -175,7 +176,7 @@ if ( defined( $opts{g} ) ){ } - print "useSN=0\n".'smartctl='.$smartctl."\n". + print "useSN=1\n".'smartctl='.$smartctl."\n". $cache. 
$drive_lines; @@ -283,7 +284,7 @@ foreach my $line ( @disks ){ '233'=>'null', '9'=>'null', ); - + my @outputA; if($output =~ /NVMe Log/) @@ -293,6 +294,7 @@ foreach my $line ( @disks ){ 'Temperature' => 194, 'Power Cycles' => 12, 'Power On Hours' => 9, + 'Percentage Used' => 231, ); foreach(split(/\n/, $output )) { @@ -302,7 +304,11 @@ foreach my $line ( @disks ){ $val =~ s/^\s+|\s+$|\D+//g; if(exists($mappings{$key})) { - $IDs{$mappings{$key}} = $val; + if ($mappings{$key} == 231) { + $IDs{$mappings{$key}} = 100-$val; + } else { + $IDs{$mappings{$key}} = $val; + } } } } @@ -344,7 +350,8 @@ foreach my $line ( @disks ){ ( $id == 231 ) || ( $id == 233 ) ) { - $IDs{$id}=$raw; + my @rawA=split( /\ /, $raw ); + $IDs{$id}=$rawA[0]; } # 9, power on hours @@ -426,7 +433,7 @@ foreach my $line ( @disks ){ # get the drive serial number, if needed my $disk_id=$name; if ( $useSN ){ - while (`$smartctl -i $disk` =~ /Serial Number:(.*)/g) { + while (`$smartctl -i $disk` =~ /(?i)Serial Number:(.*)/g) { $disk_id = $1; $disk_id =~ s/^\s+|\s+$//g; } From 1824e06f4c1130558219a70e3688ea5057a0920d Mon Sep 17 00:00:00 2001 From: Dave King Date: Thu, 16 Dec 2021 10:27:16 -0700 Subject: [PATCH 318/497] improve FreeRADIUS stats efficiency (#389) * agent setting needs numeric comparison * reduce cpu and system calls by using sed instead of grep --- snmp/freeradius.sh | 100 +++++++++++++++++++++++++-------------------- 1 file changed, 56 insertions(+), 44 deletions(-) mode change 100644 => 100755 snmp/freeradius.sh diff --git a/snmp/freeradius.sh b/snmp/freeradius.sh old mode 100644 new mode 100755 index 6a0a29fb6..560c75f06 --- a/snmp/freeradius.sh +++ b/snmp/freeradius.sh @@ -18,54 +18,66 @@ fi # Default radclient access request, shouldn't need to be changed RADIUS_STATUS_CMD='Message-Authenticator = 0x00, FreeRADIUS-Statistics-Type = 31, Response-Packet-Type = Access-Accept' -# Pathes for grep and radclient executables, should work if within PATH -BIN_GREP="$(command -v grep)" +# Paths for 
sed and radclient executables, should work if within PATH +BIN_SED="$(command -v sed)" BIN_RADCLIENT="$(command -v radclient)" -if [ $AGENT == 1 ]; then +if [ $AGENT -eq 1 ]; then echo "<<>>" fi RESULT=$(echo "$RADIUS_STATUS_CMD" | $BIN_RADCLIENT -x $RADIUS_SERVER:$RADIUS_PORT status $RADIUS_KEY) -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Access-Requests = [[:digit:]]*' -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Access-Accepts = [[:digit:]]*' -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Access-Rejects = [[:digit:]]*' -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Access-Challenges = [[:digit:]]*' -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Auth-Responses = [[:digit:]]*' -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Auth-Duplicate-Requests = [[:digit:]]*' -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Auth-Malformed-Requests = [[:digit:]]*' -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Auth-Invalid-Requests = [[:digit:]]*' -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Auth-Dropped-Requests = [[:digit:]]*' -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Auth-Unknown-Types = [[:digit:]]*' -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Accounting-Requests = [[:digit:]]*' -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Accounting-Responses = [[:digit:]]*' -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Acct-Duplicate-Requests = [[:digit:]]*' -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Acct-Malformed-Requests = [[:digit:]]*' -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Acct-Invalid-Requests = [[:digit:]]*' -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Acct-Dropped-Requests = [[:digit:]]*' -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Acct-Unknown-Types = [[:digit:]]*' -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Proxy-Access-Requests = [[:digit:]]*' -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Proxy-Access-Accepts = [[:digit:]]*' -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Proxy-Access-Rejects = 
[[:digit:]]*' -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Proxy-Access-Challenges = [[:digit:]]*' -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Proxy-Auth-Responses = [[:digit:]]*' -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Proxy-Auth-Duplicate-Requests = [[:digit:]]*' -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Proxy-Auth-Malformed-Requests = [[:digit:]]*' -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Proxy-Auth-Invalid-Requests = [[:digit:]]*' -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Proxy-Auth-Dropped-Requests = [[:digit:]]*' -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Proxy-Auth-Unknown-Types = [[:digit:]]*' -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Proxy-Accounting-Requests = [[:digit:]]*' -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Proxy-Accounting-Responses = [[:digit:]]*' -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Proxy-Acct-Duplicate-Requests = [[:digit:]]*' -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Proxy-Acct-Malformed-Requests = [[:digit:]]*' -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Proxy-Acct-Invalid-Requests = [[:digit:]]*' -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Proxy-Acct-Dropped-Requests = [[:digit:]]*' -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Total-Proxy-Acct-Unknown-Types = [[:digit:]]*' -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Queue-Len-Internal = [[:digit:]]*' -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Queue-Len-Proxy = [[:digit:]]*' -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Queue-Len-Auth = [[:digit:]]*' -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Queue-Len-Acct = [[:digit:]]*' -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Queue-Len-Detail = [[:digit:]]*' -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Queue-PPS-In = [[:digit:]]*' -echo "$RESULT" | $BIN_GREP -o 'FreeRADIUS-Queue-PPS-Out = [[:digit:]]*' +if [[ $RESULT != *"Received Access-Accept"* ]] ; then + # A valid result must contain the match string. 
Otherwise, verify: + # - the FreeRADIUS 'status' virtual server is enabled and running + # - the server, port or key match the 'status' server settings + echo "invalid result from radclient status request, check server settings" + exit 1 +fi + +# Return only those AV pairs expected by the FreeRADIUS app, one per line +# Drop any leading or trailing whitespace +# They may be returned in any order +echo "$RESULT" | $BIN_SED -n \ + -e 's/\s*\(FreeRADIUS-Total-Access-Requests = [0-9]*\)/\1/p' \ + -e 's/\s*\(FreeRADIUS-Total-Access-Accepts = [0-9]*\)/\1/p' \ + -e 's/\s*\(FreeRADIUS-Total-Access-Rejects = [0-9]*\)/\1/p' \ + -e 's/\s*\(FreeRADIUS-Total-Access-Challenges = [0-9]*\)/\1/p' \ + -e 's/\s*\(FreeRADIUS-Total-Auth-Responses = [0-9]*\)/\1/p' \ + -e 's/\s*\(FreeRADIUS-Total-Auth-Duplicate-Requests = [0-9]*\)/\1/p' \ + -e 's/\s*\(FreeRADIUS-Total-Auth-Malformed-Requests = [0-9]*\)/\1/p' \ + -e 's/\s*\(FreeRADIUS-Total-Auth-Invalid-Requests = [0-9]*\)/\1/p' \ + -e 's/\s*\(FreeRADIUS-Total-Auth-Dropped-Requests = [0-9]*\)/\1/p' \ + -e 's/\s*\(FreeRADIUS-Total-Auth-Unknown-Types = [0-9]*\)/\1/p' \ + -e 's/\s*\(FreeRADIUS-Total-Accounting-Requests = [0-9]*\)/\1/p' \ + -e 's/\s*\(FreeRADIUS-Total-Accounting-Responses = [0-9]*\)/\1/p' \ + -e 's/\s*\(FreeRADIUS-Total-Acct-Duplicate-Requests = [0-9]*\)/\1/p' \ + -e 's/\s*\(FreeRADIUS-Total-Acct-Malformed-Requests = [0-9]*\)/\1/p' \ + -e 's/\s*\(FreeRADIUS-Total-Acct-Invalid-Requests = [0-9]*\)/\1/p' \ + -e 's/\s*\(FreeRADIUS-Total-Acct-Dropped-Requests = [0-9]*\)/\1/p' \ + -e 's/\s*\(FreeRADIUS-Total-Acct-Unknown-Types = [0-9]*\)/\1/p' \ + -e 's/\s*\(FreeRADIUS-Total-Proxy-Access-Requests = [0-9]*\)/\1/p' \ + -e 's/\s*\(FreeRADIUS-Total-Proxy-Access-Accepts = [0-9]*\)/\1/p' \ + -e 's/\s*\(FreeRADIUS-Total-Proxy-Access-Rejects = [0-9]*\)/\1/p' \ + -e 's/\s*\(FreeRADIUS-Total-Proxy-Access-Challenges = [0-9]*\)/\1/p' \ + -e 's/\s*\(FreeRADIUS-Total-Proxy-Auth-Responses = [0-9]*\)/\1/p' \ + -e 
's/\s*\(FreeRADIUS-Total-Proxy-Auth-Duplicate-Requests = [0-9]*\)/\1/p' \ + -e 's/\s*\(FreeRADIUS-Total-Proxy-Auth-Malformed-Requests = [0-9]*\)/\1/p' \ + -e 's/\s*\(FreeRADIUS-Total-Proxy-Auth-Invalid-Requests = [0-9]*\)/\1/p' \ + -e 's/\s*\(FreeRADIUS-Total-Proxy-Auth-Dropped-Requests = [0-9]*\)/\1/p' \ + -e 's/\s*\(FreeRADIUS-Total-Proxy-Auth-Unknown-Types = [0-9]*\)/\1/p' \ + -e 's/\s*\(FreeRADIUS-Total-Proxy-Accounting-Requests = [0-9]*\)/\1/p' \ + -e 's/\s*\(FreeRADIUS-Total-Proxy-Accounting-Responses = [0-9]*\)/\1/p' \ + -e 's/\s*\(FreeRADIUS-Total-Proxy-Acct-Duplicate-Requests = [0-9]*\)/\1/p' \ + -e 's/\s*\(FreeRADIUS-Total-Proxy-Acct-Malformed-Requests = [0-9]*\)/\1/p' \ + -e 's/\s*\(FreeRADIUS-Total-Proxy-Acct-Invalid-Requests = [0-9]*\)/\1/p' \ + -e 's/\s*\(FreeRADIUS-Total-Proxy-Acct-Dropped-Requests = [0-9]*\)/\1/p' \ + -e 's/\s*\(FreeRADIUS-Total-Proxy-Acct-Unknown-Types = [0-9]*\)/\1/p' \ + -e 's/\s*\(FreeRADIUS-Queue-Len-Internal = [0-9]*\)/\1/p' \ + -e 's/\s*\(FreeRADIUS-Queue-Len-Proxy = [0-9]*\)/\1/p' \ + -e 's/\s*\(FreeRADIUS-Queue-Len-Auth = [0-9]*\)/\1/p' \ + -e 's/\s*\(FreeRADIUS-Queue-Len-Acct = [0-9]*\)/\1/p' \ + -e 's/\s*\(FreeRADIUS-Queue-Len-Detail = [0-9]*\)/\1/p' \ + -e 's/\s*\(FreeRADIUS-Queue-PPS-In = [0-9]*\)/\1/p' \ + -e 's/\s*\(FreeRADIUS-Queue-PPS-Out = [0-9]*\)/\1/p' From 5926736508d26d9b5cb6894fd1e341b7cdfb9ea7 Mon Sep 17 00:00:00 2001 From: Plamen Vasilev Date: Thu, 16 Dec 2021 19:27:36 +0200 Subject: [PATCH 319/497] fix occasionally random ordering for fail2ban (#388) If $j->canonical(1); need another changes, please fix that. 
I get this from: https://github.com/librenms/librenms-agent/pull/240 --- snmp/fail2ban | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/fail2ban b/snmp/fail2ban index 85640021b..42f29ed63 100644 --- a/snmp/fail2ban +++ b/snmp/fail2ban @@ -157,7 +157,7 @@ sub stats{ } my $j=JSON->new; - + $j->canonical(1); if ( $_[0] ){ $j->pretty(1); return $j->encode( \%toReturn ); From 05a1596c8c5b4b9fcf6a52874af51888429ae778 Mon Sep 17 00:00:00 2001 From: Henne Van Och Date: Thu, 13 Jan 2022 18:18:57 +0100 Subject: [PATCH 320/497] Add supervisord script (#392) --- snmp/supervisord.py | 82 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 82 insertions(+) create mode 100644 snmp/supervisord.py diff --git a/snmp/supervisord.py b/snmp/supervisord.py new file mode 100644 index 000000000..4cc8f70fd --- /dev/null +++ b/snmp/supervisord.py @@ -0,0 +1,82 @@ +#!/usr/bin/env python + +import json +import sys + +from supervisor import xmlrpc + +if sys.version_info.major < 3: + from xmlrpclib import ServerProxy +else: + from xmlrpc.client import ServerProxy + +unix_socket_path = "/var/run/supervisor/supervisor.sock" + +error = 0 +error_string = 0 +processes = [] + +total = { + "STOPPED": 0, + "STARTING": 0, + "RUNNING": 0, + "BACKOFF": 0, + "STOPPING": 0, + "EXITED": 0, + "FATAL": 0, + "UNKNOWN": 0, +} + +try: + server = ServerProxy( + "http://127.0.0.1", + transport=xmlrpc.SupervisorTransport(None, None, "unix://" + unix_socket_path), + ) + + state = server.supervisor.getState()["statename"] + + if state != "RUNNING": + error = 1 + error_string = "Not running" + + for process in server.supervisor.getAllProcessInfo(): + if process["statename"] == "RUNNING": + uptime = process["now"] - process["start"] + else: + uptime = process["stop"] - process["start"] + + uptime = 0 if uptime < 0 else uptime + + processes.append( + { + "name": process["name"], + "group": process["group"], + "statename": process["statename"], + "state": process["state"], + "error": 
process["spawnerr"] if process["spawnerr"] else None, + "start": process["start"], + "stop": process["stop"], + "now": process["now"], + "uptime": uptime, + } + ) + + total[process["statename"]] += 1 + +except Exception as e: + error = 1 + error_string = repr(e) + +print( + json.dumps( + { + "version": 1, + "error": error, + "errorString": error_string, + "data": { + "total": total, + "processes": processes, + }, + } + ) +) From a24b570837967263ac99ca625b80c17f91304547 Mon Sep 17 00:00:00 2001 From: Thermi Date: Sun, 30 Jan 2022 03:19:52 +0100 Subject: [PATCH 321/497] osupdates: on Arch, try to use checkupdates (#391) --- snmp/osupdate | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/snmp/osupdate b/snmp/osupdate index 11a6d9a9b..4a9b568cc 100755 --- a/snmp/osupdate +++ b/snmp/osupdate @@ -24,6 +24,7 @@ BIN_APT='/usr/bin/env apt-get' CMD_APT='-qq -s upgrade' BIN_PACMAN='/usr/bin/env pacman' CMD_PACMAN='-Sup' +BIN_CHECKUPDATES='/usr/bin/env checkupdates' BIN_PKG='/usr/sbin/pkg' CMD_PKG=' audit -q -F' BIN_APK='/sbin/apk' @@ -52,7 +53,17 @@ elif command -v dnf &>/dev/null ; then fi elif command -v pacman &>/dev/null ; then # Arch - UPDATES=$($BIN_PACMAN $CMD_PACMAN | $BIN_WC $CMD_WC) + # calling pacman -Sup does not refresh the package list from the mirrors, + # thus it is not useful to find out if there are updates. Keep the pacman call + # to accomodate users that do not have it. checkupdates is in pacman-contrib. 
+ # also enables snmpd to collect this information if it's not run as root + if command -v checkupdates &>/dev/null ; then + # shellcheck disable=SC2086 + UPDATES=$($BIN_CHECKUPDATES | $BIN_WC $CMD_WC) + else + # shellcheck disable=SC2086 + UPDATES=$($BIN_PACMAN $CMD_PACMAN | $BIN_WC $CMD_WC) + fi if [ "$UPDATES" -ge 1 ]; then echo $(($UPDATES-1)); else From c8fee7b8c3d42574c773ca5a7e73b2d1e52731ab Mon Sep 17 00:00:00 2001 From: Hans Erasmus Date: Fri, 25 Feb 2022 19:22:09 +0200 Subject: [PATCH 322/497] Add rocky linux identification (#397) --- snmp/distro | 2 ++ 1 file changed, 2 insertions(+) diff --git a/snmp/distro b/snmp/distro index a8e9eb0d5..da06f943a 100755 --- a/snmp/distro +++ b/snmp/distro @@ -35,6 +35,8 @@ elif [ "${OS}" = "Linux" ] ; then DIST="Oracle" elif [ -f /etc/rockstor-release ]; then DIST="Rockstor" + elif [ -f /etc/rocky-release ]; then + DIST="Rocky" else DIST="RedHat" fi From cf1394c0d484c1a4c3861cefb4f06db110ab4260 Mon Sep 17 00:00:00 2001 From: Barny Ritchley Date: Thu, 10 Mar 2022 22:56:23 +0000 Subject: [PATCH 323/497] Update opensip3-stats.sh (#396) Update to use curl for management information. 
Reduces load for frequent polling., --- snmp/opensip3-stats.sh | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/snmp/opensip3-stats.sh b/snmp/opensip3-stats.sh index a3302c6bd..c0d93aeee 100644 --- a/snmp/opensip3-stats.sh +++ b/snmp/opensip3-stats.sh @@ -2,9 +2,10 @@ # Author: Sharad Kumar # This script is for OpenSIPS 3.X + version -total_memory=$(opensips-cli -x mi get_statistics total_size | awk '/shmem:total_size/ { gsub(/[",]/,""); print "Total Memory=" $2}') -used_memory=$(opensips-cli -x mi get_statistics real_used_size | awk '/shmem:real_used_size/ { gsub(/[",]/,""); print "Used Memory=" $2}') -free_memory=$(opensips-cli -x mi get_statistics free_size | awk '/shmem:free_size/ { gsub(/[",]/,""); print "Free Memory=" $2}') +statistics=$(curl -s --header "Content-Type: application/json" -X POST -i http://127.0.0.1:8888/json -d '{"jsonrpc":"2.0","id":1,"method":"get_statistics", "params":[["all"]]}') +total_memory=$(echo "$statistics" | grep -Po '"shmem:total_size":(\d+)' |awk -F':' '{print $3}') +used_memory=$(echo "$statistics" | grep -Po '"shmem:used_size":(\d+)' |awk -F':' '{print $3}') +free_memory=$(echo "$statistics" | grep -Po '"shmem:free_size":(\d+)' |awk -F':' '{print $3}') load_average=$(ps -C opensips -o %cpu | awk '{sum += $1} END {print "Load Average=" sum}') total_files=$(lsof -c opensips | wc -l) From d73256cab43df0c22b188456822587d349b1d1eb Mon Sep 17 00:00:00 2001 From: "Zane C. 
Bowers-Hadley" Date: Sat, 23 Apr 2022 22:59:20 -0500 Subject: [PATCH 324/497] localhost -> 127.0.0.1 to work around bug where it complains about the directory not existing --- snmp/mysql | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/mysql b/snmp/mysql index 44e31e289..89e3c9059 100755 --- a/snmp/mysql +++ b/snmp/mysql @@ -34,7 +34,7 @@ if (!array_key_exists('SCRIPT_FILENAME', $_SERVER) $mysql_user = ''; $mysql_pass = ''; -$mysql_host = 'localhost'; +$mysql_host = '127.0.0.1'; $mysql_port = 3306; $mysql_ssl = FALSE; # Whether to use SSL to connect to MySQL. $mysql_ssl_key = '/etc/pki/tls/certs/mysql/client-key.pem'; From cfcdb754e1591102906538e3af275e4840a2b9b7 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Sat, 23 Apr 2022 23:03:53 -0500 Subject: [PATCH 325/497] fix a off by one for nfsstat on new releases --- snmp/fbsdnfsclient | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/snmp/fbsdnfsclient b/snmp/fbsdnfsclient index 7e3d57722..ab5c12694 100644 --- a/snmp/fbsdnfsclient +++ b/snmp/fbsdnfsclient @@ -95,7 +95,7 @@ while( defined( $nfsstatOutputA[$int] ) ){ $nfsstatOutputA[$int]=~s/^ +//; $nfsstatOutputA[$int]=~s/ +/ /g; - if ( $int == 3 ){ + if ( $int == 2 ){ ( $data{Getattr}, $data{Setattr}, @@ -109,7 +109,7 @@ while( defined( $nfsstatOutputA[$int] ) ){ } - if ( $int == 5 ){ + if ( $int == 4 ){ ( $data{Rename}, $data{Link}, @@ -123,7 +123,7 @@ while( defined( $nfsstatOutputA[$int] ) ){ } - if ( $int == 7 ){ + if ( $int == 6 ){ ( $data{Mknod}, $data{Fsstat}, @@ -134,7 +134,7 @@ while( defined( $nfsstatOutputA[$int] ) ){ } - if ( $int == 10 ){ + if ( $int == 9 ){ ( $data{TimedOut}, $data{Invalid}, @@ -145,7 +145,7 @@ while( defined( $nfsstatOutputA[$int] ) ){ } - if ( $int == 13 ){ + if ( $int == 12 ){ ( $data{AttrHits}, $data{AttrMisses}, @@ -159,7 +159,7 @@ while( defined( $nfsstatOutputA[$int] ) ){ } - if ( $int == 15 ){ + if ( $int == 14 ){ ( $data{BioRLHits}, $data{BioRLMisses}, From 
f8879bd8f183f8184112dc14e966ecdc159c7c77 Mon Sep 17 00:00:00 2001 From: "Shao Yu-Lung (Allen)" Date: Fri, 20 May 2022 06:22:32 +0800 Subject: [PATCH 326/497] fix(dpkg): No such file or directory (#400) --- agent-local/dpkg | 2 ++ 1 file changed, 2 insertions(+) diff --git a/agent-local/dpkg b/agent-local/dpkg index 70917ba72..e89e2a02c 100755 --- a/agent-local/dpkg +++ b/agent-local/dpkg @@ -6,6 +6,8 @@ if [ -x /usr/bin/dpkg-query ]; then DATE=$(date +%s) FILE=/var/cache/librenms/agent-local-dpkg + [ -d /var/cache/librenms ] || mkdir -p /var/cache/librenms + if [ ! -e $FILE ]; then dpkg-query -W --showformat='${Status} ${Package} ${Version} ${Architecture} ${Installed-Size}\n'|grep " installed "|cut -d\ -f4- > $FILE fi From 7a4ff6d5e5587406095ccba63d230ed295482b95 Mon Sep 17 00:00:00 2001 From: adamus1red Date: Thu, 19 May 2022 23:24:12 +0100 Subject: [PATCH 327/497] Update GPSD extension to use python3 (#404) Fix the print statements so it works with python3. Update the BIN_PYTHON to use `python3` --- snmp/gpsd | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/snmp/gpsd b/snmp/gpsd index 8844bc0a4..4d7f3e40a 100755 --- a/snmp/gpsd +++ b/snmp/gpsd @@ -13,7 +13,7 @@ BIN_GPIPE='/usr/bin/env gpspipe' BIN_GREP='/usr/bin/env grep' -BIN_PYTHON='/usr/bin/env python' +BIN_PYTHON='/usr/bin/env python3' LINES=20 # Check for config file @@ -31,15 +31,15 @@ trap 'rm -f $TMPFILE' 0 2 3 15 $BIN_GPIPE -w -n $LINES > "$TMPFILE" # Parse Temp file for GPSD Data -VERSION=$(cat "$TMPFILE" | $BIN_GREP -m 1 "VERSION" | $BIN_PYTHON -c 'import sys,json;print json.load(sys.stdin)["rev"]') -GPSDMODE=$(cat "$TMPFILE" | $BIN_GREP -m 1 "mode" | $BIN_PYTHON -c 'import sys,json;print json.load(sys.stdin)["mode"]') -HDOP=$(cat "$TMPFILE" | $BIN_GREP -m 1 "hdop" | $BIN_PYTHON -c 'import sys,json;print json.load(sys.stdin)["hdop"]') -VDOP=$(cat "$TMPFILE" | $BIN_GREP -m 1 "vdop" | $BIN_PYTHON -c 'import sys,json;print json.load(sys.stdin)["vdop"]') -LAT=$(cat 
"$TMPFILE" | $BIN_GREP -m 1 "lat" | $BIN_PYTHON -c 'import sys,json;print json.load(sys.stdin)["lat"]') -LONG=$(cat "$TMPFILE" | $BIN_GREP -m 1 "lon" | $BIN_PYTHON -c 'import sys,json;print json.load(sys.stdin)["lon"]') -ALT=$(cat "$TMPFILE" | $BIN_GREP -m 1 "alt" | $BIN_PYTHON -c 'import sys,json;print json.load(sys.stdin)["alt"]') -SATS=$(cat "$TMPFILE" | $BIN_GREP -m 1 "SKY" | $BIN_PYTHON -c 'import sys,json;print len(json.load(sys.stdin)["satellites"])') -SATSUSED=$(cat "$TMPFILE" | $BIN_GREP -m 1 "SKY" | $BIN_PYTHON -c 'import sys,json;print len([sat for sat in json.load(sys.stdin)["satellites"] if sat["used"]])') +VERSION=$(cat "$TMPFILE" | $BIN_GREP -m 1 "VERSION" | $BIN_PYTHON -c 'import sys,json;print(json.load(sys.stdin)["rev"])') +GPSDMODE=$(cat "$TMPFILE" | $BIN_GREP -m 1 "mode" | $BIN_PYTHON -c 'import sys,json;print(json.load(sys.stdin)["mode"])') +HDOP=$(cat "$TMPFILE" | $BIN_GREP -m 1 "hdop" | $BIN_PYTHON -c 'import sys,json;print(json.load(sys.stdin)["hdop"])') +VDOP=$(cat "$TMPFILE" | $BIN_GREP -m 1 "vdop" | $BIN_PYTHON -c 'import sys,json;print(json.load(sys.stdin)["vdop"])') +LAT=$(cat "$TMPFILE" | $BIN_GREP -m 1 "lat" | $BIN_PYTHON -c 'import sys,json;print(json.load(sys.stdin)["lat"])') +LONG=$(cat "$TMPFILE" | $BIN_GREP -m 1 "lon" | $BIN_PYTHON -c 'import sys,json;print(json.load(sys.stdin)["lon"])') +ALT=$(cat "$TMPFILE" | $BIN_GREP -m 1 "alt" | $BIN_PYTHON -c 'import sys,json;print(json.load(sys.stdin)["alt"])') +SATS=$(cat "$TMPFILE" | $BIN_GREP -m 1 "SKY" | $BIN_PYTHON -c 'import sys,json;print(len(json.load(sys.stdin)["satellites"]))') +SATSUSED=$(cat "$TMPFILE" | $BIN_GREP -m 1 "SKY" | $BIN_PYTHON -c 'import sys,json;print(len([sat for sat in json.load(sys.stdin)["satellites"] if sat["used"]]))') # Output info for SNMP Extend echo '{"data":{"mode":"'"$GPSDMODE"'", "hdop":"'"$HDOP"'", "vdop":"'"$VDOP"'", "latitude":"'"$LAT"'", "longitude":"'"$LONG"'", "altitude":"'"$ALT"'", "satellites":"'"$SATS"'", "satellites_used":"'"$SATSUSED"'"}, 
"error":"0", "errorString":"", "version":"'"$VERSION"'"}' From 7a6115eba8c2b078a7c7e2d8bf08ed0af1d34acc Mon Sep 17 00:00:00 2001 From: Sebastian Heiden Date: Wed, 1 Jun 2022 00:26:08 +0200 Subject: [PATCH 328/497] Provide dhcpd-pools leasefile location (#407) --- snmp/dhcp.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/dhcp.py b/snmp/dhcp.py index 532665dd8..a43b38760 100755 --- a/snmp/dhcp.py +++ b/snmp/dhcp.py @@ -65,7 +65,7 @@ elif "binding state free" in line: leases["free"] += 1 -shell_cmd = "dhcpd-pools -s i -A" +shell_cmd = "dhcpd-pools -s i -A -l" + configfile["leasefile"] pool_data = ( subprocess.Popen(shell_cmd, shell=True, stdout=subprocess.PIPE) .stdout.read() From c1460ac9bca85903eba393e91b40b29cca009ef7 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Sat, 18 Jun 2022 04:52:37 -0500 Subject: [PATCH 329/497] Elastic/Opensearch SNMP extend (#408) * mostly done, just need to document stuff at the top * finish docs at the top * remove a redundant line * correct spelling of evictions * remove a unused line --- snmp/opensearch | 274 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 274 insertions(+) create mode 100755 snmp/opensearch diff --git a/snmp/opensearch b/snmp/opensearch new file mode 100755 index 000000000..8515136ec --- /dev/null +++ b/snmp/opensearch @@ -0,0 +1,274 @@ +#!/usr/bin/env perl + +#Copyright (c) 2022, Zane C. Bowers-Hadley +#All rights reserved. +# +#Redistribution and use in source and binary forms, with or without modification, +#are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. 
+# +#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +#ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +#WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +#IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, +#INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +#BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +#LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +#OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +#THE POSSIBILITY OF SUCH DAMAGE. + +=for comment + +Add this to snmpd.conf as below and restart snmpd. + + extend opensearch /etc/snmp/extends/opensearch + +Supported command line options are as below. + + -h The host to connect to. + Default: 127.0.0.1 + -p The port to use. + Default: 9200 + -P Pretty print. + +The last is only really relevant to the usage with SNMP. + +=cut + +use warnings; +use strict; +use Getopt::Std; +use JSON; +use LWP::UserAgent (); + +$Getopt::Std::STANDARD_HELP_VERSION = 1; + +sub main::VERSION_MESSAGE { + print "Elastic/Opensearch SNMP extend 0.0.0\n"; +} + +sub main::HELP_MESSAGE { + print "\n" + . "-h The host to connect to.\n" + . " Default: 127.0.0.1\n" + . "-p The port to use.\n" + . " Default: 9200\n" + . "-P Pretty print.\n"; +} + +my $host = '127.0.0.1'; +my $port = 9200; + +#gets the options +my %opts; +getopts( 'h:p:P', \%opts ); +if ( defined( $opts{h} ) ) { + $host = $opts{h}; +} +if ( defined( $opts{p} ) ) { + $port = $opts{p}; +} + +# +my $to_return = { + error => 0, + errorString => '', + version => 1, + date => {}, +}; + +my $stats_url = 'http://' . $host . ':' . $port . '/_stats'; +my $health_url = 'http://' . $host . ':' . $port . 
'/_cluster/health'; + +my $json = JSON->new->allow_nonref->canonical(1); +if ( $opts{P} ) { + $json->pretty(); +} + +my $ua = LWP::UserAgent->new( timeout => 10 ); + +my $stats_response = $ua->get($stats_url); +my $stats_json; +if ( $stats_response->is_success ) { + eval { $stats_json = decode_json( $stats_response->decoded_content ); }; + if ($@) { + $to_return->{errorString} = 'Failed to decode the JSON from "' . $stats_url . '"... ' . $@; + $to_return->{error} = 2; + print $json->encode($to_return); + if ( !$opts{P} ) { + print "\n"; + } + exit; + } +} +else { + $to_return->{errorString} = 'Failed to get "' . $stats_url . '"... ' . $stats_response->status_line; + $to_return->{error} = 1; + print $json->encode($to_return); + if ( !$opts{P} ) { + print "\n"; + } + exit; +} + +my $health_response = $ua->get($health_url); +my $health_json; +if ( $health_response->is_success ) { + eval { $health_json = decode_json( $health_response->decoded_content ); }; + if ($@) { + $to_return->{errorString} = 'Failed to decode the JSON from "' . $health_url . '"... ' . $@; + $to_return->{error} = 2; + print $json->encode($to_return); + if ( !$opts{P} ) { + print "\n"; + } + exit; + } +} +else { + $to_return->{errorString} = 'Failed to get "' . $health_url . '"... ' . 
$health_response->status_line; + $to_return->{error} = 1; + print $json->encode($to_return); + if ( !$opts{P} ) { + print "\n"; + } + exit; +} + +# +# process the health json +# +# +$to_return->{data}{cluster_name} = $health_json->{cluster_name}; +$to_return->{data}{c_nodes} = $health_json->{number_of_nodes}; +$to_return->{data}{c_data_nodes} = $health_json->{number_of_data_nodes}; +$to_return->{data}{c_act_pri_shards} = $health_json->{active_primary_shards}; +$to_return->{data}{c_act_shards} = $health_json->{active_shards}; +$to_return->{data}{c_rel_shards} = $health_json->{relocating_shards}; +$to_return->{data}{c_init_shards} = $health_json->{initializing_shards}; +$to_return->{data}{c_delayed_shards} = $health_json->{delayed_unassigned_shards}; +$to_return->{data}{c_pending_tasks} = $health_json->{number_of_pending_tasks}; +$to_return->{data}{c_in_fl_fetch} = $health_json->{number_of_in_flight_fetch}; +$to_return->{data}{c_task_max_in_time} = $health_json->{task_max_waiting_in_queue_millis}; +$to_return->{data}{c_act_shards_perc} = $health_json->{active_shards_percent_as_number}; + +# status color to int, nagious style +# green / ok = 0 +# yellow / warning = 1 +# red / critical = 2 +# unknown = 3 +if ( $health_json->{status} =~ /[Gg][Rr][Ee][Ee][Nn]/ ) { + $to_return->{data}{status} = 0; +} +elsif ( $health_json->{status} =~ /[Yy][Ee][Ll][Ll][Oo][Ww]/ ) { + $to_return->{data}{status} = 1; +} +elsif ( $health_json->{status} =~ /[Rr][Ee][Dd]/ ) { + $to_return->{data}{status} = 2; +} +else { + $to_return->{data}{status} = 3; +} + +# +# process the stats json, sucking stuff in from under _all.total +# +$to_return->{data}{ttl_ops} = $stats_json->{_all}{total}{translog}{operations}; +$to_return->{data}{ttl_size} = $stats_json->{_all}{total}{translog}{size_in_bytes}; +$to_return->{data}{ttl_uncom_ops} = $stats_json->{_all}{total}{translog}{uncommitted_operations}; +$to_return->{data}{ttl_uncom_size} = $stats_json->{_all}{total}{translog}{uncommitted_size_in_bytes}; 
+$to_return->{data}{ttl_last_mod_age} = $stats_json->{_all}{total}{translog}{earliest_last_modified_age}; + +$to_return->{data}{ti_total} = $stats_json->{_all}{total}{indexing}{index_total}; +$to_return->{data}{ti_time} = $stats_json->{_all}{total}{indexing}{index_time_in_millis}; +$to_return->{data}{ti_failed} = $stats_json->{_all}{total}{indexing}{index_failed}; +$to_return->{data}{ti_del_total} = $stats_json->{_all}{total}{indexing}{delete_total}; +$to_return->{data}{ti_del_time} = $stats_json->{_all}{total}{indexing}{delete_time_in_millis}; +$to_return->{data}{ti_noop_up_total} = $stats_json->{_all}{total}{indexing}{noop_update_total}; +$to_return->{data}{ti_throttled_time} = $stats_json->{_all}{total}{indexing}{throttle_time_in_millis}; + +if ( $stats_json->{_all}{total}{indexing}{is_throttled} eq 'true' ) { + $to_return->{data}{ti_throttled} = 1; +} +else { + $to_return->{data}{ti_throttled} = 0; +} + +$to_return->{data}{ts_q_total} = $stats_json->{_all}{total}{search}{query_total}; +$to_return->{data}{ts_q_time} = $stats_json->{_all}{total}{search}{query_time_in_millis}; +$to_return->{data}{ts_f_total} = $stats_json->{_all}{total}{search}{fetch_total}; +$to_return->{data}{ts_f_time} = $stats_json->{_all}{total}{search}{fetch_time_in_millis}; +$to_return->{data}{ts_sc_total} = $stats_json->{_all}{total}{search}{scroll_total}; +$to_return->{data}{ts_sc_time} = $stats_json->{_all}{total}{search}{scroll_time_in_millis}; +$to_return->{data}{ts_su_total} = $stats_json->{_all}{total}{search}{suggest_total}; +$to_return->{data}{ts_su_time} = $stats_json->{_all}{total}{search}{suggest_time_in_millis}; + +$to_return->{data}{tr_total} = $stats_json->{_all}{total}{refresh}{total}; +$to_return->{data}{tr_time} = $stats_json->{_all}{total}{refresh}{total_time_in_millis}; +$to_return->{data}{tr_ext_total} = $stats_json->{_all}{total}{refresh}{external_total}; +$to_return->{data}{tr_ext_time} = $stats_json->{_all}{total}{refresh}{external_total_time_in_millis}; + 
+$to_return->{data}{tf_total} = $stats_json->{_all}{total}{flush}{total}; +$to_return->{data}{tf_periodic} = $stats_json->{_all}{total}{flush}{periodic}; +$to_return->{data}{tf_time} = $stats_json->{_all}{total}{flush}{total_time_in_millis}; + +$to_return->{data}{tqc_size} = $stats_json->{_all}{total}{query_cache}{memory_size_in_bytes}; +$to_return->{data}{tqc_total} = $stats_json->{_all}{total}{query_cache}{total_count}; +$to_return->{data}{tqc_hit} = $stats_json->{_all}{total}{query_cache}{hit_count}; +$to_return->{data}{tqc_miss} = $stats_json->{_all}{total}{query_cache}{miss_count}; +$to_return->{data}{tqc_miss} = $stats_json->{_all}{total}{query_cache}{miss_count}; +$to_return->{data}{tqc_cache_size} = $stats_json->{_all}{total}{query_cache}{cache_size}; +$to_return->{data}{tqc_cache_count} = $stats_json->{_all}{total}{query_cache}{cache_count}; +$to_return->{data}{tqc_evictions} = $stats_json->{_all}{total}{query_cache}{evictions}; + +$to_return->{data}{tg_total} = $stats_json->{_all}{total}{get}{total}; +$to_return->{data}{tg_time} = $stats_json->{_all}{total}{get}{time_in_millis}; +$to_return->{data}{tg_exists_total} = $stats_json->{_all}{total}{get}{exists_total}; +$to_return->{data}{tg_exists_time} = $stats_json->{_all}{total}{get}{exists_time_in_millis}; +$to_return->{data}{tg_missing_total} = $stats_json->{_all}{total}{get}{missing_total}; +$to_return->{data}{tg_missing_time} = $stats_json->{_all}{total}{get}{missing_time_in_millis}; + +$to_return->{data}{tm_total} = $stats_json->{_all}{total}{merges}{total}; +$to_return->{data}{tm_time} = $stats_json->{_all}{total}{merges}{total_time_in_millis}; +$to_return->{data}{tm_docs} = $stats_json->{_all}{total}{merges}{total_docs}; +$to_return->{data}{tm_size} = $stats_json->{_all}{total}{merges}{total_size_in_bytes}; +$to_return->{data}{tm_throttled_time} = $stats_json->{_all}{total}{merges}{total_throttled_time_in_millis}; +$to_return->{data}{tm_throttled_size} = 
$stats_json->{_all}{total}{merges}{total_auto_throttle_in_bytes}; + +$to_return->{data}{tw_total} = $stats_json->{_all}{total}{warmer}{total}; +$to_return->{data}{tw_time} = $stats_json->{_all}{total}{warmer}{total_time_in_millis}; + +$to_return->{data}{tfd_size} = $stats_json->{_all}{total}{fielddata}{memory_size_in_bytes}; +$to_return->{data}{tfd_evictions} = $stats_json->{_all}{total}{fielddata}{evictions}; + +$to_return->{data}{tseg_count} = $stats_json->{_all}{total}{segments}{count}; +$to_return->{data}{tseg_size} = $stats_json->{_all}{total}{segments}{memory_in_bytes}; +$to_return->{data}{tseg_terms_size} = $stats_json->{_all}{total}{segments}{terms_memory_in_bytes}; +$to_return->{data}{tseg_fields_size} = $stats_json->{_all}{total}{segments}{stored_fields_memory_in_bytes}; +$to_return->{data}{tseg_tvector_size} = $stats_json->{_all}{total}{segments}{term_vectors_memory_in_bytes}; +$to_return->{data}{tseg_norms_size} = $stats_json->{_all}{total}{segments}{norms_memory_in_bytes}; +$to_return->{data}{tseg_points_size} = $stats_json->{_all}{total}{segments}{points_memory_in_bytes}; +$to_return->{data}{tseg_docval_size} = $stats_json->{_all}{total}{segments}{doc_values_memory_in_bytes}; +$to_return->{data}{tseg_indwrt_size} = $stats_json->{_all}{total}{segments}{index_writer_memory_in_bytes}; +$to_return->{data}{tseg_vermap_size} = $stats_json->{_all}{total}{segments}{version_map_memory_in_bytes}; +$to_return->{data}{tseg_fbs_size} = $stats_json->{_all}{total}{segments}{fixed_bit_set_memory_in_bytes}; + +$to_return->{data}{trc_size} = $stats_json->{_all}{total}{request_cache}{memory_size_in_bytes}; +$to_return->{data}{trc_evictions} = $stats_json->{_all}{total}{request_cache}{evictions}; +$to_return->{data}{trc_hits} = $stats_json->{_all}{total}{request_cache}{hit_count}; +$to_return->{data}{trc_misses} = $stats_json->{_all}{total}{request_cache}{miss_count}; + +$to_return->{data}{tst_size} = $stats_json->{_all}{total}{store}{size_in_bytes}; 
+$to_return->{data}{tst_res_size} = $stats_json->{_all}{total}{store}{reserved_in_bytes}; + +print $json->encode($to_return); +if ( !$opts{P} ) { + print "\n"; +} +exit 0; From ce756a9227cf6032f44da8f30313183797d1c9ab Mon Sep 17 00:00:00 2001 From: Trae Santiago <249409+Trae32566@users.noreply.github.com> Date: Sat, 18 Jun 2022 04:58:33 -0500 Subject: [PATCH 330/497] Replaced mdadm script with a newer, more flexible version (#401) * Replaced mdadm script with a newer, more flexible version * added basic error handling, and conditionals for missing array * added fallback json squashing code if jq is missing * fixed comments and shellcheck * spacing --- snmp/mdadm | 202 ++++++++++++++++++++++------------------------------- 1 file changed, 84 insertions(+), 118 deletions(-) diff --git a/snmp/mdadm b/snmp/mdadm index 5e820c808..57628f698 100755 --- a/snmp/mdadm +++ b/snmp/mdadm @@ -1,120 +1,86 @@ -#!/bin/bash - -CAT=/bin/cat -LS=/bin/ls -BASENAME=/usr/bin/basename -REALPATH=/usr/bin/realpath - -CONFIGFILE=/etc/snmp/mdadm.conf -if [ -f $CONFIGFILE ] ; then - # shellcheck disable=SC1090 - . $CONFIGFILE -fi - -VERSION=1 -ERROR_CODE=0 -ERROR_STRING="" - -OUTPUT_DATA='[' - -# use 'ls' command to check if md blocks exist -if $LS /dev/md?* 1> /dev/null 2>&1 ; then - for ARRAY_BLOCKDEVICE in $($LS -1 /dev/md?*) ; do - RAID="/sys/block/"$($BASENAME "$($REALPATH "$ARRAY_BLOCKDEVICE")") - - # ignore arrays with no slaves - if [ -z "$($LS -1 "$RAID"/slaves 2> /dev/null)" ] ; then - continue +#!/usr/bin/env bash +# MDADM SNMP extension for LibreNMS +# Version +extendVer='2.0.0' +# Initial portion of json +mdadmSNMPOutput='{ "data": [' + +# Outputs a list of devices +list_devices() { + for device in "${1}/slaves/"*; do + if [ "${2,,}" == 'count' ]; then + ((devCount++)) + elif [ "${2,,}" != 'missing' ] || [ ! -e "${device}" ]; then + printf '%b\t "%s"' "${multiDisk}" "$(basename "${device}")" + multiDisk=',\n' fi - # ignore "non existing" arrays - if [ ! 
-f "$RAID/md/degraded" ] ; then - continue - fi - - if [[ $($BASENAME "$ARRAY_BLOCKDEVICE") = [[:digit:]] ]] ; then - RAID_NAME=$($BASENAME "$RAID") - else - RAID_NAME=$($BASENAME "$ARRAY_BLOCKDEVICE") - fi - RAID_DEV_LIST=$($LS "$RAID"/slaves/) - RAID_LEVEL=$($CAT "$RAID"/md/level) - RAID_DISC_COUNT=$($CAT "$RAID"/md/raid_disks| cut -d' ' -f1) - RAID_STATE=$($CAT "$RAID"/md/array_state) - RAID_ACTION=$($CAT "$RAID"/md/sync_action) - RAID_DEGRADED=$($CAT "$RAID"/md/degraded) - - if [ "$RAID_SYNC_SPEED" = "none" ] ; then - RAID_SYNC_SPEED=0 - else - let "RAID_SYNC_SPEED=$($CAT "$RAID"/md/sync_speed)*1024" - fi - - if [ "$($CAT "$RAID"/md/sync_completed)" != "none" ] ; then - let "RAID_SYNC_COMPLETED=100*$($CAT "$RAID"/md/sync_completed)" - elif [ "$RAID_DEGRADED" -eq 1 ] ; then - RAID_SYNC_COMPLETED=0 - else - RAID_SYNC_COMPLETED=100 - fi - - # divide with 2 to size like in /proc/mdstat - # and multiply with 1024 to get size in bytes - let "RAID_SIZE=$($CAT "$RAID"/size)*1024/2" - - RAID_DEVICE_LIST='[' - ALL_DEVICE_COUNT=0 - for D in $RAID_DEV_LIST ; do - RAID_DEVICE_LIST=$RAID_DEVICE_LIST'"'$D'",' - let "ALL_DEVICE_COUNT+=1" - done - if [ ${#RAID_DEVICE_LIST} -gt 3 ] ; then - RAID_DEVICE_LIST=${RAID_DEVICE_LIST: : -1} - fi - RAID_DEVICE_LIST=$RAID_DEVICE_LIST']' - - RAID_MISSING_DEVICES='[' - for D in $RAID_DEV_LIST ; do - if [ -L "$RAID"/slaves/"$D" ] && [ -f "$RAID"/slaves/"$D" ] ; then - RAID_MISSING_DEVICES=$RAID_MISSING_DEVICES'"'$D'",' - fi - done - if [ ${#RAID_MISSING_DEVICES} -gt 3 ] ; then - RAID_MISSING_DEVICES=${RAID_MISSING_DEVICES: : -1} - fi - RAID_MISSING_DEVICES=$RAID_MISSING_DEVICES']' - - let "RAID_HOTSPARE_COUNT=ALL_DEVICE_COUNT-RAID_DISC_COUNT" - if [ "$RAID_HOTSPARE_COUNT" -lt 0 ] ; then - RAID_HOTSPARE_COUNT=0 - fi - - ARRAY_DATA='{'\ -'"name":"'$RAID_NAME\ -'","level":"'$RAID_LEVEL\ -'","size":"'$RAID_SIZE\ -'","disc_count":"'$RAID_DISC_COUNT\ -'","hotspare_count":"'$RAID_HOTSPARE_COUNT\ -'","device_list":'$RAID_DEVICE_LIST\ 
-',"missing_device_list":'$RAID_MISSING_DEVICES\ -',"state":"'$RAID_STATE\ -'","action":"'$RAID_ACTION\ -'","degraded":"'$RAID_DEGRADED\ -'","sync_speed":"'$RAID_SYNC_SPEED\ -'","sync_completed":"'$RAID_SYNC_COMPLETED\ -'"},' - - OUTPUT_DATA=$OUTPUT_DATA$ARRAY_DATA done - - OUTPUT_DATA=${OUTPUT_DATA: : -1}']' -else - OUTPUT_DATA=${OUTPUT_DATA}']' -fi - -OUTPUT='{"data":'$OUTPUT_DATA\ -',"error":"'$ERROR_CODE\ -'","errorString":"'$ERROR_STRING\ -'","version":"'$VERSION'"}' - -echo "$OUTPUT" - + [ "${devCount}" ] && echo "${devCount}" +} + +# Outputs either 0, 100, or the value of the file referenced +maybe_get() { + if [ -f "${1}" ] && [ "$(cat "${1}")" != 'none' ]; then + cat "${1}" + else + echo 0 + fi +} + +main() { + if ! which 'jq' > /dev/null 2>&1; then + errorCode=1 + # The underscore here is a hack since we have to strip spaces without jq + errorString='jq_missing!' + elif stat "/dev/md"[[:digit:]]* > /dev/null 2>&1; then + for mdadmArray in "/dev/md"[[:digit:]]*; do + # Ignore partitions + [[ "${mdadmArray}" =~ '/dev/md'[[:digit:]]+'p' ]] && continue + + mdadmName="$(basename "$(realpath "${mdadmArray}")")" + mdadmSysDev="/sys/block/${mdadmName}" + + read -r -d '' mdadmOutput < /dev/null || sed 's/\s//g' <<< "${mdadmSNMPOutput//$'\n'/}${metadataOutput//$'\n'/}" +} + +main "${@}" From a5b21403c8162239c7268bd076e41ab5da19f693 Mon Sep 17 00:00:00 2001 From: Thermi Date: Sat, 18 Jun 2022 11:59:19 +0200 Subject: [PATCH 331/497] osupdates: unpriv implementation alternative (#395) --- snmp/unpriv/osupdates/Readme.md | 9 ++ .../librenms-osupdates-generate.service | 8 ++ .../librenms-osupdates-generate.timer | 11 ++ .../osupdates/osupdates-unpriv-gather.sh | 11 ++ .../osupdates/osupdates-unpriv-generate.sh | 115 ++++++++++++++++++ 5 files changed, 154 insertions(+) create mode 100644 snmp/unpriv/osupdates/Readme.md create mode 100644 snmp/unpriv/osupdates/librenms-osupdates-generate.service create mode 100644 snmp/unpriv/osupdates/librenms-osupdates-generate.timer 
create mode 100644 snmp/unpriv/osupdates/osupdates-unpriv-gather.sh create mode 100644 snmp/unpriv/osupdates/osupdates-unpriv-generate.sh diff --git a/snmp/unpriv/osupdates/Readme.md b/snmp/unpriv/osupdates/Readme.md new file mode 100644 index 000000000..d778a5d04 --- /dev/null +++ b/snmp/unpriv/osupdates/Readme.md @@ -0,0 +1,9 @@ +# osupdates + +## Installation + +1. Copy shell scripts into /usr/local/bin/ +2. Make them executable +3. Copy timer and service unit into /etc/systemd/system/ +4. Activate timer (`systemctl enable --now librenms-osupdates-generate.timer`) +5. Set `extend osupdate /usr/local/bin/osupdates-unpriv-gather.sh` in `/etc/snmp/snmpd.conf` diff --git a/snmp/unpriv/osupdates/librenms-osupdates-generate.service b/snmp/unpriv/osupdates/librenms-osupdates-generate.service new file mode 100644 index 000000000..238e2e586 --- /dev/null +++ b/snmp/unpriv/osupdates/librenms-osupdates-generate.service @@ -0,0 +1,8 @@ +# librenms-osupdates-generate.service + +[Unit] +Description=generate osupdates information + +[Service] +ExecStart=/usr/local/bin/osupdates-unpriv-generate.sh + diff --git a/snmp/unpriv/osupdates/librenms-osupdates-generate.timer b/snmp/unpriv/osupdates/librenms-osupdates-generate.timer new file mode 100644 index 000000000..e40fb7e37 --- /dev/null +++ b/snmp/unpriv/osupdates/librenms-osupdates-generate.timer @@ -0,0 +1,11 @@ +# librenms-osupdates-generate.timer + +[Unit] +Description=generates osupdates information minutely + +[Timer] +OnCalendar=hourly +Persistent=true + +[Install] +WantedBy=timers.target diff --git a/snmp/unpriv/osupdates/osupdates-unpriv-gather.sh b/snmp/unpriv/osupdates/osupdates-unpriv-gather.sh new file mode 100644 index 000000000..a337c5981 --- /dev/null +++ b/snmp/unpriv/osupdates/osupdates-unpriv-gather.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +SNMP_PERSISTENT_DIR="$(net-snmp-config --persistent-directory)" +UNPRIV_SHARED_FILE="$SNMP_PERSISTENT_DIR/osupdates/stats.txt" + +if [ -f "$UNPRIV_SHARED_FILE" ]; then + cat 
"$UNPRIV_SHARED_FILE" +else + echo "0" + logger -p daemon.error -t "osupdates-unpriv" Reading osupdate data from file "$UNPRIV_SHARED_FILE" failed! +fi diff --git a/snmp/unpriv/osupdates/osupdates-unpriv-generate.sh b/snmp/unpriv/osupdates/osupdates-unpriv-generate.sh new file mode 100644 index 000000000..08a6bca44 --- /dev/null +++ b/snmp/unpriv/osupdates/osupdates-unpriv-generate.sh @@ -0,0 +1,115 @@ +#!/usr/bin/env bash +################################################################ +# copy this script to /etc/snmp/ and make it executable: # +# chmod +x /etc/snmp/osupdate # +# ------------------------------------------------------------ # +# edit your snmpd.conf and include: # +# extend osupdate /etc/snmp/osupdate # +#--------------------------------------------------------------# +# restart snmpd and activate the app for desired host # +#--------------------------------------------------------------# +# please make sure you have the path/binaries below # +################################################################ +BIN_WC='/usr/bin/env wc' +BIN_GREP='/usr/bin/env grep' +CMD_GREP='-c' +CMD_WC='-l' +BIN_ZYPPER='/usr/bin/env zypper' +CMD_ZYPPER='-q lu' +BIN_YUM='/usr/bin/env yum' +CMD_YUM='-q check-update' +BIN_DNF='/usr/bin/env dnf' +CMD_DNF='-q check-update' +BIN_APT='/usr/bin/env apt-get' +CMD_APT='-qq -s upgrade' +BIN_PACMAN='/usr/bin/env pacman' +CMD_PACMAN='-Sup' +BIN_CHECKUPDATES='/usr/bin/env checkupdates' +BIN_PKG='/usr/sbin/pkg' +CMD_PKG=' audit -q -F' +BIN_APK='/sbin/apk' +CMD_APK=' version' +SNMP_PERSISTENT_DIR="$(net-snmp-config --persistent-directory)" +UNPRIV_SHARED_FILE="$SNMP_PERSISTENT_DIR/osupdates/stats.txt" + +mkdir -p "$(dirname "$UNPRIV_SHARED_FILE" )" +exec > "$UNPRIV_SHARED_FILE" + +################################################################ +# Don't change anything unless you know what are you doing # +################################################################ +if command -v zypper &>/dev/null ; then + # OpenSUSE + # 
shellcheck disable=SC2086 + UPDATES=$($BIN_ZYPPER $CMD_ZYPPER | $BIN_WC $CMD_WC) + if [ "$UPDATES" -ge 2 ]; then + echo $(($UPDATES-2)); + else + echo "0"; + fi +elif command -v dnf &>/dev/null ; then + # Fedora + # shellcheck disable=SC2086 + UPDATES=$($BIN_DNF $CMD_DNF | $BIN_WC $CMD_WC) + if [ "$UPDATES" -ge 1 ]; then + echo $(($UPDATES-1)); + else + echo "0"; + fi +elif command -v pacman &>/dev/null ; then + # Arch + # calling pacman -Sup does not refresh the package list from the mirrors, + # thus it is not useful to find out if there are updates. Keep the pacman call + # to accomodate users that do not have it. checkupdates is in pacman-contrib. + # also enables snmpd to collect this information if it's not run as root + if command -v checkupdates &>/dev/null ; then + # shellcheck disable=SC2086 + UPDATES=$($BIN_CHECKUPDATES | $BIN_WC $CMD_WC) + else + # shellcheck disable=SC2086 + UPDATES=$($BIN_PACMAN $CMD_PACMAN | $BIN_WC $CMD_WC) + fi + if [ "$UPDATES" -ge 1 ]; then + echo $(($UPDATES-1)); + else + echo "0"; + fi +elif command -v yum &>/dev/null ; then + # CentOS / Redhat + # shellcheck disable=SC2086 + UPDATES=$($BIN_YUM $CMD_YUM | $BIN_WC $CMD_WC) + if [ "$UPDATES" -ge 1 ]; then + echo $(($UPDATES-1)); + else + echo "0"; + fi +elif command -v apt-get &>/dev/null ; then + # Debian / Devuan / Ubuntu + # shellcheck disable=SC2086 + UPDATES=$($BIN_APT $CMD_APT | $BIN_GREP $CMD_GREP 'Inst') + if [ "$UPDATES" -ge 1 ]; then + echo "$UPDATES"; + else + echo "0"; + fi +elif command -v pkg &>/dev/null ; then + # FreeBSD + # shellcheck disable=SC2086 + UPDATES=$($BIN_PKG $CMD_PKG | $BIN_WC $CMD_WC) + if [ "$UPDATES" -ge 1 ]; then + echo "$UPDATES"; + else + echo "0"; + fi +elif command -v apk &>/dev/null ; then + # Alpine + # shellcheck disable=SC2086 + UPDATES=$($BIN_APK $CMD_APK | $BIN_WC $CMD_WC) + if [ "$UPDATES" -ge 2 ]; then + echo $(($UPDATES-1)); + else + echo "0"; + fi +else + echo "0"; +fi From 56828b8c846a70d7a998f78c340c785d0b1444c1 Mon Sep 17 00:00:00 
2001 From: SourceDoctor Date: Sun, 19 Jun 2022 13:38:41 +0200 Subject: [PATCH 332/497] mdadm sync_complete fix (#409) --- snmp/mdadm | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/snmp/mdadm b/snmp/mdadm index 57628f698..28f351382 100755 --- a/snmp/mdadm +++ b/snmp/mdadm @@ -40,7 +40,16 @@ main() { mdadmName="$(basename "$(realpath "${mdadmArray}")")" mdadmSysDev="/sys/block/${mdadmName}" + degraded=$(maybe_get "${mdadmSysDev}/md/degraded") + syncSpeed=$(($(maybe_get "${mdadmSysDev}/md/sync_speed") * 1024)) + + syncCompleted=$(maybe_get "${mdadmSysDev}/md/sync_completed") + if [ $syncCompleted -eq 0 ] && [ $degraded -eq 0 ] && [ $syncSpeed -eq 0 ]; then + syncCompleted="100" + fi + read -r -d '' mdadmOutput < Date: Sun, 10 Jul 2022 11:51:11 +0200 Subject: [PATCH 333/497] Add support for PhotonOS's tdnf. (#411) Example output: root [ /home/ives ]# tdnf -q check-update Linux-PAM.x86_64 1.4.0-5.ph4 photon-updates cloud-init.noarch 22.2.2-1.ph4 photon-updates curl.x86_64 7.83.1-2.ph4 photon-updates curl-libs.x86_64 7.83.1-2.ph4 photon-updates openssl.x86_64 3.0.3-3.ph4 photon-updates So a simple line count should suffice. 
Output of the script: root [ /home/ives ]# /bin/docker-osupdate 5 --- snmp/osupdate | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/snmp/osupdate b/snmp/osupdate index 4a9b568cc..87e16873f 100755 --- a/snmp/osupdate +++ b/snmp/osupdate @@ -20,6 +20,8 @@ BIN_YUM='/usr/bin/env yum' CMD_YUM='-q check-update' BIN_DNF='/usr/bin/env dnf' CMD_DNF='-q check-update' +BIN_TDNF='/usr/bin/env tdnf' +CMD_TDNF='-q check-update' BIN_APT='/usr/bin/env apt-get' CMD_APT='-qq -s upgrade' BIN_PACMAN='/usr/bin/env pacman' @@ -51,6 +53,15 @@ elif command -v dnf &>/dev/null ; then else echo "0"; fi +elif command -v tdnf &>/dev/null ; then + # PhotonOS + # shellcheck disable=SC2086 + UPDATES=$($BIN_TDNF $CMD_TDNF | $BIN_WC $CMD_WC) + if [ "$UPDATES" -ge 1 ]; then + echo "$UPDATES"; + else + echo "0"; + fi elif command -v pacman &>/dev/null ; then # Arch # calling pacman -Sup does not refresh the package list from the mirrors, From cc515970ad1fe4773312fca14c96b26e3d7efb9a Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Thu, 14 Jul 2022 14:59:03 -0500 Subject: [PATCH 334/497] add extend for polling/monitoring CAPEv2 (#412) --- snmp/cape | 448 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 448 insertions(+) create mode 100755 snmp/cape diff --git a/snmp/cape b/snmp/cape new file mode 100755 index 000000000..d3748ad64 --- /dev/null +++ b/snmp/cape @@ -0,0 +1,448 @@ +#!/usr/bin/env perl + +#Copyright (c) 2022, Zane C. Bowers-Hadley +#All rights reserved. +# +#Redistribution and use in source and binary forms, with or without modification, +#are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. 
+# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +#ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +#WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +#IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, +#INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +#BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +#LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +#OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +#THE POSSIBILITY OF SUCH DAMAGE. + +=for comment + +Add this to snmpd.conf as below and restart snmpd. + + extend cape /etc/snmp/extends/cape + +Supported command line options are as below. + + -c Config INI file. + Default: /usr/local/etc/cape_extend.ini + +The defeault setttings are... + + # DBI connection DSN + dsn=dbi:Pg:dbname=cape + + # DB user + user=cape + + # DB PW + pass= + + # CAPEv2 cuckoo log file + clog=/opt/CAPEv2/log/cuckoo.log + + # CAPEv2 process log file + plog=/opt/CAPEv2/log/process.log + + # 0/1 for if it is okay for the process log to not exist + # this enables it to work with cuckoo as well as CAPEv2 + mplogok=1 + + # list of ignores + ignores=/usr/local/etc/cape_extend.ignores + + # send errors along for inclusion in the event log + sendErrors=1 + + # send criticals along for inclusion in the event log + sendCriticals=1 + + # send warnings along for inclusion in the event log + sendWarnings= 1 + + # don't use analysis_started_on. analysis_finished_on. 
processing_started_on, + # processing_finished_on, signatures_started_on, signatures_finished_on, + # reporting_started_on, or reporting_finished_on with the SQL statement + # + # This is specifically for supporting ancient cuckoo instances. + cuckoosql=0 + +The ignores file will only be used if it exists. The format is as below. + + + +This the ignore level will be lower cased. The seperator bween the level and +the regexp pattern is /[\ \t]+/. So if you want to ignore the two warnings +generated when VM traffic is dropped, you would use the two lines such as below. + + WARNING PCAP file does not exist at path + WARNING Unable to Run Suricata: Pcap file + +=cut + +use strict; +use warnings; +use Getopt::Long; +use File::ReadBackwards; +use JSON; +use Config::Tiny; +use DBI; +use Time::Piece; +use File::Slurp; + +sub version { + print "cape v. 0.0.1\n"; +} + +sub help { + &version; + + print ' + +-c Config INI file. + Default: /usr/local/etc/cape_extend.ini +'; +} + +# get the commandline options +my $help = 0; +my $version = 0; +my $ini_file = '/usr/local/etc/cape_extend.ini'; +Getopt::Long::Configure('no_ignore_case'); +Getopt::Long::Configure('bundling'); +GetOptions( + 'version' => \$version, + 'v' => \$version, + 'help' => \$help, + 'h' => \$help, + 'i=s' => \$ini_file, +); + +# print version or help if requested +if ($help) { + &help; + exit 42; +} +if ($version) { + &version; + exit 42; +} + +# time +my $current_time = time; +my $target_time = $current_time - 300; + +my $return_json = { + data => { + error => 0, + errors => [], + info => 0, + debug => 0, + warning => 0, + warnigns => [], + critical => 0, + criticals => [], + banned => 0, + pending => 0, + running => 0, + completed => 0, + distributed => 0, + reported => 0, + recovered => 0, + failed_analysis => 0, + failed_processing => 0, + failed_reporting => 0, + packages => {}, + dropped_files => 0, + running_processes => 0, + api_calls => 0, + domains => 0, + signatures_total => 0, + signatures_alert => 
0, + files_written => 0, + registry_keys_modified => 0, + crash_issues => 0, + anti_issues => 0, + timedout => 0, + pkg_stats => {}, + total_tasks => 0, + }, + error => 0, + errorString => '', + version => 1, +}; + +# used for checking if the level value is somethingw understand +my $level_check = { info => 1, debug => 1, error => 1, warning => 1, critical => 1 }; + +# read the config and put together the defaults +my $defaults = { + dsn => 'dbi:Pg:dbname=cape', + user => 'cape', + pass => '', + clog => '/opt/CAPEv2/log/cuckoo.log', + plog => '/opt/CAPEv2/log/process.log', + mplogok => 1, + ignores => '/usr/local/etc/cape_extend.ignores', + sendErrors => 1, + sendCriticals => 1, + sendWarnings => 1, + cuckoosql => 0, +}; +my $config = Config::Tiny->read( $ini_file, 'utf8' ); +if ( !defined($config) ) { + $config = $defaults; +} +else { + $config = $config->{_}; + + # reel in the defaults + foreach my $default_key ( keys( %{$defaults} ) ) { + if ( !defined( $config->{$default_key} ) ) { + $config->{$default_key} = $defaults->{$default_key}; + } + } +} + +# read in the ignore file +my $ignores = { info => [], debug => [], error => [], warning => [], critical => [] }; +if ( -f $config->{ignores} ) { + my $ignore_raw = read_file( $config->{ignores} ); + my @ignore_split = grep( !/^[\ \t]*$/, grep( !/^[\ \t]*\#/, split( /\n/, $ignore_raw ) ) ); + foreach my $to_ignore (@ignore_split) { + my ( $ignore_level, $pattern ) = split( /[\ \t]+/, $to_ignore, 2 ); + if ( defined($ignore_level) and defined($pattern) ) { + $ignore_level = lc($ignore_level); + push( @{ $ignores->{$ignore_level} }, $pattern ); + } + } +} + +# put together the list of logs to read +my @logs; +if ( !-f $config->{clog} ) { + $return_json->{error} = '"' . $defaults->{clog} . '" does not exist'; +} +else { + push( @logs, $config->{clog} ); +} +if ( !-f $config->{plog} && !$config->{mplogok} ) { + $return_json->{error} = '"' . $defaults->{clog} . 
'" does not exist'; +} +else { + push( @logs, $config->{plog} ); +} + +my $process_loop = 0; +my $process_logs = 1; +while ( $process_logs && defined( $logs[$process_loop] ) ) { + my $log = $logs[$process_loop]; + + my $bw; + eval { $bw = File::ReadBackwards->new($log); }; + + my $continue = 1; + my $current_entry = ''; + while ( defined($bw) && defined( my $log_line = $bw->readline ) && $continue ) { + $current_entry = $log_line . $current_entry; + if ( $current_entry + =~ /^20[0-9][0-9]\-[01][0-9]\-[0-3][0-9]\ [0-2][0-9]\:[0-5][0-9]\:[0-5][0-9]\,[0-9]+\ \[[a-z-A-Z\.0-9\_\-]+\]\ [a-zA-Z]+\:/ + ) + { + # parse it and blank it for when we get to the next one. + my ( $date, $time, $lib, $level, $entry ) = split( /[\ \t]+/, $current_entry, 5 ); + $current_entry = ''; + + # chomp off the seconds place after the , + $time =~ s/\,.*//; + my $t = Time::Piece->strptime( $date . 'T' . $time, '%Y-%m-%dT%H:%M:%S' ); + + if ( $t->epoch <= $target_time ) { + $continue = 0; + } + else { + $level = lc($level); + $level =~ s/\://; + if ( defined( $level_check->{$level} ) ) { + my $add_it = 1; + my $ignore_int = 0; + foreach ( @{ $ignores->{$level} } ) { + my $test = $_; + if ( $entry =~ /$test/ ) { + $add_it = 0; + } + $ignore_int++; + } + if ($add_it) { + $return_json->{data}->{$level}++; + if ( $level eq 'error' and $config->{sendErrors} ) { + push( @{ $return_json->{data}->{errors} }, $entry ); + } + elsif ( $level eq 'warning' and $config->{sendWarnings} ) { + push( @{ $return_json->{data}->{warnings} }, $entry ); + } + elsif ( $level eq 'criticals' and $config->{sendCriticals} ) { + push( @{ $return_json->{data}->{criticals} }, $entry ); + } + } + } + } + } + } + + $process_loop++; +} + +my $query; +if ( $config->{dsn} =~ /^[Dd][Bb][Ii]:[Mm]ysql/ ) { + $query + = "select status,package from tasks where ( added_on > FROM_UNIXTIME('" + . $target_time + . "')) or " + . "( started_on > FROM_UNIXTIME('" + . $target_time + . "')) or " + . "( completed_on > FROM_UNIXTIME('" + . 
$target_time . "')); "; +} +else { + $query + = "select status,package,dropped_files,running_processes,api_calls,domains,signatures_total,signatures_alert,files_written,registry_keys_modified,crash_issues,anti_issues,timedout from tasks where ( added_on > CURRENT_TIMESTAMP - interval '5 minutes' ) or " + . "( started_on > CURRENT_TIMESTAMP - interval '5 minutes' ) or " + . "( completed_on > CURRENT_TIMESTAMP - interval '5 minutes' )"; + if ( !$config->{cuckoosql} ) { + $query + = $query + . " or ( analysis_started_on > CURRENT_TIMESTAMP - interval '5 minutes' ) or " + . "(analysis_finished_on > CURRENT_TIMESTAMP - interval '5 minutes' ) or " + . "( processing_started_on > CURRENT_TIMESTAMP - interval '5 minutes' ) or " + . "( processing_finished_on > CURRENT_TIMESTAMP - interval '5 minutes' ) or " + . "( signatures_started_on > CURRENT_TIMESTAMP - interval '5 minutes' ) or " + . "( signatures_finished_on > CURRENT_TIMESTAMP - interval '5 minutes' ) or " + . "( reporting_started_on > CURRENT_TIMESTAMP - interval '5 minutes' ) or " + . "( reporting_finished_on > CURRENT_TIMESTAMP - interval '5 minutes' );"; + } + else { + $query = $query . ';'; + } +} + +eval { + my $dbh = DBI->connect( $config->{dsn}, $config->{user}, $config->{pass} ) || die($DBI::errstr); + my $sth = $dbh->prepare($query); + $sth->execute; + my $task_status; + my $task_package; + my $dropped_files; + my $running_processes; + my $api_calls; + my $domains; + my $signatures_total; + my $signatures_alert; + my $files_written; + my $registry_keys_modified; + my $crash_issues; + my $anti_issues; + my $timedout; + # + # MySQL is basically for old Cuckoo support. 
+ # CAPEv2 does not really play nice with it because of column issues + # + if ( $config->{dsn} =~ /^[Dd][Bb][Ii]:[Mm]ysql/ ) { + $sth->bind_columns( undef, \$task_status, \$task_package ); + while ( $sth->fetch ) { + if ( defined( $return_json->{data}->{$task_status} ) ) { + $return_json->{data}->{$task_status}++; + $return_json->{data}->{total_tasks}++; + } + } + } + else { + $sth->bind_columns( + undef, \$task_status, \$task_package, \$dropped_files, + \$running_processes, \$api_calls, \$domains, \$signatures_total, + \$signatures_alert, \$files_written, \$registry_keys_modified, \$crash_issues, + \$anti_issues, \$timedout + ); + while ( $sth->fetch ) { + if ( defined( $return_json->{data}->{$task_status} ) ) { + $return_json->{data}->{$task_status}++; + $return_json->{data}->{total_tasks}++; + } + + # skip blank entries + if ( $task_package ne '' ) { + if ( defined( $return_json->{data}->{packages}->{$task_package} ) ) { + $return_json->{data}->{packages}->{$task_package}++; + } + else { + $return_json->{data}->{packages}->{$task_package} = 1; + } + } + + # if dropped files is defined and not blank, the rest will + # if this is blank then runstatistics is conf/reporting.conf + if ( defined($dropped_files) ) { + $return_json->{data}->{dropped_files} += $dropped_files; + $return_json->{data}->{running_processes} += $running_processes; + $return_json->{data}->{api_calls} += $api_calls; + $return_json->{data}->{domains} += $domains; + $return_json->{data}->{signatures_total} += $signatures_total; + $return_json->{data}->{signatures_alert} += $signatures_alert; + $return_json->{data}->{files_written} += $files_written; + $return_json->{data}->{registry_keys_modified} += $registry_keys_modified; + $return_json->{data}->{crash_issues} += $crash_issues; + $return_json->{data}->{anti_issues} += $anti_issues; + + # put per package stats together + if ( $task_package ne '' ) { + if ( !defined( $return_json->{data}->{pkg_stats}->{$task_package} ) ) { + 
$return_json->{data}->{pkg_stats}->{$task_package} = { + dropped_files => $dropped_files, + running_processes => $running_processes, + api_calls => $api_calls, + domains => $domains, + signatures_total => $signatures_total, + signatures_alert => $signatures_alert, + files_written => $files_written, + registry_keys_modified => $registry_keys_modified, + crash_issues => $crash_issues, + anti_issues => $anti_issues + }; + } + else { + $return_json->{data}->{pkg_stats}->{$task_package}->{dropped_files} += $dropped_files; + $return_json->{data}->{pkg_stats}->{$task_package}->{running_processes} += $running_processes; + $return_json->{data}->{pkg_stats}->{$task_package}->{api_calls} += $api_calls; + $return_json->{data}->{pkg_stats}->{$task_package}->{domains} += $domains; + $return_json->{data}->{pkg_stats}->{$task_package}->{signatures_total} += $signatures_total; + $return_json->{data}->{pkg_stats}->{$task_package}->{signatures_alert} += $signatures_alert; + $return_json->{data}->{pkg_stats}->{$task_package}->{files_written} += $files_written; + $return_json->{data}->{pkg_stats}->{$task_package}->{registry_keys_modified} + += $registry_keys_modified; + $return_json->{data}->{pkg_stats}->{$task_package}->{crash_issues} += $crash_issues; + $return_json->{data}->{pkg_stats}->{$task_package}->{anti_issues} += $anti_issues; + } + } + } + + # timedout value is not a perl boolean + if ( $timedout =~ /^[Ff]/ ) { + $return_json->{data}->{timedout}++; + } + } + } +}; +if ($@) { + $return_json->{error} = 2; + $return_json->{errorString} = $return_json->{errorString} . ' SQL error: ' . $@; +} + +print encode_json($return_json) . "\n"; From 9943133ac70bf80c41997ae02c853196d8805e08 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Fri, 15 Jul 2022 12:10:20 -0500 Subject: [PATCH 335/497] typo correction for CAPE monitor... 
warnigns -> warnings #413 --- snmp/cape | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/cape b/snmp/cape index d3748ad64..2f859e552 100755 --- a/snmp/cape +++ b/snmp/cape @@ -146,7 +146,7 @@ my $return_json = { info => 0, debug => 0, warning => 0, - warnigns => [], + warnings => [], critical => 0, criticals => [], banned => 0, From 160b3252c95d8f69b1d45642257648ed842dbcd7 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Mon, 18 Jul 2022 18:20:31 -0500 Subject: [PATCH 336/497] add per timeslot stats for CAPE (#414) --- snmp/cape | 176 ++++++++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 164 insertions(+), 12 deletions(-) diff --git a/snmp/cape b/snmp/cape index 2f859e552..cde895569 100755 --- a/snmp/cape +++ b/snmp/cape @@ -96,6 +96,7 @@ use Config::Tiny; use DBI; use Time::Piece; use File::Slurp; +use Statistics::Lite qw(:all); sub version { print "cape v. 0.0.1\n"; @@ -179,6 +180,27 @@ my $return_json = { version => 1, }; +my @stats_for = ( + 'dropped_files', 'running_processes', 'api_calls', 'domains', + 'signatures_total', 'signatures_alert', 'files_written', 'registry_keys_modified', + 'crash_issues', 'anti_issues', +); + +my $ag_stats = { + dropped_files => [], + running_processes => [], + api_calls => [], + domains => [], + signatures_total => [], + signatures_alert => [], + files_written => [], + registry_keys_modified => [], + crash_issues => [], + anti_issues => [], +}; + +my $pkg_stats = {}; + # used for checking if the level value is somethingw understand my $level_check = { info => 1, debug => 1, error => 1, warning => 1, critical => 1 }; @@ -387,19 +409,57 @@ eval { } } - # if dropped files is defined and not blank, the rest will - # if this is blank then runstatistics is conf/reporting.conf - if ( defined($dropped_files) ) { - $return_json->{data}->{dropped_files} += $dropped_files; - $return_json->{data}->{running_processes} += $running_processes; - $return_json->{data}->{api_calls} += $api_calls; 
- $return_json->{data}->{domains} += $domains; - $return_json->{data}->{signatures_total} += $signatures_total; - $return_json->{data}->{signatures_alert} += $signatures_alert; - $return_json->{data}->{files_written} += $files_written; + if ( defined($running_processes) ) { + $return_json->{data}->{running_processes} += $running_processes; + push( @{ $ag_stats->{running_processes} }, $running_processes ); + } + else { + + } + + if ( defined($api_calls) ) { + $return_json->{data}->{api_calls} += $api_calls; + push( @{ $ag_stats->{api_calls} }, $api_calls ); + } + + if ( defined($domains) ) { + $return_json->{data}->{domains} += $domains; + push( @{ $ag_stats->{domains} }, $domains ); + } + + if ( defined($signatures_alert) ) { + $return_json->{data}->{signatures_alert} += $signatures_alert; + push( @{ $ag_stats->{signatures_alert} }, $signatures_alert ); + } + + if ( defined($signatures_total) ) { + $return_json->{data}->{signatures_total} += $signatures_total; + push( @{ $ag_stats->{signatures_total} }, $signatures_total ); + } + + if ( defined($files_written) ) { + $return_json->{data}->{files_written} += $files_written; + push( @{ $ag_stats->{files_written} }, $files_written ); + } + + if ( defined($registry_keys_modified) ) { $return_json->{data}->{registry_keys_modified} += $registry_keys_modified; - $return_json->{data}->{crash_issues} += $crash_issues; - $return_json->{data}->{anti_issues} += $anti_issues; + push( @{ $ag_stats->{registry_keys_modified} }, $registry_keys_modified ); + } + + if ( defined($crash_issues) ) { + $return_json->{data}->{crash_issues} += $crash_issues; + push( @{ $ag_stats->{crash_issues} }, $crash_issues ); + } + + if ( defined($anti_issues) ) { + $return_json->{data}->{anti_issues} += $anti_issues; + push( @{ $ag_stats->{anti_issues} }, $anti_issues ); + } + + if ( defined($dropped_files) ) { + $return_json->{data}->{dropped_files} += $dropped_files; + push( @{ $ag_stats->{dropped_files} }, $dropped_files ); # put per package stats 
together if ( $task_package ne '' ) { @@ -416,6 +476,18 @@ eval { crash_issues => $crash_issues, anti_issues => $anti_issues }; + $pkg_stats->{$task_package} = { + dropped_files => [$dropped_files], + running_processes => [$running_processes], + api_calls => [$api_calls], + domains => [$domains], + signatures_total => [$signatures_total], + signatures_alert => [$signatures_alert], + files_written => [$files_written], + registry_keys_modified => [$registry_keys_modified], + crash_issues => [$crash_issues], + anti_issues => [$anti_issues] + }; } else { $return_json->{data}->{pkg_stats}->{$task_package}->{dropped_files} += $dropped_files; @@ -429,6 +501,17 @@ eval { += $registry_keys_modified; $return_json->{data}->{pkg_stats}->{$task_package}->{crash_issues} += $crash_issues; $return_json->{data}->{pkg_stats}->{$task_package}->{anti_issues} += $anti_issues; + + push( @{ $pkg_stats->{$task_package}->{dropped_files} }, $dropped_files ); + push( @{ $pkg_stats->{$task_package}->{running_processes} }, $running_processes ); + push( @{ $pkg_stats->{$task_package}->{api_calls} }, $api_calls ); + push( @{ $pkg_stats->{$task_package}->{domains} }, $domains ); + push( @{ $pkg_stats->{$task_package}->{signatures_total} }, $signatures_total ); + push( @{ $pkg_stats->{$task_package}->{signatures_alert} }, $signatures_alert ); + push( @{ $pkg_stats->{$task_package}->{files_written} }, $files_written ); + push( @{ $pkg_stats->{$task_package}->{registry_keys_modified} }, $registry_keys_modified ); + push( @{ $pkg_stats->{$task_package}->{crash_issues} }, $crash_issues ); + push( @{ $pkg_stats->{$task_package}->{anti_issues} }, $anti_issues ); } } } @@ -445,4 +528,73 @@ if ($@) { $return_json->{errorString} = $return_json->{errorString} . ' SQL error: ' . $@; } +# compute the aggregate stats +foreach my $current_entry (@stats_for) { + if ( $#{ $ag_stats->{$current_entry} } > 0 ) { + $return_json->{data}{ 'min.' . 
$current_entry } = min( @{ $ag_stats->{$current_entry} } ); + $return_json->{data}{ 'max.' . $current_entry } = max( @{ $ag_stats->{$current_entry} } ); + $return_json->{data}{ 'range.' . $current_entry } = range( @{ $ag_stats->{$current_entry} } ); + $return_json->{data}{ 'mean.' . $current_entry } = mean( @{ $ag_stats->{$current_entry} } ); + $return_json->{data}{ 'median.' . $current_entry } = median( @{ $ag_stats->{$current_entry} } ); + $return_json->{data}{ 'mode.' . $current_entry } = mode( @{ $ag_stats->{$current_entry} } ); + $return_json->{data}{ 'v.' . $current_entry } = variance( @{ $ag_stats->{$current_entry} } ); + $return_json->{data}{ 'sd.' . $current_entry } = stddev( @{ $ag_stats->{$current_entry} } ); + $return_json->{data}{ 'vp.' . $current_entry } = variancep( @{ $ag_stats->{$current_entry} } ); + $return_json->{data}{ 'sdp.' . $current_entry } = stddevp( @{ $ag_stats->{$current_entry} } ); + } + else { + $return_json->{data}{ 'min.' . $current_entry } = 0; + $return_json->{data}{ 'max.' . $current_entry } = 0; + $return_json->{data}{ 'range.' . $current_entry } = 0; + $return_json->{data}{ 'mean.' . $current_entry } = 0; + $return_json->{data}{ 'median.' . $current_entry } = 0; + $return_json->{data}{ 'mode.' . $current_entry } = 0; + $return_json->{data}{ 'v.' . $current_entry } = 0; + $return_json->{data}{ 'sd.' . $current_entry } = 0; + $return_json->{data}{ 'vp.' . $current_entry } = 0; + $return_json->{data}{ 'sdp.' . $current_entry } = 0; + } + +} + +# compute the stats for each package +foreach my $current_pkg ( keys( %{$pkg_stats} ) ) { + foreach my $current_entry (@stats_for) { + if ( $#{ $pkg_stats->{$current_pkg}{$current_entry} } > 0 ) { + $return_json->{data}{pkg_stats}{$current_pkg}{ 'min.' . $current_entry } + = min( @{ $pkg_stats->{$current_pkg}{$current_entry} } ); + $return_json->{data}{pkg_stats}{$current_pkg}{ 'max.' . 
$current_entry } + = max( @{ $pkg_stats->{$current_pkg}{$current_entry} } ); + $return_json->{data}{pkg_stats}{$current_pkg}{ 'range.' . $current_entry } + = range( @{ $pkg_stats->{$current_pkg}{$current_entry} } ); + $return_json->{data}{pkg_stats}{$current_pkg}{ 'mean.' . $current_entry } + = mean( @{ $pkg_stats->{$current_pkg}{$current_entry} } ); + $return_json->{data}{pkg_stats}{$current_pkg}{ 'median.' . $current_entry } + = median( @{ $pkg_stats->{$current_pkg}{$current_entry} } ); + $return_json->{data}{pkg_stats}{$current_pkg}{ 'mode.' . $current_entry } + = mode( @{ $pkg_stats->{$current_pkg}{$current_entry} } ); + $return_json->{data}{pkg_stats}{$current_pkg}{ 'v.' . $current_entry } + = variance( @{ $pkg_stats->{$current_pkg}{$current_entry} } ); + $return_json->{data}{pkg_stats}{$current_pkg}{ 'sd.' . $current_entry } + = stddev( @{ $pkg_stats->{$current_pkg}{$current_entry} } ); + $return_json->{data}{pkg_stats}{$current_pkg}{ 'vp.' . $current_entry } + = variancep( @{ $pkg_stats->{$current_pkg}{$current_entry} } ); + $return_json->{data}{pkg_stats}{$current_pkg}{ 'sdp.' . $current_entry } + = stddevp( @{ $pkg_stats->{$current_pkg}{$current_entry} } ); + } + else { + $return_json->{data}{pkg_stats}{$current_pkg}{ 'min.' . $current_entry } = 0; + $return_json->{data}{pkg_stats}{$current_pkg}{ 'max.' . $current_entry } = 0; + $return_json->{data}{pkg_stats}{$current_pkg}{ 'range.' . $current_entry } = 0; + $return_json->{data}{pkg_stats}{$current_pkg}{ 'mean.' . $current_entry } = 0; + $return_json->{data}{pkg_stats}{$current_pkg}{ 'median.' . $current_entry } = 0; + $return_json->{data}{pkg_stats}{$current_pkg}{ 'mode.' . $current_entry } = 0; + $return_json->{data}{pkg_stats}{$current_pkg}{ 'v.' . $current_entry } = 0; + $return_json->{data}{pkg_stats}{$current_pkg}{ 'sd.' . $current_entry } = 0; + $return_json->{data}{pkg_stats}{$current_pkg}{ 'vp.' . $current_entry } = 0; + $return_json->{data}{pkg_stats}{$current_pkg}{ 'sdp.' . 
$current_entry } = 0; + } + } +} + print encode_json($return_json) . "\n"; From b7795b5445aa1b21a2e17f41ac22eaafda16cb5c Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Wed, 20 Jul 2022 12:42:17 -0500 Subject: [PATCH 337/497] cape: typo fix so it will send criticals if requested (#416) --- snmp/cape | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/cape b/snmp/cape index cde895569..e0c2c795a 100755 --- a/snmp/cape +++ b/snmp/cape @@ -310,7 +310,7 @@ while ( $process_logs && defined( $logs[$process_loop] ) ) { elsif ( $level eq 'warning' and $config->{sendWarnings} ) { push( @{ $return_json->{data}->{warnings} }, $entry ); } - elsif ( $level eq 'criticals' and $config->{sendCriticals} ) { + elsif ( $level eq 'critical' and $config->{sendCriticals} ) { push( @{ $return_json->{data}->{criticals} }, $entry ); } } From d8682b6f1629affbf6fcee7deb210e19cfc10992 Mon Sep 17 00:00:00 2001 From: Tim de Boer Date: Thu, 28 Jul 2022 15:55:01 +0200 Subject: [PATCH 338/497] Rename LMNS_vlans.scr to LNMS_vlans.scr (#410) --- snmp/Routeros/{LMNS_vlans.scr => LNMS_vlans.scr} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename snmp/Routeros/{LMNS_vlans.scr => LNMS_vlans.scr} (100%) diff --git a/snmp/Routeros/LMNS_vlans.scr b/snmp/Routeros/LNMS_vlans.scr similarity index 100% rename from snmp/Routeros/LMNS_vlans.scr rename to snmp/Routeros/LNMS_vlans.scr From bc4383f626debea2ce8e53ef6fc9dfa83c7abe20 Mon Sep 17 00:00:00 2001 From: "Zane C. 
Bowers-Hadley" Date: Sun, 31 Jul 2022 19:57:40 -0500 Subject: [PATCH 339/497] add lnms_return_optimizer (#417) --- utils/lnms_return_optimizer | 77 +++++++++++++++++++++++++++++++++++++ 1 file changed, 77 insertions(+) create mode 100755 utils/lnms_return_optimizer diff --git a/utils/lnms_return_optimizer b/utils/lnms_return_optimizer new file mode 100755 index 000000000..884d01cf9 --- /dev/null +++ b/utils/lnms_return_optimizer @@ -0,0 +1,77 @@ +#!/usr/bin/env perl + +use MIME::Base64; +use Gzip::Faster; +use Getopt::Long; +use warnings; +use strict; + +sub version{ + print "lnms_return_optimizer v. 0.0.1\n"; +} + + + +my $version; +my $help; +my $extract; +my $new_line; +GetOptions( + 'e' => \$extract, + 'n' => \$new_line, + 'h' => \$help, + 'help' => \$help, + 'v' => \$version, + 'version' => \$version, + ); + +if ($version) { + version; + exit; +} + +if ($help) { + version; + + print ' +foo | lnms_return_otimizer + +-e Operate in extract mode instead. +-n Include newlines with the base64. + +-h Print help. +--help Print help. +-v Print version info. +--version Print version info. +'; + + exit; +} + +my $data = ''; +foreach my $line () { + $data = $data . $line; +} + +if ($extract) { + if ($data =~ /^[A-Za-z0-9\/\+\n]+\=*\n*$/ ) { + print gunzip(decode_base64($data)); + }else { + print $data; + } +}else { + # gzip and print encode in base64 + # base64 is needed as snmp does not like + my $compressed = encode_base64(gzip($data)); + if (!$new_line) { + $compressed =~ s/\n//g; + $compressed = $compressed . "\n"; + } + + # check which is smaller and prints it + if (length($compressed) > length($data)) { + print $data; + }else { + print $compressed; + } +} From 750bbe86540372bbf283209fa0950606d68081a7 Mon Sep 17 00:00:00 2001 From: "Zane C. 
Bowers-Hadley" Date: Mon, 8 Aug 2022 09:15:57 -0500 Subject: [PATCH 340/497] lnms_return_optimizer => librenms_return_optimizer (#419) --- utils/{lnms_return_optimizer => librenms_return_optimizer} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename utils/{lnms_return_optimizer => librenms_return_optimizer} (100%) diff --git a/utils/lnms_return_optimizer b/utils/librenms_return_optimizer similarity index 100% rename from utils/lnms_return_optimizer rename to utils/librenms_return_optimizer From 70e8d53fb37be6e835ef6efb75f7e5cc73327f5b Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Thu, 11 Aug 2022 12:01:16 -0500 Subject: [PATCH 341/497] update date librenms_return_optimizer to take input via pipe or post -- and running the command #420 --- utils/librenms_return_optimizer | 44 ++++++++++++++++++++++++++++----- 1 file changed, 38 insertions(+), 6 deletions(-) diff --git a/utils/librenms_return_optimizer b/utils/librenms_return_optimizer index 884d01cf9..f910930a0 100755 --- a/utils/librenms_return_optimizer +++ b/utils/librenms_return_optimizer @@ -1,17 +1,39 @@ #!/usr/bin/env perl +#Copyright (c) 2022, Zane C. Bowers-Hadley +#All rights reserved. +# +#Redistribution and use in source and binary forms, with or without modification, +#are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +#ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +#WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+#IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, +#INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +#BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +#LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +#OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +#THE POSSIBILITY OF SUCH DAMAGE. + use MIME::Base64; use Gzip::Faster; use Getopt::Long; use warnings; use strict; +use IPC::Cmd qw[ run ]; sub version{ - print "lnms_return_optimizer v. 0.0.1\n"; + print "librenms_return_optimizer v. 0.0.2\n"; } - - my $version; my $help; my $extract; @@ -34,7 +56,9 @@ if ($help) { version; print ' -foo | lnms_return_otimizer +foo | librenms_return_otimizer +librenms_return_otimizer -- /path/to/some/extend -some -args + -e Operate in extract mode instead. -n Include newlines with the base64. @@ -49,8 +73,16 @@ foo | lnms_return_otimizer } my $data = ''; -foreach my $line () { - $data = $data . $line; +if ( ! $extract ) { + if (defined($ARGV[0])) { + my( $success, $error_message, $full_buf, $stdout_buf, $stderr_buf ) = + run( command => \@ARGV, verbose => 0 ); + $data=join '', @$full_buf; + }else { + foreach my $line () { + $data = $data . $line; + } + } } if ($extract) { From 9174f2cccc69d5ee2d293cf795a0a47b298f4a6a Mon Sep 17 00:00:00 2001 From: "Zane C. 
Bowers-Hadley" Date: Mon, 15 Aug 2022 12:46:19 -0500 Subject: [PATCH 342/497] add unassigned shards for opensearch #421 --- snmp/opensearch | 1 + 1 file changed, 1 insertion(+) diff --git a/snmp/opensearch b/snmp/opensearch index 8515136ec..2b133141f 100755 --- a/snmp/opensearch +++ b/snmp/opensearch @@ -153,6 +153,7 @@ $to_return->{data}{c_act_shards} = $health_json->{active_shards}; $to_return->{data}{c_rel_shards} = $health_json->{relocating_shards}; $to_return->{data}{c_init_shards} = $health_json->{initializing_shards}; $to_return->{data}{c_delayed_shards} = $health_json->{delayed_unassigned_shards}; +$to_return->{data}{c_unass_shards} = $health_json->{unassigned_shards}; $to_return->{data}{c_pending_tasks} = $health_json->{number_of_pending_tasks}; $to_return->{data}{c_in_fl_fetch} = $health_json->{number_of_in_flight_fetch}; $to_return->{data}{c_task_max_in_time} = $health_json->{task_max_waiting_in_queue_millis}; From bd40b2f3325b5f3d87b137947e3adf794ac26b66 Mon Sep 17 00:00:00 2001 From: oernii Date: Sat, 10 Sep 2022 17:22:52 +0000 Subject: [PATCH 343/497] mdadm sync check - completed percent (#415) Co-authored-by: oernii --- snmp/mdadm | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/snmp/mdadm b/snmp/mdadm index 28f351382..a9a1bfefe 100755 --- a/snmp/mdadm +++ b/snmp/mdadm @@ -20,7 +20,9 @@ list_devices() { # Outputs either 0, 100, or the value of the file referenced maybe_get() { - if [ -f "${1}" ] && [ "$(cat "${1}")" != 'none' ]; then + if [[ $(cat "${1}") =~ " / " ]]; then + echo "100 * $(cat ${1})" | bc + elif [ -f "${1}" ] && [ "$(cat "${1}")" != 'none' ]; then cat "${1}" else echo 0 From ed4d4753afea2558a655f01ff9f21d06378083c7 Mon Sep 17 00:00:00 2001 From: bnerickson Date: Mon, 19 Sep 2022 18:11:42 -0700 Subject: [PATCH 344/497] Add pwrstatd script (#423) --- snmp/pwrstatd.py | 153 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 153 insertions(+) create mode 100755 snmp/pwrstatd.py diff --git 
a/snmp/pwrstatd.py b/snmp/pwrstatd.py new file mode 100755 index 000000000..919c01e4e --- /dev/null +++ b/snmp/pwrstatd.py @@ -0,0 +1,153 @@ +#!/usr/bin/env python +# +# Name: Pwrstatd Script +# Author: bnerickson w/SourceDoctor's certificate.py script forming +# the base of the vast majority of this one. +# Version: 1.0 +# Description: This is a simple script to parse "pwrstat -status" output for ingestion into +# LibreNMS via the pwrstatd application. Pwrstatd is a service/application +# provided by CyberPower for their personal PSUs. The software is available +# here: https://www.cyberpowersystems.com/product/software/power-panel-personal/powerpanel-for-linux/ +# Installation: +# 1. Copy this script to /etc/snmp/ and make it executable: +# chmod +x /etc/snmp/pwrstatd.py +# 2. Edit your snmpd.conf and include: +# extend pwrstatd /etc/snmp/pwrstatd.py +# 3. (Optional) Create a /etc/snmp/pwrstatd.json file and specify the path to the pwrstat +# executable as json [the default path is /sbin/pwrstat]: +# ``` +# { +# "pwrstat_cmd": "/sbin/pwrstat" +# } +# ``` +# 4. Restart snmpd and activate the app for desired host. +# TODO: +# 1. If CyberPower ends up building support to collect data from multiple PSUs on a +# single computer, then this script will be updated to support that. + +import json +import re +import subprocess + +CONFIG_FILE = "/etc/snmp/pwrstatd.json" +KEY_TO_VARIABLE_MAP = { + "Firmware Number": "sn", + "Rating Voltage": "vrating", + "Rating Power": "wrating", + "Utility Voltage": "vutility", + "Output Voltage": "voutput", + "Battery Capacity": "pcapacity", + "Remaining Runtime": "mruntime", + "Load": "wload", +} +PWRSTAT_ARGS = "-status" +PWRSTAT_CMD = "/sbin/pwrstat" +REGEX_PATTERN = r"([\w\s]+)\.\.+ (.*)" + + +def value_sanitizer(key, value): + """ + value_sanitizer(): Parses the given value to extract the exact numerical (or string) value. + + Inputs: + key: The key portion of the output after regex parsing (clean). 
+ value: The entire value portion of the output after regex parsing (dirty). + Outputs: + str, int, or None depending on what key is given. + """ + if key == "Firmware Number": + return str(value) + elif key in ( + "Rating Voltage", + "Rating Power", + "Utility Voltage", + "Output Voltage", + "Battery Capacity", + "Remaining Runtime", + "Load", + ): + return int(value.split(" ")[0]) + else: + return None + + +def main(): + """ + main(): main function performs pwrstat command execution and output parsing. + + Inputs: + None + Outputs: + None + """ + pwrstat_cmd = PWRSTAT_CMD + output_data = {"errorString": "", "error": 0, "version": 1, "data": []} + psu_data = { + "mruntime": None, + "pcapacity": None, + "pload": None, + "sn": None, + "voutput": None, + "vrating": None, + "vutility": None, + "wload": None, + "wrating": None, + } + + # Load configuration file if it exists + try: + with open(CONFIG_FILE, "r") as json_file: + config_file = json.load(json_file) + if "pwrstat_cmd" in config_file.keys(): + pwrstat_cmd = config_file["pwrstat_cmd"] + except FileNotFoundError: + pass + except (KeyError, PermissionError, OSError, json.decoder.JSONDecodeError) as err: + output_data["error"] = 1 + output_data["errorString"] = "Config file Error: '%s'" % err + + try: + # Execute pwrstat command + pwrstat_process = subprocess.Popen( + [pwrstat_cmd, PWRSTAT_ARGS], + stdin=None, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) + poutput, perror = pwrstat_process.communicate() + + if perror: + raise OSError(perror.decode("utf-8")) + + # Parse pwrstat command output and collect data. 
+ for line in poutput.decode("utf-8").split("\n"): + regex_search = re.search(REGEX_PATTERN, line.strip()) + if not regex_search: + continue + + try: + key = regex_search.groups()[0] + value = regex_search.groups()[1] + if key in KEY_TO_VARIABLE_MAP.keys(): + psu_data[KEY_TO_VARIABLE_MAP[key]] = value_sanitizer(key, value) + except IndexError as err: + output_data["error"] = 1 + output_data["errorString"] = "Command Output Parsing Error: '%s'" % err + continue + + # Manually calculate percentage load on PSU + if psu_data["wrating"]: + # int to float hacks in-place for python2 backwards compatibility + psu_data["pload"] = int( + float(psu_data["wload"]) / float(psu_data["wrating"]) * 100 + ) + except (subprocess.CalledProcessError, OSError) as err: + output_data["error"] = 1 + output_data["errorString"] = "Command Execution Error: '%s'" % err + + output_data["data"].append(psu_data) + print(json.dumps(output_data)) + + +if __name__ == "__main__": + main() From fada8761253942ffc0fea025d5e629cb55fc9d62 Mon Sep 17 00:00:00 2001 From: bnerickson Date: Fri, 14 Oct 2022 11:42:28 -0700 Subject: [PATCH 345/497] Add systemd script (#426) * Add systemd script * Fixing a couple python black styling errors --- snmp/systemd.py | 191 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 191 insertions(+) create mode 100755 snmp/systemd.py diff --git a/snmp/systemd.py b/snmp/systemd.py new file mode 100755 index 000000000..1e6b47d51 --- /dev/null +++ b/snmp/systemd.py @@ -0,0 +1,191 @@ +#!/usr/bin/env python +# +# Name: Systemd Script +# Author: bnerickson w/SourceDoctor's certificate.py script forming +# the base of the vast majority of this one. +# Version: 1.0 +# Description: This is a simple script to parse "systemctl" output for ingestion into +# LibreNMS via the systemd application. +# Installation: +# 1. Copy this script to /etc/snmp/ and make it executable: +# chmod +x /etc/snmp/systemd.py +# 2. 
Edit your snmpd.conf and include: +# extend systemdd /etc/snmp/systemd.py +# 3. (Optional) Create a /etc/snmp/systemd.json file and specify: +# a.) "systemctl_cmd" - String path to the systemctl binary ["/usr/bin/systemctl"] +# b.) "include_inactive_units" - True/False string to include inactive units in +# results ["False"] +# ``` +# { +# "systemctl_cmd": "/bin/systemctl", +# "include_inactive_units": "True" +# } +# ``` +# 4. Restart snmpd and activate the app for desired host. + +import json +import subprocess +import sys + +CONFIG_FILE = "/etc/snmp/systemd.json" +SYSTEMCTL_ARGS = ["list-units", "--full", "--plain", "--no-legend", "--no-page"] +SYSTEMCTL_CMD = "/usr/bin/systemctl" +# The unit "sub" type is the only unit state that has three layers of +# depth. "load" and "active" are two layers deep. +SYSTEMCTL_TERNARY_STATES = ["sub"] + + +def error_handler(error_name, err): + """ + error_handler(): Common error handler for config/output parsing and + command execution. + Inputs: + error_name: String describing the error handled. + err: The error message in its entirety. + Outputs: + None + """ + output_data = { + "errorString": "%s: '%s'" % (error_name, err), + "error": 1, + "version": 1, + "data": [], + } + print(json.dumps(output_data)) + sys.exit(1) + + +def config_file_parser(): + """ + config_file_parser(): Parses the config file (if it exists) and extracts the + necessary parameters. + + Inputs: + None + Outputs: + systemctl_cmd: The full systemctl command to execute. 
+ """ + systemctl_cmd = [SYSTEMCTL_CMD] + systemctl_args = SYSTEMCTL_ARGS + + # Load configuration file if it exists + try: + with open(CONFIG_FILE, "r") as json_file: + config_file = json.load(json_file) + systemctl_cmd = [config_file["systemctl_cmd"]] + if config_file["include_inactive_units"].lower().strip() == "true": + systemctl_args.append("--all") + except FileNotFoundError: + pass + except (KeyError, PermissionError, OSError, json.decoder.JSONDecodeError) as err: + error_handler("Config File Error", err) + + # Create and return full systemctl command. + systemctl_cmd.extend(systemctl_args) + return systemctl_cmd + + +def command_executor(systemctl_cmd): + """ + command_executor(): Execute the systemctl command and return the output. + + Inputs: + systemctl_cmd: The full systemctl command to execute. + Outputs: + poutput: The stdout of the executed command (empty byte-string if error). + """ + try: + # Execute systemctl command + poutput = subprocess.check_output( + systemctl_cmd, + stdin=None, + stderr=subprocess.PIPE, + ) + except (subprocess.CalledProcessError, OSError) as err: + error_handler("Command Execution Error", err) + return poutput + + +def unit_parser(line, systemctl_data): + """ + unit_parser(): Parses a unit's line for load, active, and sub status. Each + of those values is incremented in the global systemctl_data + variable as-well-as the totals for each category. + + Inputs: + line: The unit's status line from the systemctl stdout. + Outputs: + None + """ + line_parsed = line.strip().split() + + try: + # Reverse the to grab the sub type + # (ignoring periods in the service name). 
+ parsed_results = { + "load": line_parsed[1], + "active": line_parsed[2], + "sub": {line_parsed[0][::-1].split(".")[0][::-1]: line_parsed[3]}, + } + except (IndexError) as err: + error_handler("Command Output Parsing Error", err) + + for state_type, state_value in parsed_results.items(): + if state_type not in systemctl_data: + systemctl_data[state_type] = {} + if state_type not in SYSTEMCTL_TERNARY_STATES: + systemctl_data[state_type][state_value] = ( + 1 + if state_value not in systemctl_data[state_type] + else (systemctl_data[state_type][state_value] + 1) + ) + systemctl_data[state_type]["total"] = ( + 1 + if "total" not in systemctl_data[state_type] + else (systemctl_data[state_type]["total"] + 1) + ) + else: + for sub_state_type, sub_state_value in state_value.items(): + if sub_state_type not in systemctl_data[state_type]: + systemctl_data[state_type][sub_state_type] = {} + systemctl_data[state_type][sub_state_type][sub_state_value] = ( + 1 + if sub_state_value not in systemctl_data[state_type][sub_state_type] + else ( + systemctl_data[state_type][sub_state_type][sub_state_value] + 1 + ) + ) + systemctl_data[state_type][sub_state_type]["total"] = ( + 1 + if "total" not in systemctl_data[state_type][sub_state_type] + else (systemctl_data[state_type][sub_state_type]["total"] + 1) + ) + return systemctl_data + + +def main(): + """ + main(): main function that delegates config file parsing, command execution, + and unit stdout parsing. Then it prints out the expected json output + for the systemd application. + + Inputs: + None + Outputs: + None + """ + output_data = {"errorString": "", "error": 0, "version": 1, "data": {}} + + # Parse configuration file. + systemctl_cmd = config_file_parser() + + # Execute systemctl command and parse output. 
+ for line in command_executor(systemctl_cmd).decode("utf-8").split("\n"): + if not line: + continue + output_data["data"] = unit_parser(line, output_data["data"]) + print(json.dumps(output_data)) + + +if __name__ == "__main__": + main() From 0cb634493b8303142aef1f03425d3d64b53e8df3 Mon Sep 17 00:00:00 2001 From: Trae Santiago <249409+Trae32566@users.noreply.github.com> Date: Fri, 14 Oct 2022 13:42:47 -0500 Subject: [PATCH 346/497] fixed conditional not checking for file before `cat`ing, and removed reliance on bc (#425) --- snmp/mdadm | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/snmp/mdadm b/snmp/mdadm index a9a1bfefe..b0c9b3c5f 100755 --- a/snmp/mdadm +++ b/snmp/mdadm @@ -20,8 +20,8 @@ list_devices() { # Outputs either 0, 100, or the value of the file referenced maybe_get() { - if [[ $(cat "${1}") =~ " / " ]]; then - echo "100 * $(cat ${1})" | bc + if [ -f "${1}" ] && [[ $(cat "${1}") =~ " / " ]]; then + echo $((100 * $(cat "${1}"))) elif [ -f "${1}" ] && [ "$(cat "${1}")" != 'none' ]; then cat "${1}" else From 6322f1791da1dcbb5e2797d92f30aee1a971350a Mon Sep 17 00:00:00 2001 From: Tony Murray Date: Sun, 16 Oct 2022 11:15:25 -0500 Subject: [PATCH 347/497] Fix memcached security vulnerability (#428) Add snmp extend script --- agent-local/memcached | 3 +-- snmp/memcached | 22 ++++++++++++++++++++++ 2 files changed, 23 insertions(+), 2 deletions(-) create mode 100755 snmp/memcached diff --git a/agent-local/memcached b/agent-local/memcached index b4ed626f8..6ba751b69 100755 --- a/agent-local/memcached +++ b/agent-local/memcached @@ -7,7 +7,6 @@ $stats = $m->getStats(); if(is_array($stats)) { echo("<<>>\n"); - echo(serialize($m->getStats())); + echo(json_encode($m->getStats())); echo("\n"); } -?> diff --git a/snmp/memcached b/snmp/memcached new file mode 100755 index 000000000..f0d7844ee --- /dev/null +++ b/snmp/memcached @@ -0,0 +1,22 @@ +#!/usr/bin/php + false, + 'error' => 99, + 'errorString' => 'php-memcached extension is not available, it 
must be installed and enabled.', + 'version' => '1.1' + )); + exit; +} + +$m = new Memcached(); +$m->addServer('localhost', 11211); + +echo json_encode(array( + 'data' => $m->getStats(), + 'error' => $m->getLastErrorCode(), + 'errorString' => $m->getLastErrorMessage(), + 'version' => '1.1', +)); From a31bcf0d295d01f727a62beca4769fd06e27bdff Mon Sep 17 00:00:00 2001 From: Oskar Szafraniec Date: Wed, 26 Oct 2022 15:13:20 +0200 Subject: [PATCH 348/497] run_query PHP 8.1 hot fix (#430) As of PHP 8.1.0, the default setting is MYSQLI_REPORT_ERROR | MYSQLI_REPORT_STRICT. Previously, it was MYSQLI_REPORT_OFF. --- snmp/mysql | 17 ++++++++++------- 1 file changed, 10 insertions(+), 7 deletions(-) diff --git a/snmp/mysql b/snmp/mysql index 89e3c9059..530637352 100755 --- a/snmp/mysql +++ b/snmp/mysql @@ -1310,6 +1310,7 @@ function to_int ( $str ) { function run_query($sql, $conn) { global $debug; debug($sql); + mysqli_report(MYSQLI_REPORT_OFF); $result = @mysqli_query($conn, $sql); if ( $debug && strpos($sql, 'SHOW SLAVE STATUS ') === false ) { $error = @mysqli_error($conn); @@ -1319,13 +1320,15 @@ function run_query($sql, $conn) { } } $array = array(); - $count = @mysqli_num_rows($result); - if ( $count > 10000 ) { - debug('Abnormal number of rows returned: ' . $count); - } - else { - while ( $row = @mysqli_fetch_array($result) ) { - $array[] = $row; + if ( $result ) { + $count = @mysqli_num_rows($result); + if ( $count > 10000 ) { + debug('Abnormal number of rows returned: ' . 
$count); + } + else { + while ( $row = @mysqli_fetch_array($result) ) { + $array[] = $row; + } } } debug(array($sql, $array)); From 5f07d0e6617dddbab53d484f580dcb7a169fcf59 Mon Sep 17 00:00:00 2001 From: bnerickson Date: Tue, 1 Nov 2022 14:50:18 -0700 Subject: [PATCH 349/497] Cleaning up pwrstatd code (#431) --- snmp/pwrstatd.py | 180 +++++++++++++++++++++++++++++------------------ 1 file changed, 113 insertions(+), 67 deletions(-) diff --git a/snmp/pwrstatd.py b/snmp/pwrstatd.py index 919c01e4e..dc5d332a4 100755 --- a/snmp/pwrstatd.py +++ b/snmp/pwrstatd.py @@ -7,7 +7,8 @@ # Description: This is a simple script to parse "pwrstat -status" output for ingestion into # LibreNMS via the pwrstatd application. Pwrstatd is a service/application # provided by CyberPower for their personal PSUs. The software is available -# here: https://www.cyberpowersystems.com/product/software/power-panel-personal/powerpanel-for-linux/ +# here: +# https://www.cyberpowersystems.com/product/software/power-panel-personal/powerpanel-for-linux/ # Installation: # 1. Copy this script to /etc/snmp/ and make it executable: # chmod +x /etc/snmp/pwrstatd.py @@ -28,6 +29,7 @@ import json import re import subprocess +import sys CONFIG_FILE = "/etc/snmp/pwrstatd.json" KEY_TO_VARIABLE_MAP = { @@ -40,11 +42,79 @@ "Remaining Runtime": "mruntime", "Load": "wload", } -PWRSTAT_ARGS = "-status" +PWRSTAT_ARGS = ["-status"] PWRSTAT_CMD = "/sbin/pwrstat" REGEX_PATTERN = r"([\w\s]+)\.\.+ (.*)" +def error_handler(error_name, err): + """ + error_handler(): Common error handler for config/output parsing and + command execution. + Inputs: + error_name: String describing the error handled. + err: The error message in its entirety. 
+ Outputs: + None + """ + output_data = { + "errorString": "%s: '%s'" % (error_name, err), + "error": 1, + "version": 1, + "data": [], + } + print(json.dumps(output_data)) + sys.exit(1) + + +def config_file_parser(): + """ + config_file_parser(): Parses the config file (if it exists) and extracts the + necessary parameters. + + Inputs: + None + Outputs: + pwrstat_cmd: The full pwrstat command to execute. + """ + pwrstat_cmd = [PWRSTAT_CMD] + + # Load configuration file if it exists + try: + with open(CONFIG_FILE, "r") as json_file: + config_file = json.load(json_file) + pwrstat_cmd = [config_file["pwrstat_cmd"]] + except FileNotFoundError: + pass + except (KeyError, PermissionError, OSError, json.decoder.JSONDecodeError) as err: + error_handler("Config File Error", err) + + # Create and return full pwrstat command. + pwrstat_cmd.extend(PWRSTAT_ARGS) + return pwrstat_cmd + + +def command_executor(pwrstat_cmd): + """ + command_executor(): Execute the pwrstat command and return the output. + + Inputs: + pwrstat_cmd: The full pwrstat command to execute. + Outputs: + poutput: The stdout of the executed command. + """ + try: + # Execute pwrstat command + poutput = subprocess.check_output( + pwrstat_cmd, + stdin=None, + stderr=subprocess.PIPE, + ) + except (subprocess.CalledProcessError, OSError) as err: + error_handler("Command Execution Error", err) + return poutput + + def value_sanitizer(key, value): """ value_sanitizer(): Parses the given value to extract the exact numerical (or string) value. @@ -57,7 +127,7 @@ def value_sanitizer(key, value): """ if key == "Firmware Number": return str(value) - elif key in ( + if key in ( "Rating Voltage", "Rating Power", "Utility Voltage", @@ -67,8 +137,42 @@ def value_sanitizer(key, value): "Load", ): return int(value.split(" ")[0]) - else: - return None + return None + + +def output_parser(pwrstat_output): + """ + output_parser(): Parses the pwrstat command output and returns a dictionary + of PSU metrics. 
+ + Inputs: + pwrstat_output: The pwrstat command stdout + Outputs: + psu_data: A dictionary of PSU metrics. + """ + psu_data = {} + + for line in pwrstat_output.decode("utf-8").split("\n"): + regex_search = re.search(REGEX_PATTERN, line.strip()) + + if not regex_search: + continue + + try: + key = regex_search.groups()[0] + value = regex_search.groups()[1] + if key in KEY_TO_VARIABLE_MAP: + psu_data[KEY_TO_VARIABLE_MAP[key]] = value_sanitizer(key, value) + except IndexError as err: + error_handler("Command Output Parsing Error", err) + + # Manually calculate percentage load on PSU + if "wrating" in psu_data and "wload" in psu_data and psu_data["wrating"]: + # int to float hacks in-place for python2 backwards compatibility + psu_data["pload"] = int( + float(psu_data["wload"]) / float(psu_data["wrating"]) * 100 + ) + return psu_data def main(): @@ -80,72 +184,14 @@ def main(): Outputs: None """ - pwrstat_cmd = PWRSTAT_CMD output_data = {"errorString": "", "error": 0, "version": 1, "data": []} - psu_data = { - "mruntime": None, - "pcapacity": None, - "pload": None, - "sn": None, - "voutput": None, - "vrating": None, - "vutility": None, - "wload": None, - "wrating": None, - } - # Load configuration file if it exists - try: - with open(CONFIG_FILE, "r") as json_file: - config_file = json.load(json_file) - if "pwrstat_cmd" in config_file.keys(): - pwrstat_cmd = config_file["pwrstat_cmd"] - except FileNotFoundError: - pass - except (KeyError, PermissionError, OSError, json.decoder.JSONDecodeError) as err: - output_data["error"] = 1 - output_data["errorString"] = "Config file Error: '%s'" % err + # Parse configuration file. 
+ pwrstat_cmd = config_file_parser() - try: - # Execute pwrstat command - pwrstat_process = subprocess.Popen( - [pwrstat_cmd, PWRSTAT_ARGS], - stdin=None, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - ) - poutput, perror = pwrstat_process.communicate() - - if perror: - raise OSError(perror.decode("utf-8")) - - # Parse pwrstat command output and collect data. - for line in poutput.decode("utf-8").split("\n"): - regex_search = re.search(REGEX_PATTERN, line.strip()) - if not regex_search: - continue - - try: - key = regex_search.groups()[0] - value = regex_search.groups()[1] - if key in KEY_TO_VARIABLE_MAP.keys(): - psu_data[KEY_TO_VARIABLE_MAP[key]] = value_sanitizer(key, value) - except IndexError as err: - output_data["error"] = 1 - output_data["errorString"] = "Command Output Parsing Error: '%s'" % err - continue - - # Manually calculate percentage load on PSU - if psu_data["wrating"]: - # int to float hacks in-place for python2 backwards compatibility - psu_data["pload"] = int( - float(psu_data["wload"]) / float(psu_data["wrating"]) * 100 - ) - except (subprocess.CalledProcessError, OSError) as err: - output_data["error"] = 1 - output_data["errorString"] = "Command Execution Error: '%s'" % err + # Execute pwrstat command and parse output. 
+ output_data["data"].append(output_parser(command_executor(pwrstat_cmd))) - output_data["data"].append(psu_data) print(json.dumps(output_data)) From 6593c296598d0fbbc3665d5148afde7edad1b1db Mon Sep 17 00:00:00 2001 From: Jethro Date: Sat, 5 Nov 2022 11:22:19 +0100 Subject: [PATCH 350/497] Add default puppet7 location of the summary file (#434) --- snmp/puppet_agent.py | 1 + 1 file changed, 1 insertion(+) diff --git a/snmp/puppet_agent.py b/snmp/puppet_agent.py index cc9b36343..8afa87d1e 100755 --- a/snmp/puppet_agent.py +++ b/snmp/puppet_agent.py @@ -23,6 +23,7 @@ summary_files = [ "/var/cache/puppet/state/last_run_summary.yaml", "/opt/puppetlabs/puppet/cache/state/last_run_summary.yaml", + "/opt/puppetlabs/puppet/public/last_run_summary.yaml", ] From 4029aa1e0c29887f073d220b68b133ed0817d901 Mon Sep 17 00:00:00 2001 From: bnerickson Date: Sun, 13 Nov 2022 02:04:02 -0800 Subject: [PATCH 351/497] Add wireguard script (#432) * Add wireguard script * Fixing a minor lint isort issue * Removing str sanitization that was never supposed to be part of the original commit * Fixed one final lint issue * Fixing comments to use a VALID json format. String enforement for the friendly name variable. --- snmp/wireguard.py | 247 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 247 insertions(+) create mode 100755 snmp/wireguard.py diff --git a/snmp/wireguard.py b/snmp/wireguard.py new file mode 100755 index 000000000..66c90ec93 --- /dev/null +++ b/snmp/wireguard.py @@ -0,0 +1,247 @@ +#!/usr/bin/env python +# +# Name: Wireguard Script +# Author: bnerickson w/SourceDoctor's certificate.py script forming the +# base of the vast majority of this one. +# Version: 1.0 +# Description: This is a simple script to parse "wg show all" output for ingestion into LibreNMS +# via the wireguard application. We collect traffic, a friendly identifier (arbitrary +# name), and last handshake time for all clients on all wireguard interfaces. +# Installation: +# 1. 
Copy this script to /etc/snmp/ and make it executable: +# chmod +x /etc/snmp/wireguard.py +# 2. Edit your snmpd.conf and include: +# extend wireguard /etc/snmp/wireguard.py +# 3. Create a /etc/snmp/wireguard.json file and specify: +# a.) (optional) "wg_cmd" - String path to the wg binary ["/usr/bin/wg"] +# b.) "public_key_to_arbitrary_name" - A dictionary to convert between the publickey +# assigned to the client (specified in the wireguard +# interface conf file) to an arbitrary, friendly +# name. The friendly names MUST be unique within +# each interface. Also note that the interface name +# and friendly names are used in the RRD filename, +# so using special characters is highly discouraged. +# ``` +# { +# "wg_cmd": "/bin/wg", +# "public_key_to_arbitrary_name": { +# "wg0": { +# "z1iSIymFEFi/PS8rR19AFBle7O4tWowMWuFzHO7oRlE=": "client1", +# "XqWJRE21Fw1ke47mH1yPg/lyWqCCfjkIXiS6JobuhTI=": "server.domain.com" +# } +# } +# } +# ``` +# 4. Restart snmpd and activate the app for desired host. +# TODO: +# 1. If Wireguard ever implements a friendly identifier, then scrape that instead of providing +# arbitrary names manually in the json conf file. + +import json +import subprocess +import sys +from datetime import datetime +from itertools import chain + +CONFIG_FILE = "/etc/snmp/wireguard.json" +WG_ARGS = ["show", "all", "dump"] +WG_CMD = "/usr/bin/wg" + + +def error_handler(error_name, err): + """ + error_handler(): Common error handler for config/output parsing and command execution. We set + the data to none and print out the json. + Inputs: + error_name: String describing the error handled. + err: The error message in its entirety. + Outputs: + None + """ + output_data = { + "errorString": "%s: '%s'" % (error_name, err), + "error": 1, + "version": 1, + "data": {}, + } + print(json.dumps(output_data)) + sys.exit(1) + + +def config_file_parser(): + """ + config_file_parser(): Parse the config file and extract the necessary parameters. 
+ + Inputs: + None + Outputs: + wg_cmd: The full wg command to execute. + interface_clients_dict: Dictionary mapping of interface names to public_key->client names. + """ + # Load configuration file if it exists + try: + with open(CONFIG_FILE, "r") as json_file: + config_file = json.load(json_file) + interface_clients_dict = config_file["public_key_to_arbitrary_name"] + wg_cmd = [config_file["wg_cmd"]] if "wg_cmd" in config_file else [WG_CMD] + except ( + FileNotFoundError, + KeyError, + PermissionError, + OSError, + json.decoder.JSONDecodeError, + ) as err: + error_handler("Config File Error", err) + + # Create and return full wg command. + wg_cmd.extend(WG_ARGS) + return wg_cmd, interface_clients_dict + + +def config_file_validator(interface_clients_dict): + """ + config_file_validator(): Verifies the uniqueness of the arbitrary names in the interface to + public_key->client names dictionary. + + Inputs: + interface_clients_dict: Dictionary mapping of interface names to public_key->client names. + Outputs: + None + """ + # Search for valid, unique arbitrary names + for interface, public_key_to_arbitrary_name in interface_clients_dict.items(): + rev_dict = {} + for public_key, arbitrary_name in public_key_to_arbitrary_name.items(): + rev_dict.setdefault(str(arbitrary_name), set()).add(public_key) + + # Verify the arbitrary names set in the wireguard.json file are unique. + result = set( + chain.from_iterable( + arbitrary_name + for public_key, arbitrary_name in rev_dict.items() + if len(arbitrary_name) > 1 + ) + ) + if not result: + continue + + err = ( + "%s interface has non-unique arbitrary names configured for public keys %s" + % (interface, str(result)) + ) + error_handler("Config File Error", err) + + +def command_executor(wg_cmd): + """ + command_executor(): Execute the wg command and return the output. + + Inputs: + wg_cmd: The full wg command to execute. + Outputs: + poutput: The stdout of the executed command (empty byte-string if error). 
+ """ + try: + # Execute wg command + poutput = subprocess.check_output( + wg_cmd, + stdin=None, + stderr=subprocess.PIPE, + ) + except (subprocess.CalledProcessError, OSError) as err: + error_handler("Command Execution Error", err) + return poutput + + +def output_parser(line, interface_clients_dict): + """ + output_parser(): Parses a line from the wg command for the client's public key, traffic inbound + and outbound, wireguard interface, and last handshake timestamp. + + Inputs: + line: The wireguard client status line from the wg command stdout. + interface_clients_dict: Dictionary mapping of interface to public_key->client names. + Outputs: + wireguard_data: A dictionary of a peer's server interface, public key, bytes sent and + received, and minutes since last handshake + """ + line_parsed = line.strip().split() + + try: + interface = str(line_parsed[0]) + public_key = str(line_parsed[1]) + timestamp = int(line_parsed[5]) + bytes_rcvd = int(line_parsed[6]) + bytes_sent = int(line_parsed[7]) + except (IndexError, ValueError) as err: + error_handler("Command Output Parsing Error", err) + + # Return an empty dictionary if the interface is not in the dictionary. + if interface not in interface_clients_dict: + return {} + + # Return an empty dictionary if there is no public key to arbitrary name mapping. + if public_key not in interface_clients_dict[interface]: + return {} + + # Perform in-place replacement of publickeys with arbitrary names. 
+ friendly_name = str(interface_clients_dict[interface][public_key]) + + # Calculate minutes since last handshake here + last_handshake_timestamp = datetime.fromtimestamp(timestamp) if timestamp else 0 + minutes_since_last_handshake = ( + int((datetime.now() - last_handshake_timestamp).total_seconds() / 60) + if last_handshake_timestamp + else None + ) + + wireguard_data = { + interface: { + friendly_name: { + "minutes_since_last_handshake": minutes_since_last_handshake, + "bytes_rcvd": bytes_rcvd, + "bytes_sent": bytes_sent, + } + } + } + + return wireguard_data + + +def main(): + """ + main(): main function that delegates config file parsing, command execution, and unit stdout + parsing. Then it prints out the expected json output for the wireguard application. + + Inputs: + None + Outputs: + None + """ + output_data = {"errorString": "", "error": 0, "version": 1, "data": {}} + + # Parse configuration file. + wg_cmd, interface_clients_dict = config_file_parser() + + # Verify contents of the config file are valid. + config_file_validator(interface_clients_dict) + + # Execute wg command and parse output. We skip the first line ("[1:]") since that's the + # wireguard server's public key declaration. + for line in command_executor(wg_cmd).decode("utf-8").split("\n")[1:]: + if not line: + continue + # Parse each line and import the resultant dictionary into output_data. We update the + # interface key with new clients as they are found and instantiate new interface keys as + # they are found. 
+ for intf, intf_data in output_parser(line, interface_clients_dict).items(): + if intf not in output_data["data"]: + output_data["data"][intf] = {} + for client, client_data in intf_data.items(): + output_data["data"][intf][client] = client_data + + print(json.dumps(output_data)) + + +if __name__ == "__main__": + main() From 6b93222d03c660415bfe41c5f767684383d0d2ce Mon Sep 17 00:00:00 2001 From: bnerickson Date: Sun, 13 Nov 2022 02:05:29 -0800 Subject: [PATCH 352/497] Adding optional configuration file support to postgres snmp script (#437) --- snmp/postgres | 40 +++++++++++++++++++++++++++++++++++++--- 1 file changed, 37 insertions(+), 3 deletions(-) diff --git a/snmp/postgres b/snmp/postgres index c03b2ccb4..764484000 100644 --- a/snmp/postgres +++ b/snmp/postgres @@ -22,19 +22,53 @@ #OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF #THE POSSIBILITY OF SUCH DAMAGE. -#set the user here to use -#be sure to set up the user in .pgpass for the user snmpd is running as +# Location of optional config file. +CONFIG_FILE="/etc/snmp/postgres.config" + +# Default DBuser is pgsql. Be sure to set up the user in .pgpass for the user snmpd +# is running as. You can either update the variable below, or add "DBuser=" +# to the /etc/snmp/postgres.config file without quotes and replacing . DBuser=pgsql # You may want to disable totalling for the postgres DB as that can make the total graphs artificially noisy. # 1 = don't total stats for the DB postgres # 0 = include postgres in the totals +# To set this to 0, you can either update the variable below, or add "ignorePG=0" to +# the /etc/snmp/postgres.config file (without quotes). ignorePG=1; +# Hostname to connect to. By default this is blank and check_postgres.ph will connect +# to the Unix socket. You can either update the variable below, or add "DBhost=" +# to the /etc/snmp/postgres.config file without quotes and replacing . +DBhost="" + +# Load configuration from config file if the file exists. 
+if [ -f "$CONFIG_FILE" ]; then + saved_IFS=$IFS + IFS="=" + + while read -r key value; do + if [ "$key" = "DBuser" ]; then + DBuser=$value + elif [ "$key" = "ignorePG" ]; then + ignorePG=$value + elif [ "$key" = "DBhost" ]; then + DBhost=$value + fi + done < $CONFIG_FILE + + IFS=$saved_IFS +fi + #make sure the paths are right for your system cpg='/usr/bin/env check_postgres.pl' -$cpg -u $DBuser --action dbstats | awk -F ' ' ' +cpg_command="$cpg -u $DBuser --action dbstats" +if [ "$DBhost" != "" ]; then + cpg_command="$cpg_command -H $DBhost" +fi + +$cpg_command | awk -F ' ' ' BEGIN{ backends=0; From 46bb9a228d80adcc7e3c6ec1b6ac24d208a12400 Mon Sep 17 00:00:00 2001 From: bnerickson Date: Sun, 13 Nov 2022 02:06:10 -0800 Subject: [PATCH 353/497] Fixing agent-local hddtemp script (#436) --- agent-local/hddtemp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/agent-local/hddtemp b/agent-local/hddtemp index 2cee3939c..75442c725 100755 --- a/agent-local/hddtemp +++ b/agent-local/hddtemp @@ -28,9 +28,9 @@ if [ "${hddtemp}" != "" ]; then if [ -x "${hddtemp}" ]; then if type parallel > /dev/null 2>&1; then # When available, use GNU parallel for a significant performance boost. hddtemp runs serially(!) 
- output=$(parallel "${hddtemp}" -w -q ::: "${disks}" 2>/dev/null) + output=$(parallel "${hddtemp}" -w -q ::: ${disks} 2>/dev/null) else - output=$(${hddtemp} -w -q "${disks}" 2>/dev/null) + output=$(${hddtemp} -w -q ${disks} 2>/dev/null) fi content=$(echo "$output" | awk '{ if ($0 !~ /not available/) { print $0 } }' | awk -F": " 'BEGIN{ ORS="" }{ print "|"$1"|"$2"|"$3"|";} ' | sed 's/[° ]C|/|C|/g' | sed 's/[° ]F|/|F|/g' | tr -cd '\12\14\40-\176') if [ "${content}" != "" ]; then From 0ec50f2d93c7a660c0d8f1bbb16603797c436165 Mon Sep 17 00:00:00 2001 From: SourceDoctor Date: Fri, 18 Nov 2022 21:16:38 +0100 Subject: [PATCH 354/497] Alarm Flag on UPS-Nut Application (#438) --- snmp/ups-nut.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/snmp/ups-nut.sh b/snmp/ups-nut.sh index b5ba04fe4..e8dd3a824 100755 --- a/snmp/ups-nut.sh +++ b/snmp/ups-nut.sh @@ -24,7 +24,7 @@ do fi done -for value in "ups\.status:[A-Z ]{0,}OL" "ups\.status:[A-Z ]{0,}OB" "ups\.status:[A-Z ]{0,}LB" "ups\.status:[A-Z ]{0,}HB" "ups\.status:[A-Z ]{0,}RB" "ups\.status:[A-Z ]{0,}CHRG" "ups\.status:[A-Z ]{0,}DISCHRG" "ups\.status:[A-Z ]{0,}BYPASS" "ups\.status:[A-Z ]{0,}CAL" "ups\.status:[A-Z ]{0,}OFF" "ups\.status:[A-Z ]{0,}OVER" "ups\.status:[A-Z ]{0,}TRIM" "ups\.status:[A-Z ]{0,}BOOST" "ups\.status:[A-Z ]{0,}FSD" +for value in "ups\.status:[A-Z ]{0,}OL" "ups\.status:[A-Z ]{0,}OB" "ups\.status:[A-Z ]{0,}LB" "ups\.status:[A-Z ]{0,}HB" "ups\.status:[A-Z ]{0,}RB" "ups\.status:[A-Z ]{0,}CHRG" "ups\.status:[A-Z ]{0,}DISCHRG" "ups\.status:[A-Z ]{0,}BYPASS" "ups\.status:[A-Z ]{0,}CAL" "ups\.status:[A-Z ]{0,}OFF" "ups\.status:[A-Z ]{0,}OVER" "ups\.status:[A-Z ]{0,}TRIM" "ups\.status:[A-Z ]{0,}BOOST" "ups\.status:[A-Z ]{0,}FSD" "ups\.alarm:[A-Z ]" do UNKNOWN=$(echo "$TMP" | grep -Eo "ups\.status:") if [ -z "$UNKNOWN" ]; then @@ -38,3 +38,4 @@ do fi fi done + From 9f72da6af2eb7bfe3f7928b9ab9518486793f2db Mon Sep 17 00:00:00 2001 From: 00gh <36605979+00gh@users.noreply.github.com> Date: Sun, 
20 Nov 2022 03:59:09 +0100 Subject: [PATCH 355/497] snmp/Openwrt/wl*: Added stderr redirects in wl* scripts for iw/iwlist. (#440) See Issue mentioned in librenms/librenms: #14428 OpenWRT example scripts give bad output if wireless interface is down. Redirect the iw/iwlist command stderr output to /dev/null. Co-authored-by: 00gh <00gh> --- snmp/Openwrt/wlClients.sh | 2 +- snmp/Openwrt/wlFrequency.sh | 2 +- snmp/Openwrt/wlNoiseFloor.sh | 2 +- snmp/Openwrt/wlRate.sh | 3 ++- snmp/Openwrt/wlSNR.sh | 2 +- 5 files changed, 6 insertions(+), 5 deletions(-) diff --git a/snmp/Openwrt/wlClients.sh b/snmp/Openwrt/wlClients.sh index f454e592f..72e3694cb 100755 --- a/snmp/Openwrt/wlClients.sh +++ b/snmp/Openwrt/wlClients.sh @@ -25,7 +25,7 @@ fi count=0 for interface in $interfaces do - new=$(/usr/sbin/iw dev "$interface" station dump | /bin/grep Station | /usr/bin/cut -f 2 -s -d" " | /usr/bin/wc -l) + new=$(/usr/sbin/iw dev "$interface" station dump 2>/dev/null | /bin/grep Station | /usr/bin/cut -f 2 -s -d" " | /usr/bin/wc -l) count=$(( $count + $new )) done diff --git a/snmp/Openwrt/wlFrequency.sh b/snmp/Openwrt/wlFrequency.sh index 658459ab5..4552cc4b7 100755 --- a/snmp/Openwrt/wlFrequency.sh +++ b/snmp/Openwrt/wlFrequency.sh @@ -12,7 +12,7 @@ if [ $# -ne 1 ]; then fi # Extract frequency -frequency=$(/usr/sbin/iw dev "$1" info | /bin/grep channel | /usr/bin/cut -f 2- -s -d" " | /usr/bin/cut -f 2- -s -d"(" | /usr/bin/cut -f 1 -s -d" ") +frequency=$(/usr/sbin/iw dev "$1" info 2>/dev/null | /bin/grep channel | /usr/bin/cut -f 2- -s -d" " | /usr/bin/cut -f 2- -s -d"(" | /usr/bin/cut -f 1 -s -d" ") # Return snmp result /bin/echo "$frequency" diff --git a/snmp/Openwrt/wlNoiseFloor.sh b/snmp/Openwrt/wlNoiseFloor.sh index a3880cf34..9cebb323d 100755 --- a/snmp/Openwrt/wlNoiseFloor.sh +++ b/snmp/Openwrt/wlNoiseFloor.sh @@ -13,7 +13,7 @@ fi # Extract noise floor. Note, all associated stations have the same value, so just grab the first one # Use tail, not head (i.e. 
last line, not first), as head exits immediately, breaks the pipe to cut! -noise=$(/usr/bin/iwinfo "$1" assoclist | /usr/bin/cut -s -d "/" -f 2 | /usr/bin/cut -s -d "(" -f 1 | /usr/bin/cut -s -d " " -f 2 | /usr/bin/tail -1) +noise=$(/usr/bin/iwinfo "$1" assoclist 2>/dev/null | /usr/bin/cut -s -d "/" -f 2 | /usr/bin/cut -s -d "(" -f 1 | /usr/bin/cut -s -d " " -f 2 | /usr/bin/tail -1) # Return snmp result /bin/echo "$noise" diff --git a/snmp/Openwrt/wlRate.sh b/snmp/Openwrt/wlRate.sh index 6b9072435..d53068560 100755 --- a/snmp/Openwrt/wlRate.sh +++ b/snmp/Openwrt/wlRate.sh @@ -16,7 +16,8 @@ fi # Calculate result. Sum just for debug, and have to return integer # => If not integer (e.g. 2.67e+07), LibreNMS will drop the exponent (result, 2.67 bits/sec!) -ratelist=$(/usr/sbin/iw dev "$1" station dump | /bin/grep "$2 bitrate" | /usr/bin/cut -f 2 -s -d" ") +ratelist=$(/usr/sbin/iw dev "$1" station dump 2>/dev/null | /bin/grep "$2 bitrate" | /usr/bin/cut -f 2 -s -d" ") +result=0 if [ "$3" = "sum" ]; then result=$(/bin/echo "$ratelist" | /usr/bin/awk -F ':' '{sum += $2} END {printf "%d\n", 1000000*sum}') elif [ "$3" = "avg" ]; then diff --git a/snmp/Openwrt/wlSNR.sh b/snmp/Openwrt/wlSNR.sh index 2378c1aac..006cae071 100755 --- a/snmp/Openwrt/wlSNR.sh +++ b/snmp/Openwrt/wlSNR.sh @@ -14,7 +14,7 @@ if [ $# -ne 2 ]; then fi # Calculate result. 
Sum just for debug, and return integer (safest / easiest) -snrlist=$(/usr/bin/iwinfo "$1" assoclist | /usr/bin/cut -s -d "/" -f 2 | /usr/bin/cut -s -d "(" -f 2 | /usr/bin/cut -s -d " " -f 2 | /usr/bin/cut -s -d ")" -f 1) +snrlist=$(/usr/bin/iwinfo "$1" assoclist 2>/dev/null | /usr/bin/cut -s -d "/" -f 2 | /usr/bin/cut -s -d "(" -f 2 | /usr/bin/cut -s -d " " -f 2 | /usr/bin/cut -s -d ")" -f 1) if [ "$2" = "sum" ]; then result=$(/bin/echo "$snrlist" | /usr/bin/awk -F ':' '{sum += $1} END {printf "%d\n", sum}') elif [ "$2" = "avg" ]; then From ad799dfd1d15cbd7cf48cdc288e754ea2d9170a1 Mon Sep 17 00:00:00 2001 From: "Shao Yu-Lung (Allen)" Date: Fri, 25 Nov 2022 22:05:34 +0800 Subject: [PATCH 356/497] typo: Fix systemd.py Installation step (#441) --- snmp/systemd.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/systemd.py b/snmp/systemd.py index 1e6b47d51..2dd248d44 100755 --- a/snmp/systemd.py +++ b/snmp/systemd.py @@ -10,7 +10,7 @@ # 1. Copy this script to /etc/snmp/ and make it executable: # chmod +x /etc/snmp/systemd.py # 2. Edit your snmpd.conf and include: -# extend systemdd /etc/snmp/systemd.py +# extend systemd /etc/snmp/systemd.py # 3. (Optional) Create a /etc/snmp/systemd.json file and specify: # a.) "systemctl_cmd" - String path to the systemctl binary ["/usr/bin/systemctl"] # b.) 
"include_inactive_units" - True/False string to include inactive units in From 2efa59117ef2219faa78121fa6563d06ba99e749 Mon Sep 17 00:00:00 2001 From: bnerickson Date: Sat, 10 Dec 2022 05:14:41 -0800 Subject: [PATCH 357/497] Cleaning up certificate.py code and adding cert_location support for self-signed certificates (#447) --- snmp/certificate.py | 111 +++++++++++++++++++++++++------------------- 1 file changed, 63 insertions(+), 48 deletions(-) diff --git a/snmp/certificate.py b/snmp/certificate.py index c141afcd1..a1fa87d87 100755 --- a/snmp/certificate.py +++ b/snmp/certificate.py @@ -13,8 +13,7 @@ # } -def get_certificate_data(domain, port=443): - +def get_certificate_data(domain, cert_location, port=443): context = ssl.create_default_context() conn = context.wrap_socket( socket.socket(socket.AF_INET), @@ -25,13 +24,22 @@ def get_certificate_data(domain, port=443): error_msg = None ssl_info = {} + + # Load certificate for self-signed certificates if provided + if cert_location: + try: + context.load_verify_locations(cert_location) + except (FileNotFoundError, ssl.SSLError, PermissionError) as err: + error_msg = err + return ssl_info, error_msg + try: conn.connect((domain, port)) ssl_info = conn.getpeercert() - except ConnectionRefusedError as e: - error_msg = e + except ConnectionRefusedError as err: + error_msg = err # Manage expired certificates - except ssl.SSLCertVerificationError as e: + except ssl.SSLCertVerificationError: # Arbitrary start date ssl_info["notBefore"] = "Jan 1 00:00:00 2020 GMT" # End date is now (we don't have the real one but the certificate is expired) @@ -41,53 +49,60 @@ def get_certificate_data(domain, port=443): return ssl_info, error_msg -output = {} -output["error"] = 0 -output["errorString"] = "" -output["version"] = 1 +def main(): + output = {} + output["error"] = 0 + output["errorString"] = "" + output["version"] = 1 -with open(CONFIGFILE, "r") as json_file: - try: - configfile = json.load(json_file) - except 
json.decoder.JSONDecodeError as e: - output["error"] = 1 - output["errorString"] = "Configfile Error: '%s'" % e - -if not output["error"]: - output_data_list = [] - for domain in configfile["domains"]: - output_data = {} - - if "port" not in domain.keys(): - domain["port"] = 443 - certificate_data, error_msg = get_certificate_data( - domain["fqdn"], domain["port"] - ) - - output_data["cert_name"] = domain["fqdn"] - - if not error_msg: - ssl_date_format = r"%b %d %H:%M:%S %Y %Z" - validity_end = datetime.datetime.strptime( - certificate_data["notAfter"], ssl_date_format - ) - validity_start = datetime.datetime.strptime( - certificate_data["notBefore"], ssl_date_format + with open(CONFIGFILE, "r") as json_file: + try: + configfile = json.load(json_file) + except json.decoder.JSONDecodeError as err: + output["error"] = 1 + output["errorString"] = "Configfile Error: '%s'" % err + + if not output["error"]: + output_data_list = [] + for domain in configfile["domains"]: + output_data = {} + + if "port" not in domain.keys(): + domain["port"] = 443 + if "cert_location" not in domain.keys(): + domain["cert_location"] = None + certificate_data, error_msg = get_certificate_data( + domain["fqdn"], domain["cert_location"], domain["port"] ) - cert_age = datetime.datetime.now() - validity_start - cert_still_valid = validity_end - datetime.datetime.now() - output_data["age"] = cert_age.days - output_data["remaining_days"] = cert_still_valid.days + output_data["cert_name"] = domain["fqdn"] - else: - output_data["age"] = None - output_data["remaining_days"] = None - output["error"] = 1 - output["errorString"] = "%s: %s" % (domain["fqdn"], error_msg) + if not error_msg: + ssl_date_format = r"%b %d %H:%M:%S %Y %Z" + validity_end = datetime.datetime.strptime( + certificate_data["notAfter"], ssl_date_format + ) + validity_start = datetime.datetime.strptime( + certificate_data["notBefore"], ssl_date_format + ) + cert_age = datetime.datetime.now() - validity_start + cert_still_valid = 
validity_end - datetime.datetime.now() + + output_data["age"] = cert_age.days + output_data["remaining_days"] = cert_still_valid.days + + else: + output_data["age"] = None + output_data["remaining_days"] = None + output["error"] = 1 + output["errorString"] = "%s: %s" % (domain["fqdn"], error_msg) + + output_data_list.append(output_data) + + output["data"] = output_data_list - output_data_list.append(output_data) + print(json.dumps(output)) - output["data"] = output_data_list -print(json.dumps(output)) +if __name__ == "__main__": + main() From f05fe3502f9eaa97124235410e16c0eea72ecc8d Mon Sep 17 00:00:00 2001 From: "Shao Yu-Lung (Allen)" Date: Sun, 11 Dec 2022 22:34:11 +0800 Subject: [PATCH 358/497] fix: remove unnecessary line breaks (#443) --- agent-local/nginx-python3.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/agent-local/nginx-python3.py b/agent-local/nginx-python3.py index fd710ba8c..6ebc11482 100755 --- a/agent-local/nginx-python3.py +++ b/agent-local/nginx-python3.py @@ -20,7 +20,7 @@ dataorder = ["Active", "Reading", "Writing", "Waiting", "Requests"] -print("<<>>\n") +print("<<>>") for param in dataorder: if param == "Active": From 689165a9ba7d338d73eb5658cdf84e780a91ac01 Mon Sep 17 00:00:00 2001 From: Dan Kerse Date: Tue, 13 Dec 2022 09:32:19 +1300 Subject: [PATCH 359/497] Update fail2ban so it doesn't panic when the return value is zero (#446) --- snmp/fail2ban | 60 +++++++++++++++++++++++++-------------------------- 1 file changed, 29 insertions(+), 31 deletions(-) diff --git a/snmp/fail2ban b/snmp/fail2ban index 42f29ed63..b6429e61d 100644 --- a/snmp/fail2ban +++ b/snmp/fail2ban @@ -122,39 +122,37 @@ sub stats{ elsif ($? & 127) { $toReturn{errorString}= sprintf "fail2ban-client died with signal %d, %s coredump\n", ($? & 127), ($? & 128) ? 'with' : 'without'; - } - else { - $toReturn{error}=$? 
>> 8; - $toReturn{errorString}="fail2ban-client exited with ".$toReturn{error}; - } + } if ( $toReturn{error} == 0 ){ - - my @jailsOutputA=split(/\n/, $jailsOutput); - my ( $jailsS )=grep( /Jail\ list/, @jailsOutputA ); - $jailsS=~s/.*\://; - $jailsS=~s/\s//g; - my @jails=split(/\,/, $jailsS); - - #process jails - my $int=0; - while(defined($jails[$int])){ - - #get the total for this jail - my $jailStatusOutput=`$f2bc status $jails[$int]`; - my @jailStatusOutputA=split(/\n/, $jailStatusOutput); - my ( $jailTotal )=grep(/Currently\ banned\:/, @jailStatusOutputA); - $jailTotal=~s/.*\://; - $jailTotal=~s/\s//g; - - #tally the total and add this jail to the list - $toReturn{data}{total} = $toReturn{data}{total} + $jailTotal; - $toReturn{data}{jails}{ $jails[$int] } = $jailTotal; - - $int++; - } - - } + + my @jailsOutputA=split(/\n/, $jailsOutput); + my ( $jailsS )=grep( /Jail\ list/, @jailsOutputA ); + $jailsS=~s/.*\://; + $jailsS=~s/\s//g; + my @jails=split(/\,/, $jailsS); + + #process jails + my $int=0; + while(defined($jails[$int])){ + + #get the total for this jail + my $jailStatusOutput=`$f2bc status $jails[$int]`; + my @jailStatusOutputA=split(/\n/, $jailStatusOutput); + my ( $jailTotal )=grep(/Currently\ banned\:/, @jailStatusOutputA); + $jailTotal=~s/.*\://; + $jailTotal=~s/\s//g; + + #tally the total and add this jail to the list + $toReturn{data}{total} = $toReturn{data}{total} + $jailTotal; + $toReturn{data}{jails}{ $jails[$int] } = $jailTotal; + + $int++; + } + } else { + $toReturn{error}=$? 
>> 8; + $toReturn{errorString}="fail2ban-client exited with ".$toReturn{error}; + } my $j=JSON->new; $j->canonical(1); From c1e1aacdf4f47d6476e897f8f226337910949863 Mon Sep 17 00:00:00 2001 From: Martin <10722552+efelon@users.noreply.github.com> Date: Wed, 4 Jan 2023 00:00:33 +0100 Subject: [PATCH 360/497] Latest pi-hole API needs auth parameter for summary (#451) fix https://github.com/librenms/librenms-agent/issues/366 --- snmp/pi-hole | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/snmp/pi-hole b/snmp/pi-hole index 2a7682f9f..f0d226e01 100644 --- a/snmp/pi-hole +++ b/snmp/pi-hole @@ -6,20 +6,20 @@ CONFIGFILE='/etc/snmp/pi-hole.conf' API_AUTH_KEY="" API_URL="localhost/admin/api.php" -URL_READ_ONLY="?summaryRaw" +URL_READ_ONLY="?summaryRaw&auth=" URL_QUERY_TYPE="?getQueryTypes&auth=" PICONFIGFILE='/etc/pihole/setupVars.conf' DHCPLEASEFILE='/etc/pihole/dhcp.leases' if [ -f $CONFIGFILE ]; then - # shellcheck disable=SC1090 - . $CONFIGFILE + # shellcheck disable=SC1090 + . $CONFIGFILE fi # read in pi-hole variables for DHCP range if [ -f $PICONFIGFILE ]; then - # shellcheck disable=SC1090 - . $PICONFIGFILE + # shellcheck disable=SC1090 + . 
$PICONFIGFILE fi #/ Description: BASH script to get Pi-hole stats @@ -89,24 +89,24 @@ debug() { exportdata() { # domains_being_blocked / dns_query_total / ads_blocked_today / ads_percentage_today # unique_domains / queries_forwarded / queries_cached - GET_STATS=$(curl -s $API_URL"$URL_READ_ONLY" | jq '.domains_being_blocked, .dns_queries_today, .ads_blocked_today, .ads_percentage_today, .unique_domains, .queries_forwarded, .queries_cached') + GET_STATS=$(curl -s "${API_URL}${URL_READ_ONLY}${API_AUTH_KEY}" | jq '.domains_being_blocked, .dns_queries_today, .ads_blocked_today, .ads_percentage_today, .unique_domains, .queries_forwarded, .queries_cached') echo "$GET_STATS" | tr " " "\n" # A / AAAA / PTR / SRV - GET_QUERY_TYPE=$(curl -s $API_URL"$URL_QUERY_TYPE""$API_AUTH_KEY" | jq '.[]["A (IPv4)", "AAAA (IPv6)", "PTR", "SRV"]') + GET_QUERY_TYPE=$(curl -s "${API_URL}${URL_QUERY_TYPE}${API_AUTH_KEY}" | jq '.[]["A (IPv4)", "AAAA (IPv6)", "PTR", "SRV"]') echo "$GET_QUERY_TYPE" | tr " " "\n" # Find number of DHCP address in scope and current lease count # case-insensitive compare, just in case :) - if [ "${DHCP_ACTIVE,,}" = "true" ]; then + if [ -n "${DHCP_ACTIVE+x}" ] && [ "${DHCP_ACTIVE,,}" = "true" ]; then # Max IP addresses in scope # Convert IPs to decimal and subtract IFS="." read -r -a array <<< "$DHCP_START" - DHCPSTARTDECIMAL=$(( (${array[0]}*256**3) + (${array[1]}*256**2) + (${array[2]}*256) + ${array[3]} )) + DHCPSTARTDECIMAL=$(( (array[0]*256**3) + (array[1]*256**2) + (array[2]*256) + array[3] )) IFS="." 
read -r -a array <<< "$DHCP_END" - DHCPENDDECIMAL=$(( (${array[0]}*256**3) + (${array[1]}*256**2) + (${array[2]}*256) + ${array[3]} )) - expr $DHCPENDDECIMAL - $DHCPSTARTDECIMAL + DHCPENDDECIMAL=$(( (array[0]*256**3) + (array[1]*256**2) + (array[2]*256) + array[3] )) + echo $(( DHCPENDDECIMAL - DHCPSTARTDECIMAL )) # Current lease count - cat $DHCPLEASEFILE | wc -l + wc -l < ${DHCPLEASEFILE} else echo 0 echo 0 @@ -114,7 +114,7 @@ exportdata() { } if [ -z "$*" ]; then - exportdata + exportdata fi expr "$*" : ".*--help" > /dev/null && usage expr "$*" : ".*--debug" > /dev/null && debug From 8b409de6808d2123674a62e0570dd3c1b61fbe1c Mon Sep 17 00:00:00 2001 From: bnerickson Date: Tue, 3 Jan 2023 15:06:16 -0800 Subject: [PATCH 361/497] =?UTF-8?q?Updating=20wireguard=20script=20to=20di?= =?UTF-8?q?scover=20interfaces=20separately=20and=20execu=E2=80=A6=20(#452?= =?UTF-8?q?)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- snmp/wireguard.py | 82 +++++++++++++++++++++++++---------------------- 1 file changed, 44 insertions(+), 38 deletions(-) diff --git a/snmp/wireguard.py b/snmp/wireguard.py index 66c90ec93..d5fb515a8 100755 --- a/snmp/wireguard.py +++ b/snmp/wireguard.py @@ -14,10 +14,14 @@ # extend wireguard /etc/snmp/wireguard.py # 3. Create a /etc/snmp/wireguard.json file and specify: # a.) (optional) "wg_cmd" - String path to the wg binary ["/usr/bin/wg"] -# b.) "public_key_to_arbitrary_name" - A dictionary to convert between the publickey -# assigned to the client (specified in the wireguard -# interface conf file) to an arbitrary, friendly -# name. The friendly names MUST be unique within +# b.) "public_key_to_arbitrary_name" - Two nested dictionaries where the key for the outer +# dictionary is the interface name, and the value for +# the outer dictionary is the inner dictionary. 
The +# inner dictionary is composed of key values +# corresponding to the clients' public keys +# (specified in the wireguard interface config file) +# and values corresponding to arbitrary friendly +# names. The friendly names MUST be unique within # each interface. Also note that the interface name # and friendly names are used in the RRD filename, # so using special characters is highly discouraged. @@ -44,8 +48,8 @@ from itertools import chain CONFIG_FILE = "/etc/snmp/wireguard.json" -WG_ARGS = ["show", "all", "dump"] WG_CMD = "/usr/bin/wg" +WG_ARGS_SHOW_INTFS = ["show", "interfaces"] def error_handler(error_name, err): @@ -75,7 +79,7 @@ def config_file_parser(): Inputs: None Outputs: - wg_cmd: The full wg command to execute. + wg_cmd: The final wg binary to execute. interface_clients_dict: Dictionary mapping of interface names to public_key->client names. """ # Load configuration file if it exists @@ -93,8 +97,6 @@ def config_file_parser(): ) as err: error_handler("Config File Error", err) - # Create and return full wg command. - wg_cmd.extend(WG_ARGS) return wg_cmd, interface_clients_dict @@ -132,19 +134,19 @@ def config_file_validator(interface_clients_dict): error_handler("Config File Error", err) -def command_executor(wg_cmd): +def command_executor(wg_cmd_full): """ command_executor(): Execute the wg command and return the output. Inputs: - wg_cmd: The full wg command to execute. + wg_cmd_full: The full wg command to execute. Outputs: poutput: The stdout of the executed command (empty byte-string if error). 
""" try: # Execute wg command poutput = subprocess.check_output( - wg_cmd, + wg_cmd_full, stdin=None, stderr=subprocess.PIPE, ) @@ -153,7 +155,7 @@ def command_executor(wg_cmd): return poutput -def output_parser(line, interface_clients_dict): +def output_parser(line, interface_clients_dict, interface): """ output_parser(): Parses a line from the wg command for the client's public key, traffic inbound and outbound, wireguard interface, and last handshake timestamp. @@ -161,18 +163,18 @@ def output_parser(line, interface_clients_dict): Inputs: line: The wireguard client status line from the wg command stdout. interface_clients_dict: Dictionary mapping of interface to public_key->client names. + interface: The wireguard interface we are parsing. Outputs: - wireguard_data: A dictionary of a peer's server interface, public key, bytes sent and - received, and minutes since last handshake + wireguard_data: A dictionary of a peer's public key, bytes sent and received, and minutes + since last handshake. """ line_parsed = line.strip().split() try: - interface = str(line_parsed[0]) - public_key = str(line_parsed[1]) - timestamp = int(line_parsed[5]) - bytes_rcvd = int(line_parsed[6]) - bytes_sent = int(line_parsed[7]) + public_key = str(line_parsed[0]) + timestamp = int(line_parsed[4]) + bytes_rcvd = int(line_parsed[5]) + bytes_sent = int(line_parsed[6]) except (IndexError, ValueError) as err: error_handler("Command Output Parsing Error", err) @@ -196,12 +198,10 @@ def output_parser(line, interface_clients_dict): ) wireguard_data = { - interface: { - friendly_name: { - "minutes_since_last_handshake": minutes_since_last_handshake, - "bytes_rcvd": bytes_rcvd, - "bytes_sent": bytes_sent, - } + friendly_name: { + "minutes_since_last_handshake": minutes_since_last_handshake, + "bytes_rcvd": bytes_rcvd, + "bytes_sent": bytes_sent, } } @@ -226,19 +226,25 @@ def main(): # Verify contents of the config file are valid. 
config_file_validator(interface_clients_dict) - # Execute wg command and parse output. We skip the first line ("[1:]") since that's the - # wireguard server's public key declaration. - for line in command_executor(wg_cmd).decode("utf-8").split("\n")[1:]: - if not line: - continue - # Parse each line and import the resultant dictionary into output_data. We update the - # interface key with new clients as they are found and instantiate new interface keys as - # they are found. - for intf, intf_data in output_parser(line, interface_clients_dict).items(): - if intf not in output_data["data"]: - output_data["data"][intf] = {} - for client, client_data in intf_data.items(): - output_data["data"][intf][client] = client_data + # Get list of interfaces + wg_cmd_show_intfs = wg_cmd + WG_ARGS_SHOW_INTFS + wg_intfs = command_executor(wg_cmd_show_intfs).decode("utf-8").strip().split(" ") + + # Execute wg command on each discovered interface and parse output. We skip the first line + # ("[1:]") since that's the wireguard server's public key declaration. + for interface in wg_intfs: + wg_cmd_dump = wg_cmd + ["show"] + [interface] + ["dump"] + output_data["data"][interface] = {} + for line in command_executor(wg_cmd_dump).decode("utf-8").split("\n")[1:]: + if not line: + continue + # Parse each line and import the resultant dictionary into output_data. We update the + # interface key with new clients as they are found and instantiate new interface keys as + # they are found. + for friendly_name, client_data in output_parser( + line, interface_clients_dict, interface + ).items(): + output_data["data"][interface][friendly_name] = client_data print(json.dumps(output_data)) From bb27e2e0bebee74d7288a5fd7e23b308cc88f366 Mon Sep 17 00:00:00 2001 From: "Zane C. 
Bowers-Hadley" Date: Fri, 20 Jan 2023 16:25:17 -0600 Subject: [PATCH 362/497] CAPE/Cuckoo extend update (#422) * now properly counts pending * lots more work on it and it * more work * malscore, severity, wieght, and confidence now work with packages * misc minor cleanups * fix for the processing log which includes the task id as well --- snmp/cape | 371 +++++++++++++++++++++++++++++++++++++++--------------- 1 file changed, 273 insertions(+), 98 deletions(-) diff --git a/snmp/cape b/snmp/cape index e0c2c795a..c85418014 100755 --- a/snmp/cape +++ b/snmp/cape @@ -1,6 +1,6 @@ #!/usr/bin/env perl -#Copyright (c) 2022, Zane C. Bowers-Hadley +#Copyright (c) 2023, Zane C. Bowers-Hadley #All rights reserved. # #Redistribution and use in source and binary forms, with or without modification, @@ -34,39 +34,46 @@ Supported command line options are as below. -c Config INI file. Default: /usr/local/etc/cape_extend.ini +Depends can be installed via... + + apt-get install libfile-readbackwards-perl libjson-perl libconfig-tiny-perl libdbi-perl libfile-slurp-perl libstatistics-lite-perl + The defeault setttings are... # DBI connection DSN dsn=dbi:Pg:dbname=cape - + # DB user user=cape - + # DB PW pass= - + # CAPEv2 cuckoo log file clog=/opt/CAPEv2/log/cuckoo.log - + # CAPEv2 process log file plog=/opt/CAPEv2/log/process.log - + + # storage location + storage=/opt/CAPEv2/storage + # 0/1 for if it is okay for the process log to not exist # this enables it to work with cuckoo as well as CAPEv2 mplogok=1 - + # list of ignores ignores=/usr/local/etc/cape_extend.ignores - + # send errors along for inclusion in the event log sendErrors=1 - + # send criticals along for inclusion in the event log sendCriticals=1 - + # send warnings along for inclusion in the event log sendWarnings= 1 - + # don't use analysis_started_on. analysis_finished_on. 
processing_started_on, # processing_finished_on, signatures_started_on, signatures_finished_on, # reporting_started_on, or reporting_finished_on with the SQL statement @@ -85,8 +92,17 @@ generated when VM traffic is dropped, you would use the two lines such as below. WARNING PCAP file does not exist at path WARNING Unable to Run Suricata: Pcap file +In 'conf/reporting.conf' for cape, 'litereport' will need enabled. 'keys_to_copy' +should include 'signatures' and 'detections'. + =cut +# # location of the IP cache to use +# ip_cache=/var/cache/cape_extend_ip + +# # subnets not to count for IP accounting +# ip_ignore=/usr/local/etc/cape_ip_ignore + use strict; use warnings; use Getopt::Long; @@ -95,7 +111,7 @@ use JSON; use Config::Tiny; use DBI; use Time::Piece; -use File::Slurp; +use File::Slurp qw(read_file); use Statistics::Lite qw(:all); sub version { @@ -160,7 +176,6 @@ my $return_json = { failed_analysis => 0, failed_processing => 0, failed_reporting => 0, - packages => {}, dropped_files => 0, running_processes => 0, api_calls => 0, @@ -174,16 +189,22 @@ my $return_json = { timedout => 0, pkg_stats => {}, total_tasks => 0, + wrong_pkg => 0, + detections_stats => {}, }, error => 0, errorString => '', version => 1, }; +# holds a list of reported tasks +my $reported = {}; + my @stats_for = ( 'dropped_files', 'running_processes', 'api_calls', 'domains', 'signatures_total', 'signatures_alert', 'files_written', 'registry_keys_modified', - 'crash_issues', 'anti_issues', + 'crash_issues', 'anti_issues', 'malscore', 'severity', + 'confidence', 'weight' ); my $ag_stats = { @@ -197,6 +218,10 @@ my $ag_stats = { registry_keys_modified => [], crash_issues => [], anti_issues => [], + malscore => [], + severity => [], + confidence => [], + weight => [], }; my $pkg_stats = {}; @@ -211,8 +236,11 @@ my $defaults = { pass => '', clog => '/opt/CAPEv2/log/cuckoo.log', plog => '/opt/CAPEv2/log/process.log', + storage => '/opt/CAPEv2/storage', mplogok => 1, ignores => 
'/usr/local/etc/cape_extend.ignores', + ip_cache => '/var/cache/cape_extend_ip', + ip_ignore => '/usr/local/etc/cape_ip_ignore', sendErrors => 1, sendCriticals => 1, sendWarnings => 1, @@ -247,6 +275,26 @@ if ( -f $config->{ignores} ) { } } +# # process the IP ignore file +# my @ip_ignores; +# if ( -f $config->{ip_ignore} ) { +# my $ip_ignore_raw = read_file( $config->{ip_ignores} ); +# @ip_ignores = grep( !/^[\ \t]*$/, grep( !/^[\ \t]*\#/, split( /\n/, $ip_ignore_raw ) ) ); +# } + +# # process the IP ignore file +# my %ip_cache; +# if ( -f $config->{ip_ignore} ) { +# my $ip_cache_raw = read_file( $config->{ignores} ); +# # IP,count,time +# # Time is unix time. +# my @ip_cache_split = grep( !/^[0-9a-fA-F\:\.]+\,[0-9]+\,[0-9]+$/, split( /\n/, $ip_cache_raw ) ); +# foreach my $line (@ip_cache_split) { +# my ( $ip, $ip_count, $ip_time ) = split( /\,/ . $line ); +# $ip_cache{$ip} = { count => $ip_count, time => $ip_time }; +# } +# } + # put together the list of logs to read my @logs; if ( !-f $config->{clog} ) { @@ -262,6 +310,10 @@ else { push( @logs, $config->{plog} ); } +# +# process all the log lines, counting them +# + my $process_loop = 0; my $process_logs = 1; while ( $process_logs && defined( $logs[$process_loop] ) ) { @@ -274,12 +326,29 @@ while ( $process_logs && defined( $logs[$process_loop] ) ) { my $current_entry = ''; while ( defined($bw) && defined( my $log_line = $bw->readline ) && $continue ) { $current_entry = $log_line . 
$current_entry; - if ( $current_entry - =~ /^20[0-9][0-9]\-[01][0-9]\-[0-3][0-9]\ [0-2][0-9]\:[0-5][0-9]\:[0-5][0-9]\,[0-9]+\ \[[a-z-A-Z\.0-9\_\-]+\]\ [a-zA-Z]+\:/ + if ( + ( + $current_entry + =~ /^20[0-9][0-9]\-[01][0-9]\-[0-3][0-9]\ [0-2][0-9]\:[0-5][0-9]\:[0-5][0-9]\,[0-9]+\ \[[a-z-A-Z\.0-9\_\-]+\]\ [a-zA-Z]+\:/ + ) + || ( $current_entry + =~ /^20[0-9][0-9]\-[01][0-9]\-[0-3][0-9]\ [0-2][0-9]\:[0-5][0-9]\:[0-5][0-9]\,[0-9]+\ \[[a-z-A-Z\.0-9\_\-]+\]\ \[[a-z-A-Z\.0-9\_\-]+\]\ [a-zA-Z]+\:/ + ) ) { + my ( $date, $time, $log_task_id, $lib, $level, $entry ); + # parse it and blank it for when we get to the next one. - my ( $date, $time, $lib, $level, $entry ) = split( /[\ \t]+/, $current_entry, 5 ); + if ( $current_entry + =~ /^20[0-9][0-9]\-[01][0-9]\-[0-3][0-9]\ [0-2][0-9]\:[0-5][0-9]\:[0-5][0-9]\,[0-9]+\ \[[a-z-A-Z\.0-9\_\-]+\]\ [a-zA-Z]+\:/ + ) + { + ( $date, $time, $lib, $level, $entry ) = split( /[\ \t]+/, $current_entry, 5 ); + } + else { + ( $date, $time, $log_task_id, $lib, $level, $entry ) = split( /[\ \t]+/, $current_entry, 6 ); + $entry = $log_task_id . ': ' . $entry; + } $current_entry = ''; # chomp off the seconds place after the , @@ -314,6 +383,9 @@ while ( $process_logs && defined( $logs[$process_loop] ) ) { push( @{ $return_json->{data}->{criticals} }, $entry ); } } + if ( $level eq 'warning' && $entry =~ /submitted\ the\ job\ with\ wrong\ package/ ) { + $return_json->{wrong_pkg}++; + } } } } @@ -322,10 +394,13 @@ while ( $process_logs && defined( $logs[$process_loop] ) ) { $process_loop++; } +# +# put together query for getting the current tasks +# my $query; if ( $config->{dsn} =~ /^[Dd][Bb][Ii]:[Mm]ysql/ ) { - $query - = "select status,package from tasks where ( added_on > FROM_UNIXTIME('" + $query = "select id,status,package from tasks where ( status != 'pending' ) and '. +'( added_on > FROM_UNIXTIME('" . $target_time . "')) or " . 
"( started_on > FROM_UNIXTIME('" @@ -336,9 +411,11 @@ if ( $config->{dsn} =~ /^[Dd][Bb][Ii]:[Mm]ysql/ ) { } else { $query - = "select status,package,dropped_files,running_processes,api_calls,domains,signatures_total,signatures_alert,files_written,registry_keys_modified,crash_issues,anti_issues,timedout from tasks where ( added_on > CURRENT_TIMESTAMP - interval '5 minutes' ) or " + = "select id,status,package,dropped_files,running_processes,api_calls,domains,signatures_total,signatures_alert,files_written,registry_keys_modified,crash_issues,anti_issues,timedout from tasks where" + . " (status != 'pending') and " + . " ( added_on > CURRENT_TIMESTAMP - interval '5 minutes' ) or " . "( started_on > CURRENT_TIMESTAMP - interval '5 minutes' ) or " - . "( completed_on > CURRENT_TIMESTAMP - interval '5 minutes' )"; + . "( completed_on > CURRENT_TIMESTAMP - interval '5 minutes' ) "; if ( !$config->{cuckoosql} ) { $query = $query @@ -358,6 +435,13 @@ else { eval { my $dbh = DBI->connect( $config->{dsn}, $config->{user}, $config->{pass} ) || die($DBI::errstr); + + eval { + my $sth_pending = $dbh->prepare("select * from tasks where status = 'pending'"); + $sth_pending->execute; + $return_json->{data}{pending} = $sth_pending->rows; + }; + my $sth = $dbh->prepare($query); $sth->execute; my $task_status; @@ -373,6 +457,7 @@ eval { my $crash_issues; my $anti_issues; my $timedout; + my $task_id; # # MySQL is basically for old Cuckoo support. 
# CAPEv2 does not really play nice with it because of column issues @@ -388,10 +473,10 @@ eval { } else { $sth->bind_columns( - undef, \$task_status, \$task_package, \$dropped_files, - \$running_processes, \$api_calls, \$domains, \$signatures_total, - \$signatures_alert, \$files_written, \$registry_keys_modified, \$crash_issues, - \$anti_issues, \$timedout + undef, \$task_id, \$task_status, \$task_package, + \$dropped_files, \$running_processes, \$api_calls, \$domains, + \$signatures_total, \$signatures_alert, \$files_written, \$registry_keys_modified, + \$crash_issues, \$anti_issues, \$timedout ); while ( $sth->fetch ) { if ( defined( $return_json->{data}->{$task_status} ) ) { @@ -399,122 +484,174 @@ eval { $return_json->{data}->{total_tasks}++; } - # skip blank entries - if ( $task_package ne '' ) { - if ( defined( $return_json->{data}->{packages}->{$task_package} ) ) { - $return_json->{data}->{packages}->{$task_package}++; - } - else { - $return_json->{data}->{packages}->{$task_package} = 1; - } + if ( $task_status eq 'reported' ) { + $reported->{$task_id} = { + package => $task_package, + dropped_files => $dropped_files, + running_processes => $running_processes, + domains => $domains, + api_calls => $api_calls, + signatures_total => $signatures_total, + signatures_alert => $signatures_alert, + files_written => $files_written, + registry_keys_modified => $registry_keys_modified, + crash_issue => $crash_issues, + anti_issues => $anti_issues, + timedout => $timedout, + }; } - if ( defined($running_processes) ) { + if ( !defined($task_package) || $task_package eq '' ) { + $task_package = 'generic'; + } + + if ( !defined($running_processes) ) { + $running_processes = 0; + } + if ( $task_status eq 'reported' ) { $return_json->{data}->{running_processes} += $running_processes; push( @{ $ag_stats->{running_processes} }, $running_processes ); } - else { + if ( !defined($api_calls) ) { + $api_calls = 0; } - - if ( defined($api_calls) ) { + if ( $task_status eq 
'reported' ) { $return_json->{data}->{api_calls} += $api_calls; push( @{ $ag_stats->{api_calls} }, $api_calls ); } - if ( defined($domains) ) { + if ( !defined($domains) ) { + $domains = 0; + } + if ( $task_status eq 'reported' ) { $return_json->{data}->{domains} += $domains; push( @{ $ag_stats->{domains} }, $domains ); } - if ( defined($signatures_alert) ) { + if ( !defined($signatures_alert) ) { + $signatures_alert = 0; + } + if ( $task_status eq 'reported' ) { $return_json->{data}->{signatures_alert} += $signatures_alert; push( @{ $ag_stats->{signatures_alert} }, $signatures_alert ); } - if ( defined($signatures_total) ) { + if ( !defined($signatures_total) ) { + $signatures_total = 0; + } + if ( $task_status eq 'reported' ) { $return_json->{data}->{signatures_total} += $signatures_total; push( @{ $ag_stats->{signatures_total} }, $signatures_total ); } - if ( defined($files_written) ) { + if ( !defined($files_written) ) { + $files_written = 0; + } + if ( $task_status eq 'reported' ) { $return_json->{data}->{files_written} += $files_written; push( @{ $ag_stats->{files_written} }, $files_written ); } - if ( defined($registry_keys_modified) ) { + if ( !defined($registry_keys_modified) ) { + $registry_keys_modified = 0; + } + if ( $task_status eq 'reported' ) { $return_json->{data}->{registry_keys_modified} += $registry_keys_modified; push( @{ $ag_stats->{registry_keys_modified} }, $registry_keys_modified ); } - if ( defined($crash_issues) ) { + if ( !defined($crash_issues) ) { + $crash_issues = 0; + } + if ( $task_status eq 'reported' ) { $return_json->{data}->{crash_issues} += $crash_issues; push( @{ $ag_stats->{crash_issues} }, $crash_issues ); } - if ( defined($anti_issues) ) { + if ( !defined($anti_issues) ) { + $anti_issues = 0; + } + if ( $task_status eq 'reported' ) { $return_json->{data}->{anti_issues} += $anti_issues; push( @{ $ag_stats->{anti_issues} }, $anti_issues ); } - if ( defined($dropped_files) ) { + if ( !defined($dropped_files) ) { + 
$dropped_files = 0; + } + if ( $task_status eq 'reported' ) { $return_json->{data}->{dropped_files} += $dropped_files; push( @{ $ag_stats->{dropped_files} }, $dropped_files ); + } - # put per package stats together - if ( $task_package ne '' ) { - if ( !defined( $return_json->{data}->{pkg_stats}->{$task_package} ) ) { - $return_json->{data}->{pkg_stats}->{$task_package} = { - dropped_files => $dropped_files, - running_processes => $running_processes, - api_calls => $api_calls, - domains => $domains, - signatures_total => $signatures_total, - signatures_alert => $signatures_alert, - files_written => $files_written, - registry_keys_modified => $registry_keys_modified, - crash_issues => $crash_issues, - anti_issues => $anti_issues - }; - $pkg_stats->{$task_package} = { - dropped_files => [$dropped_files], - running_processes => [$running_processes], - api_calls => [$api_calls], - domains => [$domains], - signatures_total => [$signatures_total], - signatures_alert => [$signatures_alert], - files_written => [$files_written], - registry_keys_modified => [$registry_keys_modified], - crash_issues => [$crash_issues], - anti_issues => [$anti_issues] - }; - } - else { - $return_json->{data}->{pkg_stats}->{$task_package}->{dropped_files} += $dropped_files; - $return_json->{data}->{pkg_stats}->{$task_package}->{running_processes} += $running_processes; - $return_json->{data}->{pkg_stats}->{$task_package}->{api_calls} += $api_calls; - $return_json->{data}->{pkg_stats}->{$task_package}->{domains} += $domains; - $return_json->{data}->{pkg_stats}->{$task_package}->{signatures_total} += $signatures_total; - $return_json->{data}->{pkg_stats}->{$task_package}->{signatures_alert} += $signatures_alert; - $return_json->{data}->{pkg_stats}->{$task_package}->{files_written} += $files_written; - $return_json->{data}->{pkg_stats}->{$task_package}->{registry_keys_modified} - += $registry_keys_modified; - $return_json->{data}->{pkg_stats}->{$task_package}->{crash_issues} += $crash_issues; - 
$return_json->{data}->{pkg_stats}->{$task_package}->{anti_issues} += $anti_issues; - - push( @{ $pkg_stats->{$task_package}->{dropped_files} }, $dropped_files ); - push( @{ $pkg_stats->{$task_package}->{running_processes} }, $running_processes ); - push( @{ $pkg_stats->{$task_package}->{api_calls} }, $api_calls ); - push( @{ $pkg_stats->{$task_package}->{domains} }, $domains ); - push( @{ $pkg_stats->{$task_package}->{signatures_total} }, $signatures_total ); - push( @{ $pkg_stats->{$task_package}->{signatures_alert} }, $signatures_alert ); - push( @{ $pkg_stats->{$task_package}->{files_written} }, $files_written ); - push( @{ $pkg_stats->{$task_package}->{registry_keys_modified} }, $registry_keys_modified ); - push( @{ $pkg_stats->{$task_package}->{crash_issues} }, $crash_issues ); - push( @{ $pkg_stats->{$task_package}->{anti_issues} }, $anti_issues ); - } - } + # put per package stats together + if ( !defined( $return_json->{data}->{pkg_stats}->{$task_package} ) ) { + $return_json->{data}->{pkg_stats}->{$task_package} = { + dropped_files => $dropped_files, + running_processes => $running_processes, + api_calls => $api_calls, + domains => $domains, + signatures_total => $signatures_total, + signatures_alert => $signatures_alert, + files_written => $files_written, + registry_keys_modified => $registry_keys_modified, + crash_issues => $crash_issues, + anti_issues => $anti_issues, + banned => 0, + pending => 0, + running => 0, + completed => 0, + distributed => 0, + reported => 0, + recovered => 0, + failed_analysis => 0, + failed_processing => 0, + failed_reporting => 0, + tasks => 1, + }; + $pkg_stats->{$task_package} = { + dropped_files => [$dropped_files], + running_processes => [$running_processes], + api_calls => [$api_calls], + domains => [$domains], + signatures_total => [$signatures_total], + signatures_alert => [$signatures_alert], + files_written => [$files_written], + registry_keys_modified => [$registry_keys_modified], + crash_issues => [$crash_issues], 
+ anti_issues => [$anti_issues], + malscore => [], + confidence => [], + severity => [], + }; + } + else { + $return_json->{data}->{pkg_stats}->{$task_package}->{tasks}++; + $return_json->{data}->{pkg_stats}->{$task_package}->{dropped_files} += $dropped_files; + $return_json->{data}->{pkg_stats}->{$task_package}->{running_processes} += $running_processes; + $return_json->{data}->{pkg_stats}->{$task_package}->{api_calls} += $api_calls; + $return_json->{data}->{pkg_stats}->{$task_package}->{domains} += $domains; + $return_json->{data}->{pkg_stats}->{$task_package}->{signatures_total} += $signatures_total; + $return_json->{data}->{pkg_stats}->{$task_package}->{signatures_alert} += $signatures_alert; + $return_json->{data}->{pkg_stats}->{$task_package}->{files_written} += $files_written; + $return_json->{data}->{pkg_stats}->{$task_package}->{registry_keys_modified} + += $registry_keys_modified; + $return_json->{data}->{pkg_stats}->{$task_package}->{crash_issues} += $crash_issues; + $return_json->{data}->{pkg_stats}->{$task_package}->{anti_issues} += $anti_issues; + + push( @{ $pkg_stats->{$task_package}->{dropped_files} }, $dropped_files ); + push( @{ $pkg_stats->{$task_package}->{running_processes} }, $running_processes ); + push( @{ $pkg_stats->{$task_package}->{api_calls} }, $api_calls ); + push( @{ $pkg_stats->{$task_package}->{domains} }, $domains ); + push( @{ $pkg_stats->{$task_package}->{signatures_total} }, $signatures_total ); + push( @{ $pkg_stats->{$task_package}->{signatures_alert} }, $signatures_alert ); + push( @{ $pkg_stats->{$task_package}->{files_written} }, $files_written ); + push( @{ $pkg_stats->{$task_package}->{registry_keys_modified} }, $registry_keys_modified ); + push( @{ $pkg_stats->{$task_package}->{crash_issues} }, $crash_issues ); + push( @{ $pkg_stats->{$task_package}->{anti_issues} }, $anti_issues ); } + $return_json->{data}->{pkg_stats}->{$task_package}->{$task_status}++; # timedout value is not a perl boolean if ( $timedout =~ /^[Ff]/ 
) { @@ -528,7 +665,43 @@ if ($@) { $return_json->{errorString} = $return_json->{errorString} . ' SQL error: ' . $@; } +# +# put together the stats for the reported items +# +foreach my $task_id ( keys( %{$reported} ) ) { + eval { + my $report = decode_json( read_file( $config->{storage} . '/analyses/' . $task_id . '/reports/lite.json' ) ); + my $package = $report->{info}{package}; + if ( defined( $report->{malscore} ) ) { + push( @{ $ag_stats->{malscore} }, $report->{malscore} ); + push( @{ $pkg_stats->{$package}{malscore} }, $report->{malscore} ); + } + + my $sig_int = 0; + while ( defined( $report->{signatures}[$sig_int] ) ) { + if ( defined( $report->{signatures}[$sig_int]{confidence} ) ) { + push( @{ $ag_stats->{confidence} }, $report->{signatures}[$sig_int]{confidence} ); + push( @{ $pkg_stats->{$package}{confidence} }, $report->{signatures}[$sig_int]{confidence} ); + } + + if ( defined( $report->{signatures}[$sig_int]{severity} ) ) { + push( @{ $ag_stats->{severity} }, $report->{signatures}[$sig_int]{severity} ); + push( @{ $pkg_stats->{$package}{severity} }, $report->{signatures}[$sig_int]{severity} ); + } + + if ( defined( $report->{signatures}[$sig_int]{weight} ) ) { + push( @{ $ag_stats->{weight} }, $report->{signatures}[$sig_int]{weight} ); + push( @{ $pkg_stats->{$package}{weight} }, $report->{signatures}[$sig_int]{weight} ); + } + + $sig_int++; + } + }; +} + +# # compute the aggregate stats +# foreach my $current_entry (@stats_for) { if ( $#{ $ag_stats->{$current_entry} } > 0 ) { $return_json->{data}{ 'min.' . $current_entry } = min( @{ $ag_stats->{$current_entry} } ); @@ -557,7 +730,9 @@ foreach my $current_entry (@stats_for) { } +# # compute the stats for each package +# foreach my $current_pkg ( keys( %{$pkg_stats} ) ) { foreach my $current_entry (@stats_for) { if ( $#{ $pkg_stats->{$current_pkg}{$current_entry} } > 0 ) { From 478e7c96dcf0ebcfb64ef66be8d5e75637586d73 Mon Sep 17 00:00:00 2001 From: "Zane C. 
Bowers-Hadley" Date: Tue, 24 Jan 2023 13:00:25 -0600 Subject: [PATCH 363/497] fix depends for cape extend (#454) --- snmp/cape | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) mode change 100755 => 100644 snmp/cape diff --git a/snmp/cape b/snmp/cape old mode 100755 new mode 100644 index c85418014..8c58b3f02 --- a/snmp/cape +++ b/snmp/cape @@ -36,7 +36,9 @@ Supported command line options are as below. Depends can be installed via... - apt-get install libfile-readbackwards-perl libjson-perl libconfig-tiny-perl libdbi-perl libfile-slurp-perl libstatistics-lite-perl + + apt-get install libfile-readbackwards-perl libjson-perl libconfig-tiny-perl libdbi-perl libfile-slurp-perl libstatistics-lite-perl libdbi-perl libdbd-pg-perl + The defeault setttings are... From ab325efa4a51f378264247ec5f2e89f54afc452d Mon Sep 17 00:00:00 2001 From: bnerickson Date: Sat, 28 Jan 2023 05:51:18 -0800 Subject: [PATCH 364/497] Initial commit for linux_iw script (#442) --- snmp/linux_iw.py | 409 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 409 insertions(+) create mode 100755 snmp/linux_iw.py diff --git a/snmp/linux_iw.py b/snmp/linux_iw.py new file mode 100755 index 000000000..1b3cb4196 --- /dev/null +++ b/snmp/linux_iw.py @@ -0,0 +1,409 @@ +#!/usr/bin/env python + +""" +Name: linux_iw Script +Author: bnerickson w/SourceDoctor's certificate.py script forming the base + of the vast majority of this one. +Version: 1.0 +Description: This is a simple script to parse iw command output for ingestion into LibreNMS via the + linux_iw application. This script can be used on wireless clients as well as wireless + access points. +Installation: + 1. Copy this script to /etc/snmp/ and make it executable: + chmod +x /etc/snmp/linux_iw.py + 2. Edit your snmpd.conf and include: + extend linux_iw /etc/snmp/linux_iw.py + 3. (optional) Create a /etc/snmp/linux_iw.json file and specify: + a.) 
(optional) "linux_iw_cap_lifetime" - Specify the number of days a dead client (for + APs) or AP (for clients) should remain on the graphs in LibreNMS before being removed + (data is not removed, however). There are two special values that can also be used: + specifying '0' will never expire any client and specifying '-1' (or any negative + value) will result in NO client wireless metrics being graphed in LibreNMS [global + default: 0] + b.) (optional) "iw_cmd" - String path to the wg binary [default: "/usr/sbin/iw"] + c.) (optional) "mac_addr_to_friendly_name" - A dictionary to convert between the wireless + mac address and a friendly, arbitrary name for wireless clients. This name will be + used on the graph titles in LibreNMS, so it's just for readability and easier human = + parsing of data. + ``` + { + "linux_iw_cap_lifetime": 50, + "iw_cmd": "/bin/iw", + "mac_addr_to_friendly_name": { + "00:53:00:00:00:01": "client_1.domain.tlv", + "00:53:ff:ff:ff:ff": "my_tablet" + } + } + ``` + 4. Restart snmpd and activate the app for desired host. 
+""" + +import json +import re +import subprocess +import sys + +VALID_MAC_ADDR = ( + r"([0-9a-fA-F][0-9a-fA-F]:" + + r"[0-9a-fA-F][0-9a-fA-F]:" + + r"[0-9a-fA-F][0-9a-fA-F]:" + + r"[0-9a-fA-F][0-9a-fA-F]:" + + r"[0-9a-fA-F][0-9a-fA-F]:" + + r"[0-9a-fA-F][0-9a-fA-F])" +) +CONFIG_FILE = "/etc/snmp/linux_iw.json" +INITIAL_REGEX_MAPPER = { + "interfaces": { + "regex": r"(?m)\s+Interface (.+)$", + }, + "stations": {"regex": r"(?m)^Station " + VALID_MAC_ADDR + r" \(on "}, +} +SUB_REGEX_MAPPER = { + "interface_info": { + "center1": { + "regex": ( + r"^\s+channel \d+ \(\d+ MHz\), width: \d+ MHz,.*center1: " + + r"(\d+) MHz" + ), + "variable_type": "type_int", + }, + "center2": { + "regex": ( + r"^\s+channel \d+ \(\d+ MHz\), width: \d+ MHz,.*center2: " + + r"(\d+) MHz" + ), + "variable_type": "type_int", + }, + "channel": { + "regex": r"^\s+channel \d+ \((\d+) MHz\)", + "variable_type": "type_int", + }, + "ssid": { + "regex": r"^\s+ssid (.+)$", + "variable_type": "type_string", + }, + "txpower": { + "regex": r"^\s+txpower (\d+\.\d+) dBm$", + "variable_type": "type_float", + }, + "type": { + "regex": r"^\s+type (.+)$", + "variable_type": "type_string", + }, + "width": { + "regex": r"^\s+channel \d+ \(\d+ MHz\), width: (\d+) MHz", + "variable_type": "type_int", + }, + }, + "station_get": { + "beacon_interval": { + "regex": r"^\s+beacon interval:\s*(\d+)$", + "variable_type": "type_int", + }, + "connected_time": { + "regex": r"^\s+connected time:\s*(\d+) seconds$", + "variable_type": "type_int", + }, + "dtim_interval": { + "regex": r"^\s+DTIM period:\s*(\d+)$", + "variable_type": "type_int", + }, + "inactive_time": { + "regex": r"^\s+inactive time:\s*(\d+) ms$", + "variable_type": "type_int", + }, + "rx_bitrate": { + "regex": r"^\s+rx bitrate:\s*(\d+\.\d+) MBit\/s.*", + "variable_type": "type_float", + }, + "rx_bytes": { + "regex": r"^\s+rx bytes:\s*(\d+)$", + "variable_type": "type_int", + }, + "rx_drop_misc": { + "regex": r"^\s+rx drop misc:\s*(\d+)$", + "variable_type": 
"type_int", + }, + "rx_duration": { + "regex": r"^\s+rx duration:\s*(\d+) us$", + "variable_type": "type_int", + }, + "rx_packets": { + "regex": r"^\s+rx packets:\s*(\d+)$", + "variable_type": "type_int", + }, + "signal": { + "regex": r"^\s+signal:\s*(-?\d+) \[-?\d+, -?\d+\] dBm$", + "variable_type": "type_int", + }, + "tx_bitrate": { + "regex": r"^\s+tx bitrate:\s*(\d+\.\d+) MBit\/s.*", + "variable_type": "type_float", + }, + "tx_bytes": { + "regex": r"^\s+tx bytes:\s*(\d+)$", + "variable_type": "type_int", + }, + "tx_failed": { + "regex": r"^\s+tx failed:\s*(\d+)$", + "variable_type": "type_int", + }, + "tx_packets": { + "regex": r"^\s+tx packets:\s*(\d+)$", + "variable_type": "type_int", + }, + "tx_retries": { + "regex": r"^\s+tx retries:\s*(\d+)$", + "variable_type": "type_int", + }, + }, + "survey_dump": { + "noise": { + "regex": r"^\s+noise:\s*(-?\d+) dBm$", + "variable_type": "type_int", + }, + "channel_active_time": { + "regex": r"^\s+channel active time:\s*(\d+) ms$", + "variable_type": "type_int", + }, + "channel_busy_time": { + "regex": r"^\s+channel busy time:\s*(\d+) ms$", + "variable_type": "type_int", + }, + "channel_receive_time": { + "regex": r"^\s+channel receive time:\s*(\d+) ms$", + "variable_type": "type_int", + }, + "channel_transmit_time": { + "regex": r"^\s+channel transmit time:\s*(\d+) ms$", + "variable_type": "type_int", + }, + }, +} +IW_CMD = "/usr/sbin/iw" + + +def error_handler(error_name, err): + """ + error_handler(): Common error handler for config/output parsing and command execution. + Inputs: + error_name: String describing the error handled. + err: The error message in its entirety. + Outputs: + None + """ + output_data = { + "errorString": f"{error_name}: '{err}'", + "error": 1, + "version": 1, + "data": {}, + } + print(json.dumps(output_data)) + sys.exit(1) + + +def config_file_parser(): + """ + config_file_parser(): Parses the config file (if it exists) and extracts the + necessary parameters. 
+ + Inputs: + None + Outputs: + iw_cmd: The full iw binary as a string in a list to execute. + mac_addr_to_friendly_name: Dictionary mapping of mac addresses to friendly, arbitrary names. + """ + linux_iw_cap_lifetime = None + iw_cmd = [IW_CMD] + mac_addr_to_friendly_name = {} + + # Load configuration file if it exists + try: + with open(CONFIG_FILE, "r") as json_file: + config_file = json.load(json_file) + if "linux_iw_cap_lifetime" in config_file: + linux_iw_cap_lifetime = config_file["linux_iw_cap_lifetime"] + if "iw_cmd" in config_file: + iw_cmd = [config_file["iw_cmd"]] + if "mac_addr_to_friendly_name" in config_file: + # Convert all mac addresses to lower case. + mac_addr_to_friendly_name = dict( + (k.lower(), v) + for k, v in config_file["mac_addr_to_friendly_name"].items() + ) + except FileNotFoundError: + pass + except (KeyError, PermissionError, OSError, json.decoder.JSONDecodeError) as err: + error_handler("Config File Error", err) + + # Create and return full iw command. + return linux_iw_cap_lifetime, iw_cmd, mac_addr_to_friendly_name + + +def command_executor(iw_cmd, iw_args, command_output_regex): + """ + command_executor(): Execute the iw command and return the output. + + Inputs: + iw_cmd: The full iw binary as a string in a list. + iw_args: Args to pass to the iw command. + command_output_refex: Regex to filter output after command execution. + Outputs: + poutput: The utf-8-encoded stdout of the executed command. + """ + try: + # Execute iw command + poutput = subprocess.check_output( + iw_cmd + iw_args, + stdin=None, + stderr=subprocess.PIPE, + ) + except (subprocess.CalledProcessError, OSError) as err: + error_handler("Command Execution Error", err) + + # Filter stdout with regex if it was passed. 
+ if command_output_regex: + regex_search = re.search(command_output_regex, poutput.decode("utf-8")) + poutput = regex_search.group().encode("utf-8") if regex_search else None + + return poutput + + +def output_parser(iw_output, iw_regex_dict): + """ + output_parser(): Parses the iw command output and returns a dictionary + of PSU metrics. + + Inputs: + iw_output: The iw command stdout + iw_regex_dict: A dictionary of regex and variable type values. + Outputs: + iw_data: A dictionary of iw metics. + """ + iw_data = {} + + if not iw_output: + return iw_data + + for line in iw_output.decode("utf-8").split("\n"): + for metric_type, regex_dict in iw_regex_dict.items(): + regex_search = re.search(regex_dict["regex"], line) + + if not regex_search: + continue + + try: + metric_value = regex_search.groups()[0] + + if regex_dict["variable_type"] == "type_int": + iw_data[metric_type] = int(metric_value) + if regex_dict["variable_type"] == "type_float": + iw_data[metric_type] = float(metric_value) + if regex_dict["variable_type"] == "type_string": + iw_data[metric_type] = str(metric_value) + except (IndexError, ValueError) as err: + error_handler("Command Output Parsing Error", err) + + return iw_data + + +def main(): + """ + main(): main function performs iw command execution and output parsing. + + Inputs: + None + Outputs: + None + """ + # Parse configuration file. + linux_iw_cap_lifetime, iw_cmd, mac_addr_to_friendly_name = config_file_parser() + + output_data = { + "errorString": "", + "error": 0, + "version": 1, + "data": { + "linux_iw_cap_lifetime": int(linux_iw_cap_lifetime) + if linux_iw_cap_lifetime + else None, + "friendly_names": mac_addr_to_friendly_name, + "interfaces": {}, + }, + } + + # Get list of interfaces + interfaces = re.findall( + INITIAL_REGEX_MAPPER["interfaces"]["regex"], + command_executor(iw_cmd, ["dev"], None).decode("utf-8"), + ) + + # Get operational mode of each interface. 
+ + # Get interface commands output + for interface in interfaces: + output_data["data"]["interfaces"][interface] = {} + + # Get interface info + output_data["data"]["interfaces"][interface].update( + output_parser( + command_executor(iw_cmd, ["dev", interface, "info"], None), + SUB_REGEX_MAPPER["interface_info"], + ) + ) + + survey_dump_command_output_regex = ( + r"(?m)Survey data from " + + interface + + r"\s+frequency:\s*\d+ MHz \[in use\]\n(\s+.*\n)+" + ) + # Get survey info + output_data["data"]["interfaces"][interface].update( + output_parser( + command_executor( + iw_cmd, + [interface, "survey", "dump"], + survey_dump_command_output_regex, + ), + SUB_REGEX_MAPPER["survey_dump"], + ) + ) + + # Get list of stations connected to interface + stations = re.findall( + INITIAL_REGEX_MAPPER["stations"]["regex"] + interface + r"\)$", + command_executor( + iw_cmd, ["dev", interface, "station", "dump"], None + ).decode("utf-8"), + ) + + # Get station info + output_data["data"]["interfaces"][interface]["caps"] = {} + for station in stations: + output_data["data"]["interfaces"][interface]["caps"][station] = {} + output_data["data"]["interfaces"][interface]["caps"][station].update( + output_parser( + command_executor( + iw_cmd, ["dev", interface, "station", "get", station], None + ), + SUB_REGEX_MAPPER["station_get"], + ) + ) + + # Calculate SNR + if ( + "noise" not in output_data["data"]["interfaces"][interface] + or "signal" + not in output_data["data"]["interfaces"][interface]["caps"][station] + ): + continue + output_data["data"]["interfaces"][interface]["caps"][station]["snr"] = ( + output_data["data"]["interfaces"][interface]["caps"][station]["signal"] + - output_data["data"]["interfaces"][interface]["noise"] + ) + + print(json.dumps(output_data)) + + +if __name__ == "__main__": + main() From ad7e1786ea0a0509ce21db948534f03af76c200c Mon Sep 17 00:00:00 2001 From: "Zane C. 
Bowers-Hadley" Date: Thu, 16 Feb 2023 20:14:30 -0600 Subject: [PATCH 365/497] add a extend for privoxy (#458) --- snmp/privoxy | 449 +++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 449 insertions(+) create mode 100755 snmp/privoxy diff --git a/snmp/privoxy b/snmp/privoxy new file mode 100755 index 000000000..b52405910 --- /dev/null +++ b/snmp/privoxy @@ -0,0 +1,449 @@ +#!/usr/bin/env perl + +#Copyright (c) 2023, Zane C. Bowers-Hadley +#All rights reserved. +# +#Redistribution and use in source and binary forms, with or without modification, +#are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +#ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +#WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +#IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, +#INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +#BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +#LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +#OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +#THE POSSIBILITY OF SUCH DAMAGE. + +=for comment + +Add this to snmpd.conf as below and restart snmpd. + + extend privoxy /etc/snmp/extends/privoxy + +Supported command line options are as below. + + -f Logfile. 
+ Default: /var/log/privoxy/logfile + -c gzip+base64 compression + -p Pretty print. + +The last is only really relevant to the usage with SNMP. + +=cut + +use strict; +use warnings; +use Getopt::Std; +use File::ReadBackwards; +use JSON; +use Time::Piece; +use IPC::Run3; + +# get the current time +my $t = localtime; +my $till = $t->epoch; +$till = $till - 300; + +# needed as strptime will always assume UTC, resulting in localtime and it being off +if ( $t->tzoffset =~ /^-/ ) { + my $offset = $t->tzoffset; + $offset =~ s/^\-//; + $till = $till - $offset; +} +else { + my $offset = $t->tzoffset; + $offset =~ s/^\+//; + $till = $till + $offset; +} + +my $logfile = '/var/log/privoxy/logfile'; +my $compress; + +#gets the options +my %opts; +getopts( 'f:cp', \%opts ); +if ( defined( $opts{f} ) ) { + $logfile = $opts{f}; +} +if ( defined( $opts{c} ) ) { + $compress = 1; +} + +my $json = JSON->new->allow_nonref->canonical(1); +if ( $opts{p} ) { + $json->pretty(); +} + +# initiate what will be returned +my $to_return = { + error => 0, + errorString => '', + version => 1, + data => { + client_requests => 0, + client_cons => 0, + out_requests => 0, + crunches => 0, + blocks => 0, + block_percent => 0, + fast_redirs => 0, + con_timeouts => 0, + con_failures => 0, + ska_offers => 0, + nog_conns => 0, + reused_server_cons => 0, + empty_resps => 0, + empty_resps_new => 0, + empty_resps_reuse => 0, + imp_accounted => 0, + req_get => 0, + req_head => 0, + req_post => 0, + req_put => 0, + req_delete => 0, + req_connect => 0, + req_options => 0, + req_trace => 0, + req_patch => 0, + ver_1_0 => 0, + ver_1_1 => 0, + ver_2 => 0, + ver_3 => 0, + max_reqs => 0, + bytes_to_client => 0, + resp_1xx => 0, + resp_2xx => 0, + resp_200 => 0, + resp_2xx_other => 0, + resp_3xx => 0, + resp_301 => 0, + resp_302 => 0, + resp_303 => 0, + resp_3xx_other => 0, + resp_4xx => 0, + resp_403 => 0, + resp_404 => 0, + resp_451 => 0, + resp_4xx_other => 0, + resp_5xx => 0, + resp_500 => 0, + resp_502 => 0, + 
resp_503 => 0, + resp_504 => 0, + resp_5xx_other => 0, + unique_bdomains => 0, + unique_bdomains_np => 0, + unique_domains => 0, + unique_domains_np => 0, + ubd_np_per => 0, + ubd_per => 0, + }, +}; + +my $bw; +eval { $bw = File::ReadBackwards->new($logfile) + or die "can't read " . $logfile . "... $!"; }; +if ($@) { + $to_return->{error} = 1; + $to_return->{errorString} = $@; + $to_return->{data} = {}; + print $json->encode($to_return); + if ( !$opts{p} ) { + print "\n"; + } + exit 0; +} + +my $read_file = 1; + +# holds a list of blocked domains found +my $unique_bdomains = {}; +my $unique_bdomains_np = {}; + +# holds a list of domains found +my $unique_domains = {}; +my $unique_domains_np = {}; + +# read all log lines in reverse +my $lines = ''; +my $log_line = ''; +while ( defined( $log_line = $bw->readline ) + && $read_file ) +{ + my $log_t; + + # get the timestamp on non-CLF style log lines + if ( $log_line =~ /^(?\d\d\d\d\-\d\d\-\d\d\ \d\d\:\d\d\:\d\d)/ ) { + $log_t = Time::Piece->strptime( $+{timestamp}, '%Y-%m-%d %H:%M:%S' ); + } + + # get the timestamp on CLF style log lines + elsif ( $log_line =~ /\[(?\d\d\/[A-Za-z]+\/\d\d\d\d\:\d\d\:\d\d\:\d\d)\]/ ) { + $log_t = Time::Piece->strptime( $+{timestamp}, '%d/%b/%Y:%H:%M:%S' ); + } + + if ( defined($log_t) ) { + + # if we have gone beyond where we want to go to, then stop... + # otherwise add it + if ( $log_t->epoch < $till ) { + $read_file = 0; + } + else { + $lines = $log_line . 
$lines; + + if ( $log_line =~ /^\d\d\d\d\-\d\d\-\d\d\ \d\d\:\d\d\:\d\d.*Crunch\:\ Blocked\:\ / ) { + my $log_line_tmp = $log_line; + $log_line_tmp =~ s/.*Crunch\:\ Blocked\:\ //; + $unique_bdomains->{$log_line_tmp} = 1; + $log_line =~ s/\:\d+$//; + $unique_bdomains_np->{$log_line_tmp} = 1; + } + if ( $log_line =~ /^\d\d\d\d\-\d\d\-\d\d\ \d\d\:\d\d\:\d\d.*Connect\:\ to\ / ) { + my $log_line_tmp = $log_line; + $log_line_tmp =~ s/.*Connect\:\ to\ //; + + # if it has a space, it is a line displaying the stating of the connect + if ( $log_line_tmp !~ /\ / ) { + $unique_domains->{$log_line_tmp} = 1; + $log_line =~ s/\:\d+$//; + $unique_domains_np->{$log_line_tmp} = 1; + } + } + } + } + + # if we don't have log_t, just add the line and lot the log parser figure out what it is + else { + $lines = $log_line . $lines; + } +} + +my $stdout; +my $stderr; +my @cmd = ( 'privoxy-log-parser.pl', '--statistics', '--show-complete-request-distribution' ); +run3( \@cmd, \$lines, \$stdout, \$stderr ); + +my @stdout_split = split( /\n/, $stdout ); + +my $multiline_mode; +foreach my $line (@stdout_split) { + + # needed as some lines have white space on the end that makes parsing annoying + $line =~ s/\ +$//; + + # start processing lines based on the start of the line + if ( $line =~ /^Client\ requests\ total\:/ ) { + $multiline_mode = ''; + $line =~ s/.*\:\ //; + $to_return->{data}{client_requests} = $line; + } + elsif ( $line =~ /^Crunches\:/ ) { + $multiline_mode = ''; + $line =~ s/.*\:\ //; + $line =~ s/\ .*$//; + $to_return->{data}{crunches} = $line; + } + elsif ( $line =~ /^Blocks:/ ) { + $multiline_mode = ''; + $line =~ s/.*\:\ //; + $line =~ s/\ .*$//; + $to_return->{data}{blocks} = $line; + } + elsif ( $line =~ /^Fast\ redirections\:/ ) { + $multiline_mode = ''; + $line =~ s/.*\:\ //; + $line =~ s/\ .*$//; + $to_return->{data}{fast_redirs} = $line; + } + elsif ( $line =~ /^Connection\ timeouts\:/ ) { + $multiline_mode = ''; + $line =~ s/.*\:\ //; + $line =~ s/\ .*$//; + 
$to_return->{data}{con_timeouts} = $line; + } + elsif ( $line =~ /^Connection\ failures\:/ ) { + $multiline_mode = ''; + $line =~ s/.*\:\ //; + $line =~ s/\ .*$//; + $to_return->{data}{con_failures} = $line; + } + elsif ( $line =~ /^Outgoing\ requests\:/ ) { + $multiline_mode = ''; + $line =~ s/.*\:\ //; + $line =~ s/\ .*$//; + $to_return->{data}{out_requests} = $line; + } + elsif ( $line =~ /^Server keep-alive offers\:/ ) { + $multiline_mode = ''; + $line =~ s/.*\:\ //; + $line =~ s/\ .*$//; + $to_return->{data}{ska_offers} = $line; + } + elsif ( $line =~ /^New\ outgoing\ connections\:/ ) { + $multiline_mode = ''; + $line =~ s/.*\:\ \-//; + $line =~ s/\ .*$//; + $to_return->{data}{nog_conns} = $line; + } + elsif ( $line =~ /^Reused\ server\ connections\:/ ) { + $multiline_mode = ''; + $line =~ s/.*connections\:\ //; + $line =~ s/\ .*$//; + $to_return->{data}{reused_server_cons} = $line; + } + elsif ( $line =~ /^Empty\ responses\:/ ) { + $multiline_mode = ''; + $line =~ s/.*\:\ //; + $line =~ s/\ .*$//; + $to_return->{data}{empty_resps} = $line; + } + elsif ( $line =~ /^Empty\ responses\ on\ new\ connections\:/ ) { + $multiline_mode = ''; + $line =~ s/.*\:\ //; + $line =~ s/\ .*$//; + $to_return->{data}{empty_resps_new} = $line; + } + elsif ( $line =~ /^Empty\ responses\ on\ reused\ connections\:/ ) { + $multiline_mode = ''; + $line =~ s/.*\:\ //; + $line =~ s/\ .*$//; + $to_return->{data}{empty_resps_reuse} = $line; + } + elsif ( $line =~ /^Client\ connections\:/ ) { + $multiline_mode = ''; + $line =~ s/.*\:\ //; + $line =~ s/\ .*$//; + $to_return->{data}{client_cons} = $line; + } + elsif ( $line =~ /^Bytes\ of\ content\ transferred\ to\ the\ client\:/ ) { + $multiline_mode = ''; + $line =~ s/.*\:\ //; + $line =~ s/\ .*$//; + $to_return->{data}{bytes_to_client} = $line; + } + elsif ( $line =~ /^Improperly\ accounted\ requests\:/ ) { + $multiline_mode = ''; + $line =~ s/.*\:\ \~//; + $line =~ s/\ .*$//; + $to_return->{data}{imp_accounted} = $line; + } + + # match 
various multi line modes starts + elsif ( $line =~ /^Client\ requests\ per\ connection\ distribution\:/ ) { + $multiline_mode = 'requests per con'; + } + elsif ( $line =~ /^Method\ distribution\:/ ) { + $multiline_mode = 'method'; + } + elsif ( $line =~ /^Client HTTP versions:/ ) { + $multiline_mode = 'version'; + } + elsif ( $line + =~ /^HTTP\ status\ codes\ according\ to\ \'debug\ 512\' \(status\ codes\ sent\ by\ the\ server\ may\ differ\)\:/ + ) + { + $multiline_mode = 'response'; + } + + # if it starts with a space, it is a multiline mode item + elsif ( $line =~ /^\ / ) { + if ( $multiline_mode eq 'requsts per con' ) { + $line =~ s/.*\:\ //; + if ( $line > $to_return->{data}{max_reqs} ) { + $to_return->{data}{max_reqs} = $line; + } + } + elsif ( $multiline_mode eq 'method' ) { + $line =~ s/^ +//; + my ( $count, $method ) = split( /\ \:\ /, $line ); + $method = lc($method); + if ( defined( $to_return->{data}{ 'req_' . $method } ) ) { + $to_return->{data}{ 'req_' . $method } = $count; + } + } + elsif ( $multiline_mode eq 'version' ) { + $line =~ s/^ +//; + my ( $count, $version ) = split( /\ \:\ /, $line ); + $version = lc($version); + $version =~ s/http\//ver_/; + $version =~ s/\./_/g; + if ( defined( $to_return->{data}{$version} ) ) { + $to_return->{data}{$version} = $count; + } + } + elsif ( $multiline_mode eq 'response' ) { + $line =~ s/^ +//; + my ( $count, $response ) = split( /\ \:\ /, $line ); + if ( defined( $to_return->{data}{ 'resp_' . $response } ) ) { + + $to_return->{data}{ 'resp_' . 
$response } = $count; + } + elsif ( $response =~ /^2\d\d/ ) { + $to_return->{data}{resp_2xx_other} = $to_return->{data}{resp_2xx_other} + $count; + } + elsif ( $response =~ /^3\d\d/ ) { + $to_return->{data}{resp_3xx_other} = $to_return->{data}{resp_3xx_other} + $count; + } + elsif ( $response =~ /^4\d\d/ ) { + $to_return->{data}{resp_4xx_other} = $to_return->{data}{resp_4xx_other} + $count; + } + elsif ( $response =~ /^5\d\d/ ) { + $to_return->{data}{resp_3xx_other} = $to_return->{data}{resp_5xx_other} + $count; + } + + if ( $response =~ /^1\d\d/ ) { + $to_return->{data}{resp_1xx} = $to_return->{data}{resp_1xx} + $count; + } + elsif ( $response =~ /^2\d\d/ ) { + $to_return->{data}{resp_2xx} = $to_return->{data}{resp_2xx} + $count; + } + elsif ( $response =~ /^3\d\d/ ) { + $to_return->{data}{resp_3xx} = $to_return->{data}{resp_3xx} + $count; + } + elsif ( $response =~ /^4\d\d/ ) { + $to_return->{data}{resp_4xx} = $to_return->{data}{resp_4xx} + $count; + } + elsif ( $response =~ /^5\d\d/ ) { + $to_return->{data}{resp_5xx} = $to_return->{data}{resp_5xx} + $count; + } + } + } + else { + $multiline_mode = ''; + } +} + +my @keys_tmp = keys( %{$unique_bdomains} ); +$to_return->{data}{unique_bdomains} = @keys_tmp; +@keys_tmp = keys( %{$unique_bdomains_np} ); +$to_return->{data}{unique_bdomains_np} = @keys_tmp; +@keys_tmp = keys( %{$unique_domains} ); +$to_return->{data}{unique_domains} = @keys_tmp; +@keys_tmp = keys( %{$unique_domains_np} ); +$to_return->{data}{unique_domains_np} = @keys_tmp; + +if ( $to_return->{data}{unique_domains} > 0 && $to_return->{data}{unique_bdomains} > 0 ) { + $to_return->{data}{ubd_per} = $to_return->{data}{unique_bdomains} / $to_return->{data}{unique_domains}; + $to_return->{data}{ubd_np_per} = $to_return->{data}{unique_bdomains_np} / $to_return->{data}{unique_domains_np}; +} + +# percentage of requests blocked +if ( $to_return->{data}{blocks} > 0 && $to_return->{data}{client_requests} > 0 ) { + $to_return->{data}{block_percent} = 
$to_return->{data}{blocks} / $to_return->{data}{client_requests}; +} + +print $json->encode($to_return); +if ( !$opts{p} ) { + print "\n"; +} +exit 0; From 449b0528fbdf42b5374698099513df2270ca6da0 Mon Sep 17 00:00:00 2001 From: Henne Van Och Date: Wed, 1 Mar 2023 01:09:43 +0100 Subject: [PATCH 366/497] Improve docker stats (#450) --- snmp/docker-stats.py | 112 +++++++++++++++++++++++++++++++++++++++++++ snmp/docker-stats.sh | 38 --------------- 2 files changed, 112 insertions(+), 38 deletions(-) create mode 100644 snmp/docker-stats.py delete mode 100644 snmp/docker-stats.sh diff --git a/snmp/docker-stats.py b/snmp/docker-stats.py new file mode 100644 index 000000000..7460c6d4b --- /dev/null +++ b/snmp/docker-stats.py @@ -0,0 +1,112 @@ +#!/usr/bin/env python3 +import datetime +import json +import subprocess + +from dateutil import parser + +VERSION = 2 +ONLY_RUNNING_CONTAINERS = True + + +def run(cmd): + res = subprocess.check_output(cmd, stderr=subprocess.STDOUT) + return res + + +def inspectContainer(container): + raw = run(["docker", "inspect", "-s", container]) + data = json.loads(raw) + + return data + + +def getStats(): + command = [ + "docker", + "stats", + "--no-stream", + "--no-trunc", + "--format", + "{{ json . 
}}", + ] + if not ONLY_RUNNING_CONTAINERS: + command.append("-a") + raw = run(command) + lines = raw.split(b"\n") + containers = [] + for line in lines[0:-1]: + containers.append(json.loads(line)) + + return containers + + +def dump(): + containers = [] + try: + stats_containers = getStats() + except subprocess.CalledProcessError as e: + print( + json.dumps( + { + "version": VERSION, + "data": containers, + "error": e.returncode, + "errorString": e.output.decode("utf-8"), + } + ) + ) + return + + for container in stats_containers: + try: + inspected_container = inspectContainer(container["Name"]) + except subprocess.CalledProcessError: + continue + + started_at = parser.parse(inspected_container[0]["State"]["StartedAt"]) + + if inspected_container[0]["State"]["Running"]: + finished_at = datetime.datetime.now(started_at.tzinfo) + else: + finished_at = parser.parse(inspected_container[0]["State"]["FinishedAt"]) + + uptime = finished_at - started_at + + containers.append( + { + "container": container["Name"], + "pids": container["PIDs"], + "memory": { + "used": container["MemUsage"].split(" / ")[0], + "limit": container["MemUsage"].split(" / ")[1], + "perc": container["MemPerc"], + }, + "cpu": container["CPUPerc"], + "size": { + "size_rw": inspected_container[0]["SizeRw"], + "size_root_fs": inspected_container[0]["SizeRootFs"], + }, + "state": { + "status": inspected_container[0]["State"]["Status"], + "started_at": inspected_container[0]["State"]["StartedAt"], + "finished_at": inspected_container[0]["State"]["FinishedAt"], + "uptime": round(uptime.total_seconds()), + }, + } + ) + + print( + json.dumps( + { + "version": VERSION, + "data": containers, + "error": "0", + "errorString": "", + } + ) + ) + + +if __name__ == "__main__": + dump() diff --git a/snmp/docker-stats.sh b/snmp/docker-stats.sh deleted file mode 100644 index 7ac7473f2..000000000 --- a/snmp/docker-stats.sh +++ /dev/null @@ -1,38 +0,0 @@ -#!/usr/bin/env bash - -VERSION=1 - -function dockerStatsFormat() { 
- cat <&1) -ERROR=$? -if [ $ERROR -ne 0 ];then - ERROR_STRING=${STATS} - unset STATS -fi -jq -nMc \ - --slurpfile stats <(echo "${STATS:-}") \ - --arg version "${VERSION:-1}" \ - --arg error "${ERROR:-0}" \ - --arg errorString "${ERROR_STRING:-}" \ - '{"version": $version, "data": $stats, "error": $error, "errorString": $errorString }' - -# vim: tabstop=2:shiftwidth=2:expandtab: From 36c2172e4f8f14add06e4d13647bbc1e9c12fa73 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Sun, 19 Mar 2023 14:58:18 -0600 Subject: [PATCH 367/497] add new ZFS extend, for both Linux and FreeBSD (#460) --- snmp/zfs | 390 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 390 insertions(+) create mode 100755 snmp/zfs diff --git a/snmp/zfs b/snmp/zfs new file mode 100755 index 000000000..8a873c7f1 --- /dev/null +++ b/snmp/zfs @@ -0,0 +1,390 @@ +#!/usr/bin/env perl + +=head1 DESCRIPTION + +This is a SNMP extend for ZFS for use with LibreNMS. + +For more information, see L. + +=head1 SWITCHES + +=head2 -p + +Pretty print the JSON. + +=head1 SNMPD SETUP EXAMPLES + + extend zfs /etc/snmp/zfs + +=cut + +#Copyright (c) 2023, Zane C. Bowers-Hadley +#All rights reserved. +# +#Redistribution and use in source and binary forms, with or without modification, +#are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +#ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +#WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+#IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, +#INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +#BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +#LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +#OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +#THE POSSIBILITY OF SUCH DAMAGE. + +# Many thanks to Ben Rockwood, Jason J. Hellenthal, and Martin Matuska +# for zfs-stats and figuring out the math for all the stats + +use strict; +use warnings; +use JSON; +use Getopt::Std; +use File::Slurp; + +$Getopt::Std::STANDARD_HELP_VERSION = 1; + +sub main::VERSION_MESSAGE { + print "FreeBSD ZFS v3 stats extend 0.0.1\n"; +} + +sub main::HELP_MESSAGE { + +} + +#this will be dumped to json at the end +my %tojson; + +#gets the options +my %opts = (); +getopts( 'p', \%opts ); + +#process each pool and shove them into JSON +my $zpool_output = `/sbin/zpool list -pH`; +my @pools = split( /\n/, $zpool_output ); +my $pools_int = 0; +$tojson{online} = 0; +$tojson{degraded} = 0; +$tojson{offline} = 0; +$tojson{faulted} = 0; +$tojson{health} = 1; +$tojson{unavail} = 0; +$tojson{removed} = 0; +$tojson{unknown} = 0; +my @toShoveIntoJSON; + +while ( defined( $pools[$pools_int] ) ) { + my %newPool; + + my $pool = $pools[$pools_int]; + chomp($pool); + $pool =~ s/[\t\ ]+/,/g; + $pool =~ s/\,\-\,/\,0\,/g; + $pool =~ s/\%//g; + $pool =~ s/\,([0-1\.]*)x\,/,$1,/; + + ( + $newPool{name}, $newPool{size}, $newPool{alloc}, $newPool{free}, + $newPool{ckpoint}, $newPool{expandsz}, $newPool{frag}, $newPool{cap}, + $newPool{dedup}, $newPool{health}, $newPool{altroot} + ) = split( /\,/, $pool ); + + if ( $newPool{health} eq 'ONLINE' ) { + $newPool{health} = 0; + $tojson{online}++; + } + elsif ( $newPool{health} eq 'DEGRADED' ) { + $newPool{health} = 1; + 
$tojson{health} = 0; + $tojson{degraded}++; + } + elsif ( $newPool{health} eq 'OFFLINE' ) { + $newPool{health} = 2; + $tojson{offline}++; + } + elsif ( $newPool{health} eq 'FAULTED' ) { + $newPool{health} = 3; + $tojson{health} = 0; + $tojson{faulted}++; + } + elsif ( $newPool{health} eq 'UNAVAIL' ) { + $newPool{health} = 4; + $tojson{health} = 0; + $tojson{unavail}++; + } + elsif ( $newPool{health} eq 'REMOVED' ) { + $newPool{health} = 5; + $tojson{health} = 0; + $tojson{removed}++; + } + else { + $newPool{health} = 6; + $tojson{health} = 0; + $tojson{unknown}++; + } + + if ( $newPool{expandsz} eq '-' ) { + $newPool{expandsz} = 0; + } + + my $iostat = `zpool iostat -l -q -p -H $newPool{name}`; + chomp($iostat); + $iostat =~ s/\t/,/g; + $iostat =~ s/\,\-\,\-\,/\,0\,0\,/g; + $iostat =~ s/\%//g; + $iostat =~ s/\,([0-1\.]*)x\,/,$1,/; + chomp($iostat); + my $parsed; + ( + $parsed, $parsed, $newPool{operations_r}, $newPool{operations_w}, + $newPool{bandwidth_r}, $newPool{bandwidth_w}, $newPool{total_wait_r}, $newPool{total_wait_w}, + $newPool{disk_wait_r}, $newPool{disk_wait_w}, $newPool{syncq_wait_r}, $newPool{syncq_wait_w}, + $newPool{asyncq_wait_w}, $newPool{scrub_wait}, $newPool{trim_wait}, $newPool{syncq_read_p}, + $newPool{syncq_read_a}, $newPool{syncq_write_p}, $newPool{syncq_write_a}, $newPool{asyncq_read_p}, + $newPool{asyncq_read_a}, $newPool{asyncq_write_p}, $newPool{asyncq_write_a}, $newPool{scrubq_read_p}, + $newPool{scrubq_read_a}, $newPool{trimq_write_p}, $newPool{trimq_write_a}, + ) = split( /\,/, $iostat ); + + my @pool_keys = keys(%newPool); + foreach my $item (@pool_keys) { + if ( $item ne 'altroot' && $newPool{$item} eq '-' ) { + $newPool{$item} = 0; + } + } + + push( @toShoveIntoJSON, \%newPool ); + + $pools_int++; +} +$tojson{pools} = \@toShoveIntoJSON; + +# +# OS specific bits +# +my $stats_stuff = {}; +if ( $^O eq 'freebsd' ) { + my @to_pull = ( 'kstat.zfs', 'vfs.zfs', ); + my @sysctls_pull = `/sbin/sysctl -q @to_pull`; + foreach my $stat 
(@sysctls_pull) { + chomp($stat); + my ( $var, $val ) = split( /:/, $stat, 2 ); + + # If $val is empty, skip it. Likely a var with a newline before + # the data so it is trying to "split" the data. + if ( length $val ) { + $val =~ s/^ //; + $var =~ s/^.*\.arcstats\.//; + $stats_stuff->{$var} = $val; + } + } + +} +elsif ( $^O eq 'linux' ) { + my @arcstats_lines = read_file('/proc/spl/kstat/zfs/arcstats'); + foreach my $line (@arcstats_lines) { + chomp($line); + my ( $stat, $int, $value ) = split( /[\t\ ]+/, $line, 3 ); + $stats_stuff->{$stat} = $value; + } +} + +# does not seem to exist for me, but some of these don't seem to be created till needed +if ( !defined( $stats_stuff->{"recycle_miss"} ) ) { + $stats_stuff->{"recycle_miss"} = 0; +} + +## +## ARC misc +## +$tojson{deleted} = $stats_stuff->{"deleted"}; +$tojson{evict_skip} = $stats_stuff->{"evict_skip"}; +$tojson{mutex_skip} = $stats_stuff->{'mutex_miss'}; +$tojson{recycle_miss} = $stats_stuff->{"recycle_miss"}; + +## +## ARC size +## +my $target_size_percent = $stats_stuff->{"c"} / $stats_stuff->{"c_max"} * 100; +my $arc_size_percent = $stats_stuff->{"size"} / $stats_stuff->{"c_max"} * 100; +my $target_size_adaptive_ratio = $stats_stuff->{"c"} / $stats_stuff->{"c_max"}; +my $min_size_percent = $stats_stuff->{"c_min"} / $stats_stuff->{"c_max"} * 100; + +$tojson{arc_size} = $stats_stuff->{"size"}; +$tojson{target_size_max} = $stats_stuff->{"c_max"}; +$tojson{target_size_min} = $stats_stuff->{"c_min"}; +$tojson{target_size} = $stats_stuff->{"c"}; +$tojson{target_size_per} = $target_size_percent; +$tojson{arc_size_per} = $arc_size_percent; +$tojson{target_size_arat} = $target_size_adaptive_ratio; +$tojson{min_size_per} = $min_size_percent; + +## +## ARC size breakdown +## +my $mfu_size; +my $recently_used_percent; +my $frequently_used_percent; +if ( $stats_stuff->{"size"} >= $stats_stuff->{"c"} ) { + $mfu_size = $stats_stuff->{"size"} - $stats_stuff->{"p"}; + $recently_used_percent = $stats_stuff->{"p"} / 
$stats_stuff->{"size"} * 100; + $frequently_used_percent = $mfu_size / $stats_stuff->{"size"} * 100; +} +else { + $mfu_size = $stats_stuff->{"c"} - $stats_stuff->{"p"}; + $recently_used_percent = $stats_stuff->{"p"} / $stats_stuff->{"c"} * 100; + $frequently_used_percent = $mfu_size / $stats_stuff->{"c"} * 100; +} + +$tojson{p} = $stats_stuff->{"p"}; + +## +## ARC efficiency +## +my $arc_hits = $stats_stuff->{"hits"}; +my $arc_misses = $stats_stuff->{"misses"}; +my $demand_data_hits = $stats_stuff->{"demand_data_hits"}; +my $demand_data_misses = $stats_stuff->{"demand_data_misses"}; +my $demand_metadata_hits = $stats_stuff->{"demand_metadata_hits"}; +my $demand_metadata_misses = $stats_stuff->{"demand_metadata_misses"}; +my $mfu_ghost_hits = $stats_stuff->{"mfu_ghost_hits"}; +my $mfu_hits = $stats_stuff->{"mfu_hits"}; +my $mru_ghost_hits = $stats_stuff->{"mru_ghost_hits"}; +my $mru_hits = $stats_stuff->{"mru_hits"}; +my $prefetch_data_hits = $stats_stuff->{"prefetch_data_hits"}; +my $prefetch_data_misses = $stats_stuff->{"prefetch_data_misses"}; +my $prefetch_metadata_hits = $stats_stuff->{"prefetch_metadata_hits"}; +my $prefetch_metadata_misses = $stats_stuff->{"prefetch_metadata_misses"}; + +## +## ARC efficiency, common +## + +my $anon_hits = $arc_hits - ( $mfu_hits + $mru_hits + $mfu_ghost_hits + $mru_ghost_hits ); +my $arc_accesses_total = $arc_hits + $arc_misses; +my $demand_data_total = $demand_data_hits + $demand_data_misses; +my $prefetch_data_total = $prefetch_data_hits + $prefetch_data_misses; +my $real_hits = $mfu_hits + $mru_hits; + +my $cache_hit_percent = $arc_hits / $arc_accesses_total * 100; +my $cache_miss_percent = $arc_misses / $arc_accesses_total * 100; +my $actual_hit_percent = $real_hits / $arc_accesses_total * 100; + +my $data_demand_percent = 0; +if ( $demand_data_total != 0 ) { + $data_demand_percent = $demand_data_hits / $demand_data_total * 100; +} + +my $data_prefetch_percent = 0; +if ( $prefetch_data_total != 0 ) { + 
$data_prefetch_percent = $prefetch_data_hits / $prefetch_data_total * 100; +} + +my $anon_hits_percent; +if ( $anon_hits != 0 ) { + $anon_hits_percent = $anon_hits / $arc_hits * 100; +} +else { + $anon_hits_percent = 0; +} + +my $mru_percent = $mru_hits / $arc_hits * 100; +my $mfu_percent = $mfu_hits / $arc_hits * 100; +my $mru_ghost_percent = $mru_ghost_hits / $arc_hits * 100; +my $mfu_ghost_percent = $mfu_ghost_hits / $arc_hits * 100; + +my $demand_hits_percent = $demand_data_hits / $arc_hits * 100; +my $prefetch_hits_percent = $prefetch_data_hits / $arc_hits * 100; +my $metadata_hits_percent = $demand_metadata_hits / $arc_hits * 100; +my $prefetch_metadata_hits_percent = $prefetch_metadata_hits / $arc_hits * 100; + +my $demand_misses_percent = $demand_data_misses / $arc_misses * 100; +my $prefetch_misses_percent = $prefetch_data_misses / $arc_misses * 100; +my $metadata_misses_percent = $demand_metadata_misses / $arc_misses * 100; +my $prefetch_metadata_misses_percent = $prefetch_metadata_misses / $arc_misses * 100; + +# ARC misc. 
efficient stats +$tojson{arc_hits} = $arc_hits; +$tojson{arc_misses} = $arc_misses; +$tojson{demand_data_hits} = $demand_data_hits; +$tojson{demand_data_misses} = $demand_data_misses; +$tojson{demand_meta_hits} = $demand_metadata_hits; +$tojson{demand_meta_misses} = $demand_metadata_misses; +$tojson{mfu_ghost_hits} = $mfu_ghost_hits; +$tojson{mfu_hits} = $mfu_hits; +$tojson{mru_ghost_hits} = $mru_ghost_hits; +$tojson{mru_hits} = $mru_hits; +$tojson{pre_data_hits} = $prefetch_data_hits; +$tojson{pre_data_misses} = $prefetch_data_misses; +$tojson{pre_meta_hits} = $prefetch_metadata_hits; +$tojson{pre_meta_misses} = $prefetch_metadata_misses; +$tojson{anon_hits} = $anon_hits; +$tojson{arc_accesses_total} = $arc_accesses_total; +$tojson{demand_data_total} = $demand_data_total; +$tojson{pre_data_total} = $prefetch_data_total; +$tojson{real_hits} = $real_hits; + +# ARC efficient percents +$tojson{cache_hits_per} = $cache_hit_percent; +$tojson{cache_miss_per} = $cache_miss_percent; +$tojson{actual_hit_per} = $actual_hit_percent; +$tojson{data_demand_per} = $data_demand_percent; +$tojson{data_pre_per} = $data_prefetch_percent; +$tojson{anon_hits_per} = $anon_hits_percent; +$tojson{mru_per} = $mru_percent; +$tojson{mfu_per} = $mfu_percent; +$tojson{mru_ghost_per} = $mru_ghost_percent; +$tojson{mfu_ghost_per} = $mfu_ghost_percent; +$tojson{demand_hits_per} = $demand_hits_percent; +$tojson{pre_hits_per} = $prefetch_hits_percent; +$tojson{meta_hits_per} = $metadata_hits_percent; +$tojson{pre_meta_hits_per} = $prefetch_metadata_hits_percent; +$tojson{demand_misses_per} = $demand_misses_percent; +$tojson{pre_misses_per} = $prefetch_misses_percent; +$tojson{meta_misses_per} = $metadata_misses_percent; +$tojson{pre_meta_misses_per} = $prefetch_metadata_misses_percent; + +$tojson{mfu_size} = $mfu_size; +$tojson{rec_used_per} = $recently_used_percent; +$tojson{freq_used_per} = $frequently_used_percent; + +## +## pull in the l2 stats +## +my @l2_keys = grep( /l2\_/, keys( 
%{$stats_stuff} ) ); +foreach my $item (@l2_keys) { + $tojson{$item} = $stats_stuff->{$item}; +} +$tojson{l2_errors} = $tojson{l2_writes_error} + $tojson{l2_cksum_bad} + $tojson{l2_io_error}; +$tojson{l2_access_total} = $tojson{l2_hits} + $tojson{l2_misses}; + +## +## print the results +## + +my %head_hash; +$head_hash{'data'} = \%tojson; +$head_hash{'version'} = 3; +$head_hash{'error'} = 0; +$head_hash{'errorString'} = ''; + +my $j = JSON->new; + +if ( $opts{p} ) { + $j->pretty(1); +} + +print $j->encode( \%head_hash ); + +if ( !$opts{p} ) { + print "\n"; +} + +exit 0; From 93ea47c81564e1e455a8f4299c3b3b9a7658097c Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Wed, 26 Apr 2023 09:36:17 -0600 Subject: [PATCH 368/497] opensearch/elasticsearch fix as apparently ._all.total.indexing.is_throttled will sometimes be undef (#464) --- snmp/opensearch | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/snmp/opensearch b/snmp/opensearch index 2b133141f..5b731b2eb 100755 --- a/snmp/opensearch +++ b/snmp/opensearch @@ -1,6 +1,6 @@ #!/usr/bin/env perl -#Copyright (c) 2022, Zane C. Bowers-Hadley +#Copyright (c) 2023, Zane C. Bowers-Hadley #All rights reserved. 
# #Redistribution and use in source and binary forms, with or without modification, @@ -159,7 +159,7 @@ $to_return->{data}{c_in_fl_fetch} = $health_json->{number_of_in_flight_fetc $to_return->{data}{c_task_max_in_time} = $health_json->{task_max_waiting_in_queue_millis}; $to_return->{data}{c_act_shards_perc} = $health_json->{active_shards_percent_as_number}; -# status color to int, nagious style +# status color to int, nagios style # green / ok = 0 # yellow / warning = 1 # red / critical = 2 @@ -194,7 +194,9 @@ $to_return->{data}{ti_del_time} = $stats_json->{_all}{total}{indexing}{del $to_return->{data}{ti_noop_up_total} = $stats_json->{_all}{total}{indexing}{noop_update_total}; $to_return->{data}{ti_throttled_time} = $stats_json->{_all}{total}{indexing}{throttle_time_in_millis}; -if ( $stats_json->{_all}{total}{indexing}{is_throttled} eq 'true' ) { +if ( defined( $stats_json->{_all}{total}{indexing}{is_throttled} ) + && $stats_json->{_all}{total}{indexing}{is_throttled} eq 'true' ) +{ $to_return->{data}{ti_throttled} = 1; } else { From ce4c43ca08d6946ad9f1b2a38d534fbcf9363413 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Fri, 28 Apr 2023 07:47:56 -0600 Subject: [PATCH 369/497] minor doc changes and add gzip+base64 compression to snmp/zfs (#463) --- snmp/zfs | 51 +++++++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 45 insertions(+), 6 deletions(-) diff --git a/snmp/zfs b/snmp/zfs index 8a873c7f1..d80e73e2e 100755 --- a/snmp/zfs +++ b/snmp/zfs @@ -10,12 +10,31 @@ For more information, see Lnew; -if ( $opts{p} ) { +if ( $opts{p} && ! $opts{b} ) { $j->pretty(1); } -print $j->encode( \%head_hash ); +my $return_string = $j->encode( \%head_hash ); -if ( !$opts{p} ) { - print "\n"; +if ( !$opts{p} && ! $opts{b} ) { + print $return_string."\n"; + exit 0; +}elsif (!$opts{b}) { + print $return_string; + exit 0; +} + +my $compressed = encode_base64( gzip($return_string) ); +$compressed =~ s/\n//g; +$compressed = $compressed . 
"\n"; +if ( length($compressed) > length($return_string) ) { + print $return_string."\n"; +} +else { + print $compressed; } exit 0; From f1c5b0653d19e523bf82033c21bbea6ba6193c81 Mon Sep 17 00:00:00 2001 From: "Shao Yu-Lung (Allen)" Date: Fri, 28 Apr 2023 21:50:07 +0800 Subject: [PATCH 370/497] feat: OS Updates support agent (#444) --- snmp/osupdate | 1 + 1 file changed, 1 insertion(+) diff --git a/snmp/osupdate b/snmp/osupdate index 87e16873f..8ce829836 100755 --- a/snmp/osupdate +++ b/snmp/osupdate @@ -35,6 +35,7 @@ CMD_APK=' version' ################################################################ # Don't change anything unless you know what are you doing # ################################################################ +echo '<<>>' if command -v zypper &>/dev/null ; then # OpenSUSE # shellcheck disable=SC2086 From 7c6568fdab9256dbdb12fe6dd212dc114ec9b7dc Mon Sep 17 00:00:00 2001 From: bnerickson Date: Tue, 9 May 2023 08:23:39 -0700 Subject: [PATCH 371/497] Adding linux_config_files snmp script to monitor configuration file updates (#453) --- snmp/linux_config_files.py | 173 +++++++++++++++++++++++++++++++++++++ 1 file changed, 173 insertions(+) create mode 100644 snmp/linux_config_files.py diff --git a/snmp/linux_config_files.py b/snmp/linux_config_files.py new file mode 100644 index 000000000..4544e6ea2 --- /dev/null +++ b/snmp/linux_config_files.py @@ -0,0 +1,173 @@ +#!/usr/bin/env python +# +# Name: linux_config_files Script +# Author: bnerickson w/SourceDoctor's certificate.py script forming +# the base of the vast majority of this one. +# Version: 1.0 +# Description: This is a simple script to parse "pkg_tool_cmd" output for ingestion into +# LibreNMS via the linux_config_files application. Additional distribution +# support may be added. +# Installation: +# 1. Copy this script to /etc/snmp/ and make it executable: +# chmod +x /etc/snmp/linux_config_files.py +# 2. 
Edit your snmpd.conf and include: +# extend linux_config_files /etc/snmp/linux_config_files.py +# 3. (Optional, if RPM-based) Create a /etc/snmp/linux_config_files.json file and specify: +# a.) "pkg_system" - String designating the distribution name of the system. At +# the moment only "rpm" is supported. +# b.) "pkg_tool_cmd" - String path to the package tool binary ["/sbin/rpmconf"] +# ``` +# { +# "pkg_system": "rpm", +# "pkg_tool_cmd": "/bin/rpmconf", +# } +# ``` +# 4. Restart snmpd and activate the app for desired host. + +import json +import subprocess +import sys + +CONFIG_FILE = "/etc/snmp/linux_config_files.json" +PKG_SYSTEM = "rpm" +PKG_TOOL_ARGS = {"rpm": ["--all", "--test"]} +PKG_TOOL_CMD = {"rpm": "/sbin/rpmconf"} + + +def error_handler(error_name, err): + """ + error_handler(): Common error handler for config/output parsing and + command execution. + Inputs: + error_name: String describing the error handled. + err: The error message in its entirety. + Outputs: + None + """ + output_data = { + "errorString": "%s: '%s'" % (error_name, err), + "error": 1, + "version": 1, + "data": [], + } + print(json.dumps(output_data)) + sys.exit(1) + + +def config_file_parser(): + """ + config_file_parser(): Parses the config file (if it exists) and extracts the + necessary parameters. + + Inputs: + None + Outputs: + pkg_system: The package management used by the system. + pkg_tool_cmd: The full package tool command to execute. 
+ """ + pkg_system = PKG_SYSTEM + pkg_tool_cmd = [PKG_TOOL_CMD[pkg_system]] + + # Load configuration file if it exists + try: + with open(CONFIG_FILE, "r") as json_file: + config_file = json.load(json_file) + if "pkg_system" in config_file: + pkg_system = config_file["pkg_system"].strip().lower() + pkg_tool_cmd = ( + [config_file["pkg_tool_cmd"].strip().lower()] + if "pkg_tool_cmd" in config_file + else [PKG_TOOL_CMD[pkg_system]] + ) + except FileNotFoundError: + pass + except (KeyError, PermissionError, OSError, json.decoder.JSONDecodeError) as err: + error_handler("Config File Error", err) + + # Create and return pkg_system & full pkg_tool_cmd command. + pkg_tool_cmd.extend(PKG_TOOL_ARGS[pkg_system]) + return pkg_system, pkg_tool_cmd + + +def command_executor(pkg_system, pkg_tool_cmd): + """ + command_executor(): Execute the pkg_tool_cmd command and return the output. + + Inputs: + pkg_system: The package management used by the system. + pkg_tool_cmd: The full package tool command to execute. + Outputs: + poutput: The stdout of the executed command (empty byte-string if error). + """ + poutput = None + try: + # Execute pkg_tool_cmd command + poutput = subprocess.check_output( + pkg_tool_cmd, + stdin=None, + stderr=subprocess.PIPE, + ) + except (subprocess.CalledProcessError, OSError) as err: + # Per rpmconf man page, an error code of 5 indicates there are conf file + # to merge, so disregard that error code. + if err.returncode != 5 or pkg_system != "rpm": + error_handler("Command Execution Error", err) + poutput = err.output + return poutput + + +def output_parser(pkg_system, cmd_output): + """ + output_parser(): Parses stdout of executed command and returns updated dictionary + with parsed data. + + Inputs: + pkg_system: The package management used by the system. + cmd_output: stdout of the executed command. + Outputs: + output_data: Dictionary updated with parsed data. 
+ """ + output_data = { + "errorString": "", + "error": 0, + "version": 1, + "data": {"number_of_confs": None}, + } + + if pkg_system == "rpm": + if not cmd_output: + output_data["data"]["number_of_confs"] = 0 + else: + output_data["data"]["number_of_confs"] = len( + cmd_output.decode("utf-8").strip().split("\n") + ) + + return output_data + + +def main(): + """ + main(): main function that delegates config file parsing, command execution, + and unit stdout parsing. Then it prints out the expected json output + for the pkg_tool_cmd application. + + Inputs: + None + Outputs: + None + """ + # Parse configuration file. + pkg_system, pkg_tool_cmd = config_file_parser() + + # Execute pkg_tool_cmd command and parse output. + cmd_output = command_executor(pkg_system, pkg_tool_cmd) + + # Parse command output. + output_data = output_parser(pkg_system, cmd_output) + + # Print json dumps of dictionary. + print(json.dumps(output_data)) + + +if __name__ == "__main__": + main() From e415f6afffc8e18446cf33f0774bc50e2dae11dd Mon Sep 17 00:00:00 2001 From: bnerickson Date: Tue, 23 May 2023 11:25:30 -0700 Subject: [PATCH 372/497] Removing unnecessary backslashes from postgres script. (#468) --- snmp/postgres | 44 ++++++++++++++++++++++---------------------- 1 file changed, 22 insertions(+), 22 deletions(-) diff --git a/snmp/postgres b/snmp/postgres index 764484000..c0154da4f 100644 --- a/snmp/postgres +++ b/snmp/postgres @@ -12,11 +12,11 @@ # and/or other materials provided with the distribution. # #THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -#ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +#ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED #WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
#IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, -#INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, -#BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +#INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +#BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, #DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF #LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR #OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF @@ -91,27 +91,27 @@ BEGIN{ db=""; ignorePG='$ignorePG'; toAdd=1; -} +} { - gsub(/dbname\:/, ""); - gsub(/backends\:/, ""); - gsub(/commits\:/, ""); - gsub(/rollbacks\:/, ""); - gsub(/idxscan\:/, ""); - gsub(/idxtupread\:/, ""); - gsub(/idxtupfetch\:/, ""); - gsub(/idxblksread\:/, ""); - gsub(/idxblkshit\:/, ""); - gsub(/seqscan\:/, ""); - gsub(/seqtupread\:/, ""); - gsub(/ret\:/, ""); - gsub(/fetch\:/, ""); - gsub(/ins\:/, ""); - gsub(/upd\:/, ""); - gsub(/del\:/, ""); + gsub(/dbname:/, ""); + gsub(/backends:/, ""); + gsub(/commits:/, ""); + gsub(/rollbacks:/, ""); + gsub(/idxscan:/, ""); + gsub(/idxtupread:/, ""); + gsub(/idxtupfetch:/, ""); + gsub(/idxblksread:/, ""); + gsub(/idxblkshit:/, ""); + gsub(/seqscan:/, ""); + gsub(/seqtupread:/, ""); + gsub(/ret:/, ""); + gsub(/fetch:/, ""); + gsub(/ins:/, ""); + gsub(/upd:/, ""); + gsub(/del:/, ""); #must be processed last or they step on other gsub - gsub(/read\:/, ""); - gsub(/hit\:/, ""); + gsub(/read:/, ""); + gsub(/hit:/, ""); if ( $18 == "postgres" ){ if ( ignorePG == 1 ){ toAdd=0 } From a64641ecc5c4e5b018bf07fb6c0a29cb82c55b50 Mon Sep 17 00:00:00 2001 From: "Zane C. 
Bowers-Hadley" Date: Sat, 10 Jun 2023 15:55:57 -0500 Subject: [PATCH 373/497] add Linux Softnet Stat extend (#470) --- snmp/linux_softnet_stat | 144 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 144 insertions(+) create mode 100755 snmp/linux_softnet_stat diff --git a/snmp/linux_softnet_stat b/snmp/linux_softnet_stat new file mode 100755 index 000000000..f7987a391 --- /dev/null +++ b/snmp/linux_softnet_stat @@ -0,0 +1,144 @@ +#!/usr/bin/env perl + +=head1 DESCRIPTION + +This is a SNMP extend for monitoring /proc/net/softnet_stat on Linux for use with LibreNMS. + +For more information, see L. + +=head1 SWITCHES + +=head2 -p + +Pretty print the JSON. If used with -b, this switch will be ignored. + +=head2 -b + +Gzip the output and convert to Base64. + +=cut + +use strict; +use warnings; +use JSON; +use Getopt::Std; +use File::Slurp; +use MIME::Base64; +use Gzip::Faster; + +$Getopt::Std::STANDARD_HELP_VERSION = 1; + +sub main::VERSION_MESSAGE { + print "Linux softnet stats extend 0.0.1\n"; +} + +sub main::HELP_MESSAGE { + +} + +#this will be dumped to json at the end +my %tojson; +$tojson{totals} = { + backlog_length => 0, + cpu_collision => 0, + flow_limit => 0, + packet_dropped => 0, + packets => 0, + received_rps => 0, + time_squeeze => 0, +}; +$tojson{cores} = []; +$tojson{core_count} = 0; +$tojson{budget} = `sysctl net.core.netdev_budget 2> /dev/null`; +$tojson{budget_usecs} = `sysctl net.core.netdev_budget_usecs 2> /dev/null`; +chomp( $tojson{budget} ); +chomp( $tojson{budget_usecs} ); +$tojson{budget} =~ s/.*\=[\ \t]*//; +$tojson{budget_usecs} =~ s/.*\=[\ \t]*//; + +if ( $tojson{budget} !~ /^[0-9]+$/ ) { + $tojson{budget} = 'unknown'; +} +if ( $tojson{budget_usecs} !~ /^[0-9]+$/ ) { + $tojson{budget_usecs} = 'unknown'; +} + +#gets the options +my %opts = (); +getopts( 'pb', \%opts ); + +my $j = JSON->new; + +if ( $opts{p} && !$opts{b} ) { + $j->pretty(1); + $j->canonical(1); +} + +## +## read it all in +## + +my $lines_raw = 
read_file('/proc/net/softnet_stat'); +my @lines_split = split( /\n/, $lines_raw ); + +# not all linux kernel versions include softnet_backlog_len or index +my @to_total = keys( %{ $tojson{totals} } ); +foreach my $line (@lines_split) { + my %found; + ( + $found{packets}, $found{packet_drop}, $found{time_squeeze}, $found{zero4}, + $found{zero5}, $found{zero6}, $found{zero7}, $found{zero8}, + $found{cpu_collision}, $found{received_rps}, $found{flow_limit}, $found{backlog_length}, + $found{index} + ) = split( /[\ \t]+/, $line ); + + push( + @{ $tojson{cores} }, + { + core => $tojson{core_count}, + } + ); + + foreach my $item (@to_total) { + if ( !defined( $found{$item} ) ) { + $found{$item} = 0; + } else { + $found{$item} = hex( $found{$item} ); + } + $tojson{totals}{$item} += $found{$item}; + $tojson{cores}[ $tojson{core_count} ]{$item} = $found{$item}; + } + + $tojson{core_count}++; +} ## end foreach my $line (@lines_split) + +## +## print the results +## + +my %head_hash; +$head_hash{'data'} = \%tojson; +$head_hash{'version'} = 1; +$head_hash{'error'} = 0; +$head_hash{'errorString'} = ''; + +my $return_string = $j->encode( \%head_hash ); + +if ( !$opts{p} && !$opts{b} ) { + print $return_string. "\n"; + exit 0; +} elsif ( !$opts{b} ) { + print $return_string; + exit 0; +} + +my $compressed = encode_base64( gzip($return_string) ); +$compressed =~ s/\n//g; +$compressed = $compressed . "\n"; +if ( length($compressed) > length($return_string) ) { + print $return_string. 
"\n"; +} else { + print $compressed; +} + +exit 0; From 53db4101c328224c5b7e64879ffe4877db2c46cf Mon Sep 17 00:00:00 2001 From: Wheel Date: Mon, 12 Jun 2023 21:06:57 -0400 Subject: [PATCH 374/497] Update osupdate (#466) #444 broke osupdate script --- snmp/osupdate | 1 - 1 file changed, 1 deletion(-) diff --git a/snmp/osupdate b/snmp/osupdate index 8ce829836..87e16873f 100755 --- a/snmp/osupdate +++ b/snmp/osupdate @@ -35,7 +35,6 @@ CMD_APK=' version' ################################################################ # Don't change anything unless you know what are you doing # ################################################################ -echo '<<>>' if command -v zypper &>/dev/null ; then # OpenSUSE # shellcheck disable=SC2086 From 679957d88701749086ee7047788ba4dcebcdf85c Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Mon, 12 Jun 2023 20:16:24 -0500 Subject: [PATCH 375/497] add a line to uncomment if using as a agent (#471) --- snmp/osupdate | 3 +++ 1 file changed, 3 insertions(+) diff --git a/snmp/osupdate b/snmp/osupdate index 87e16873f..9949fba44 100755 --- a/snmp/osupdate +++ b/snmp/osupdate @@ -32,6 +32,9 @@ CMD_PKG=' audit -q -F' BIN_APK='/sbin/apk' CMD_APK=' version' +# If using this as a agent and not a extend, uncomment the line below. 
+#echo '<<>>' + ################################################################ # Don't change anything unless you know what are you doing # ################################################################ From 4ee491088729f89e21ecf55f0344a4d5caae26e1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?J=C3=A4mes=20M=C3=A9n=C3=A9trey?= Date: Mon, 19 Jun 2023 08:12:15 +0200 Subject: [PATCH 376/497] Smart: fetch the values 177, 231 and 233 as normalized instead of raw (#472) --- snmp/smart | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/snmp/smart b/snmp/smart index ef1304b0d..d3d35bfbd 100755 --- a/snmp/smart +++ b/snmp/smart @@ -326,6 +326,7 @@ foreach my $line ( @disks ){ if ( $line =~ /^[0123456789]+ / ) { my @lineA=split(/\ /, $line, 10); my $raw=$lineA[9]; + my $normalized=$lineA[3]; my $id=$lineA[0]; # Crucial SSD @@ -339,21 +340,27 @@ foreach my $line ( @disks ){ ( $id == 5 ) || ( $id == 10 ) || ( $id == 173 ) || - ( $id == 177 ) || ( $id == 183 ) || ( $id == 184 ) || ( $id == 187 ) || ( $id == 196 ) || ( $id == 197 ) || ( $id == 198 ) || - ( $id == 199 ) || - ( $id == 231 ) || - ( $id == 233 ) + ( $id == 199 ) ) { my @rawA=split( /\ /, $raw ); $IDs{$id}=$rawA[0]; } + # single int normalized values + if ( + ( $id == 177 ) || + ( $id == 231 ) || + ( $id == 233 ) + ) { + $IDs{$id}=int($normalized); + } + # 9, power on hours if ( $id == 9 ) { my @runtime=split(/[\ h]/, $raw); From 9695af92d17a3f8ec092a0483af327c6058f43cd Mon Sep 17 00:00:00 2001 From: "Zane C. 
Bowers-Hadley" Date: Mon, 19 Jun 2023 01:46:41 -0500 Subject: [PATCH 377/497] privoxy fix nog_conns and actually do compression now (#473) * fix nog_conns parsing for privoxy * add compression support --- snmp/privoxy | 128 ++++++++++++++++++++++----------------------------- 1 file changed, 56 insertions(+), 72 deletions(-) diff --git a/snmp/privoxy b/snmp/privoxy index b52405910..26e87cddd 100755 --- a/snmp/privoxy +++ b/snmp/privoxy @@ -47,6 +47,8 @@ use File::ReadBackwards; use JSON; use Time::Piece; use IPC::Run3; +use MIME::Base64; +use Gzip::Faster; # get the current time my $t = localtime; @@ -58,8 +60,7 @@ if ( $t->tzoffset =~ /^-/ ) { my $offset = $t->tzoffset; $offset =~ s/^\-//; $till = $till - $offset; -} -else { +} else { my $offset = $t->tzoffset; $offset =~ s/^\+//; $till = $till + $offset; @@ -161,7 +162,7 @@ if ($@) { print "\n"; } exit 0; -} +} ## end if ($@) my $read_file = 1; @@ -197,8 +198,7 @@ while ( defined( $log_line = $bw->readline ) # otherwise add it if ( $log_t->epoch < $till ) { $read_file = 0; - } - else { + } else { $lines = $log_line . $lines; if ( $log_line =~ /^\d\d\d\d\-\d\d\-\d\d\ \d\d\:\d\d\:\d\d.*Crunch\:\ Blocked\:\ / ) { @@ -218,15 +218,15 @@ while ( defined( $log_line = $bw->readline ) $log_line =~ s/\:\d+$//; $unique_domains_np->{$log_line_tmp} = 1; } - } - } - } + } ## end if ( $log_line =~ /^\d\d\d\d\-\d\d\-\d\d\ \d\d\:\d\d\:\d\d.*Connect\:\ to\ /) + } ## end else [ if ( $log_t->epoch < $till ) ] + } ## end if ( defined($log_t) ) # if we don't have log_t, just add the line and lot the log parser figure out what it is else { $lines = $log_line . $lines; } -} +} ## end while ( defined( $log_line = $bw->readline ) ...) 
my $stdout; my $stderr; @@ -246,92 +246,77 @@ foreach my $line (@stdout_split) { $multiline_mode = ''; $line =~ s/.*\:\ //; $to_return->{data}{client_requests} = $line; - } - elsif ( $line =~ /^Crunches\:/ ) { + } elsif ( $line =~ /^Crunches\:/ ) { $multiline_mode = ''; $line =~ s/.*\:\ //; $line =~ s/\ .*$//; $to_return->{data}{crunches} = $line; - } - elsif ( $line =~ /^Blocks:/ ) { + } elsif ( $line =~ /^Blocks:/ ) { $multiline_mode = ''; $line =~ s/.*\:\ //; $line =~ s/\ .*$//; $to_return->{data}{blocks} = $line; - } - elsif ( $line =~ /^Fast\ redirections\:/ ) { + } elsif ( $line =~ /^Fast\ redirections\:/ ) { $multiline_mode = ''; $line =~ s/.*\:\ //; $line =~ s/\ .*$//; $to_return->{data}{fast_redirs} = $line; - } - elsif ( $line =~ /^Connection\ timeouts\:/ ) { + } elsif ( $line =~ /^Connection\ timeouts\:/ ) { $multiline_mode = ''; $line =~ s/.*\:\ //; $line =~ s/\ .*$//; $to_return->{data}{con_timeouts} = $line; - } - elsif ( $line =~ /^Connection\ failures\:/ ) { + } elsif ( $line =~ /^Connection\ failures\:/ ) { $multiline_mode = ''; $line =~ s/.*\:\ //; $line =~ s/\ .*$//; $to_return->{data}{con_failures} = $line; - } - elsif ( $line =~ /^Outgoing\ requests\:/ ) { + } elsif ( $line =~ /^Outgoing\ requests\:/ ) { $multiline_mode = ''; $line =~ s/.*\:\ //; $line =~ s/\ .*$//; $to_return->{data}{out_requests} = $line; - } - elsif ( $line =~ /^Server keep-alive offers\:/ ) { + } elsif ( $line =~ /^Server keep-alive offers\:/ ) { $multiline_mode = ''; $line =~ s/.*\:\ //; $line =~ s/\ .*$//; $to_return->{data}{ska_offers} = $line; - } - elsif ( $line =~ /^New\ outgoing\ connections\:/ ) { + } elsif ( $line =~ /^New\ outgoing\ connections\:/ ) { $multiline_mode = ''; - $line =~ s/.*\:\ \-//; + $line =~ s/.*\:\ //; $line =~ s/\ .*$//; $to_return->{data}{nog_conns} = $line; - } - elsif ( $line =~ /^Reused\ server\ connections\:/ ) { + } elsif ( $line =~ /^Reused\ server\ connections\:/ ) { $multiline_mode = ''; $line =~ s/.*connections\:\ //; $line =~ s/\ 
.*$//; $to_return->{data}{reused_server_cons} = $line; - } - elsif ( $line =~ /^Empty\ responses\:/ ) { + } elsif ( $line =~ /^Empty\ responses\:/ ) { $multiline_mode = ''; $line =~ s/.*\:\ //; $line =~ s/\ .*$//; $to_return->{data}{empty_resps} = $line; - } - elsif ( $line =~ /^Empty\ responses\ on\ new\ connections\:/ ) { + } elsif ( $line =~ /^Empty\ responses\ on\ new\ connections\:/ ) { $multiline_mode = ''; $line =~ s/.*\:\ //; $line =~ s/\ .*$//; $to_return->{data}{empty_resps_new} = $line; - } - elsif ( $line =~ /^Empty\ responses\ on\ reused\ connections\:/ ) { + } elsif ( $line =~ /^Empty\ responses\ on\ reused\ connections\:/ ) { $multiline_mode = ''; $line =~ s/.*\:\ //; $line =~ s/\ .*$//; $to_return->{data}{empty_resps_reuse} = $line; - } - elsif ( $line =~ /^Client\ connections\:/ ) { + } elsif ( $line =~ /^Client\ connections\:/ ) { $multiline_mode = ''; $line =~ s/.*\:\ //; $line =~ s/\ .*$//; $to_return->{data}{client_cons} = $line; - } - elsif ( $line =~ /^Bytes\ of\ content\ transferred\ to\ the\ client\:/ ) { + } elsif ( $line =~ /^Bytes\ of\ content\ transferred\ to\ the\ client\:/ ) { $multiline_mode = ''; $line =~ s/.*\:\ //; $line =~ s/\ .*$//; $to_return->{data}{bytes_to_client} = $line; - } - elsif ( $line =~ /^Improperly\ accounted\ requests\:/ ) { + } elsif ( $line =~ /^Improperly\ accounted\ requests\:/ ) { $multiline_mode = ''; $line =~ s/.*\:\ \~//; $line =~ s/\ .*$//; @@ -341,14 +326,11 @@ foreach my $line (@stdout_split) { # match various multi line modes starts elsif ( $line =~ /^Client\ requests\ per\ connection\ distribution\:/ ) { $multiline_mode = 'requests per con'; - } - elsif ( $line =~ /^Method\ distribution\:/ ) { + } elsif ( $line =~ /^Method\ distribution\:/ ) { $multiline_mode = 'method'; - } - elsif ( $line =~ /^Client HTTP versions:/ ) { + } elsif ( $line =~ /^Client HTTP versions:/ ) { $multiline_mode = 'version'; - } - elsif ( $line + } elsif ( $line =~ /^HTTP\ status\ codes\ according\ to\ \'debug\ 512\' \(status\ 
codes\ sent\ by\ the\ server\ may\ differ\)\:/ ) { @@ -362,16 +344,14 @@ foreach my $line (@stdout_split) { if ( $line > $to_return->{data}{max_reqs} ) { $to_return->{data}{max_reqs} = $line; } - } - elsif ( $multiline_mode eq 'method' ) { + } elsif ( $multiline_mode eq 'method' ) { $line =~ s/^ +//; my ( $count, $method ) = split( /\ \:\ /, $line ); $method = lc($method); if ( defined( $to_return->{data}{ 'req_' . $method } ) ) { $to_return->{data}{ 'req_' . $method } = $count; } - } - elsif ( $multiline_mode eq 'version' ) { + } elsif ( $multiline_mode eq 'version' ) { $line =~ s/^ +//; my ( $count, $version ) = split( /\ \:\ /, $line ); $version = lc($version); @@ -380,48 +360,38 @@ foreach my $line (@stdout_split) { if ( defined( $to_return->{data}{$version} ) ) { $to_return->{data}{$version} = $count; } - } - elsif ( $multiline_mode eq 'response' ) { + } elsif ( $multiline_mode eq 'response' ) { $line =~ s/^ +//; my ( $count, $response ) = split( /\ \:\ /, $line ); if ( defined( $to_return->{data}{ 'resp_' . $response } ) ) { $to_return->{data}{ 'resp_' . 
$response } = $count; - } - elsif ( $response =~ /^2\d\d/ ) { + } elsif ( $response =~ /^2\d\d/ ) { $to_return->{data}{resp_2xx_other} = $to_return->{data}{resp_2xx_other} + $count; - } - elsif ( $response =~ /^3\d\d/ ) { + } elsif ( $response =~ /^3\d\d/ ) { $to_return->{data}{resp_3xx_other} = $to_return->{data}{resp_3xx_other} + $count; - } - elsif ( $response =~ /^4\d\d/ ) { + } elsif ( $response =~ /^4\d\d/ ) { $to_return->{data}{resp_4xx_other} = $to_return->{data}{resp_4xx_other} + $count; - } - elsif ( $response =~ /^5\d\d/ ) { + } elsif ( $response =~ /^5\d\d/ ) { $to_return->{data}{resp_3xx_other} = $to_return->{data}{resp_5xx_other} + $count; } if ( $response =~ /^1\d\d/ ) { $to_return->{data}{resp_1xx} = $to_return->{data}{resp_1xx} + $count; - } - elsif ( $response =~ /^2\d\d/ ) { + } elsif ( $response =~ /^2\d\d/ ) { $to_return->{data}{resp_2xx} = $to_return->{data}{resp_2xx} + $count; - } - elsif ( $response =~ /^3\d\d/ ) { + } elsif ( $response =~ /^3\d\d/ ) { $to_return->{data}{resp_3xx} = $to_return->{data}{resp_3xx} + $count; - } - elsif ( $response =~ /^4\d\d/ ) { + } elsif ( $response =~ /^4\d\d/ ) { $to_return->{data}{resp_4xx} = $to_return->{data}{resp_4xx} + $count; - } - elsif ( $response =~ /^5\d\d/ ) { + } elsif ( $response =~ /^5\d\d/ ) { $to_return->{data}{resp_5xx} = $to_return->{data}{resp_5xx} + $count; } - } - } - else { + } ## end elsif ( $multiline_mode eq 'response' ) + } else { $multiline_mode = ''; } -} +} ## end foreach my $line (@stdout_split) my @keys_tmp = keys( %{$unique_bdomains} ); $to_return->{data}{unique_bdomains} = @keys_tmp; @@ -442,6 +412,20 @@ if ( $to_return->{data}{blocks} > 0 && $to_return->{data}{client_requests} > 0 ) $to_return->{data}{block_percent} = $to_return->{data}{blocks} / $to_return->{data}{client_requests}; } +if ($compress) { + my $return_string = encode_json($to_return); + my $compressed = encode_base64( gzip($return_string) ); + $compressed =~ s/\n//g; + $compressed = $compressed . 
"\n"; + if ( length($compressed) > length($return_string) ) { + print $return_string. "\n"; + } else { + print $compressed; + } + + exit 0; +} ## end if ($compress) + print $json->encode($to_return); if ( !$opts{p} ) { print "\n"; From d0075418b88370ecab13895946f76c6d62242548 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Mon, 19 Jun 2023 21:00:16 -0500 Subject: [PATCH 378/497] add smart-v1, a new JSON based smart poller (#474) Now also grabs.... general health status FW version selftest log make model disk + serial --- snmp/smart-v1 | 537 ++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 537 insertions(+) create mode 100755 snmp/smart-v1 diff --git a/snmp/smart-v1 b/snmp/smart-v1 new file mode 100755 index 000000000..9a42e175b --- /dev/null +++ b/snmp/smart-v1 @@ -0,0 +1,537 @@ +#!/usr/bin/env perl +#Copyright (c) 2023, Zane C. Bowers-Hadley +#All rights reserved. +# +#Redistribution and use in source and binary forms, with or without modification, +#are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +#ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +#WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+#IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, +#INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +#BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +#LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +#OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +#THE POSSIBILITY OF SUCH DAMAGE. + +=for comment + +Add this to snmpd.conf like below. + + extend smart /etc/snmp/smart + +Then add to root's cron tab, if you have more than a few disks. + + */3 * * * * /etc/snmp/smart -u + +You will also need to create the config file, which defaults to the same path as the script, +but with .config appended. So if the script is located at /etc/snmp/smart, the config file +will be /etc/snmp/smart.config. Alternatively you can also specific a config via -c. + +Anything starting with a # is comment. The format for variables is $variable=$value. Empty +lines are ignored. Spaces and tabes at either the start or end of a line are ignored. Any +line with out a matched variable or # are treated as a disk. + + #This is a comment + cache=/var/cache/smart + smartctl=/usr/local/sbin/smartctl + useSN=0 + ada0 + da5 /dev/da5 -d sat + twl0,0 /dev/twl0 -d 3ware,0 + twl0,1 /dev/twl0 -d 3ware,1 + twl0,2 /dev/twl0 -d 3ware,2 + +The variables are as below. + + cache = The path to the cache file to use. Default: /var/cache/smart + smartctl = The path to use for smartctl. Default: /usr/bin/env smartctl + useSN = If set to 1, it will use the disks SN for reporting instead of the device name. + 1 is the default. 0 will use the device name. + +A disk line is can be as simple as just a disk name under /dev/. Such as in the config above +The line "ada0" would resolve to "/dev/ada0" and would be called with no special argument. 
If +a line has a space in it, everything before the space is treated as the disk name and is what +used for reporting and everything after that is used as the argument to be passed to smartctl. + +If you want to guess at the configuration, call it with -g and it will print out what it thinks +it should be. + +=cut + +## +## You should not need to touch anything below here. +## +use warnings; +use strict; +use Getopt::Std; +use JSON; +use MIME::Base64; +use Gzip::Faster; + +my $cache = '/var/cache/smart'; +my $smartctl = '/usr/bin/env smartctl'; +my @disks; +my $useSN = 1; + +$Getopt::Std::STANDARD_HELP_VERSION = 1; + +sub main::VERSION_MESSAGE { + print "SMART SNMP extend 0.1.0\n"; +} + +sub main::HELP_MESSAGE { + print "\n" + . "-u Update '" + . $cache . "'\n" + . "-g Guess at the config and print it to STDOUT.\n" + . "-c The config file to use.\n" + . "-p Pretty print the JSON.\n" + . "-Z GZip+Base64 compress the results.\n"; + +} ## end sub main::HELP_MESSAGE + +#gets the options +my %opts = (); +getopts( 'ugc:pZ', \%opts ); + +# configure JSON for later usage +my $json = JSON->new->allow_nonref->canonical(1); +if ( $opts{p} ) { + $json->pretty; +} + +my $to_return = { + data => { disks => {} }, + version => 1, + error => 0, + errorString => '', +}; + +# guess if asked +if ( defined( $opts{g} ) ) { + + #get what path to use for smartctl + $smartctl = `which smartctl`; + chomp($smartctl); + if ( $? != 0 ) { + warn("'which smartctl' failed with a exit code of $?"); + exit 1; + } + + #try to touch the default cache location and warn if it can't be done + system( 'touch ' . $cache . '>/dev/null' ); + if ( $? != 0 ) { + $cache = '#Could not touch ' . $cache . "You will need to manually set it\n" . "cache=?\n"; + } else { + system( 'rm -f ' . $cache . '>/dev/null' ); + $cache = 'cache=' . $cache . 
"\n"; + } + + # used for checking if a disk has been found more than once + my %found_disks_names; + my @argumentsA; + + #have smartctl scan and see if it finds anythings not get found + my $scan_output = `$smartctl --scan-open`; + my @scan_outputA = split( /\n/, $scan_output ); + + # remove non-SMART devices sometimes returned + @scan_outputA = grep( !/ses[0-9]/, @scan_outputA ); # not a disk, but may or may not have SMART attributes + @scan_outputA = grep( !/pass[0-9]/, @scan_outputA ); # very likely a duplicate and a disk under another name + @scan_outputA = grep( !/cd[0-9]/, @scan_outputA ); # CD drive + if ( $^O eq 'freebsd' ) { + @scan_outputA = grep( !/sa[0-9]/, @scan_outputA ); # tape drive + @scan_outputA = grep( !/ctl[0-9]/, @scan_outputA ); # CAM target layer + } elsif ( $^O eq 'linux' ) { + @scan_outputA = grep( !/st[0-9]/, @scan_outputA ); # SCSI tape drive + @scan_outputA = grep( !/ht[0-9]/, @scan_outputA ); # ATA tape drive + } + + # make the first pass, figuring out what all we have and trimming comments + foreach my $arguments (@scan_outputA) { + my $name = $arguments; + + $arguments =~ s/ \#.*//; # trim the comment out of the argument + $name =~ s/ .*//; + $name =~ s/\/dev\///; + if ( defined( $found_disks_names{$name} ) ) { + $found_disks_names{$name}++; + } else { + $found_disks_names{$name} = 0; + } + + push( @argumentsA, $arguments ); + + } ## end foreach my $arguments (@scan_outputA) + + # second pass, putting the lines together + my %current_disk; + my $drive_lines = ''; + foreach my $arguments (@argumentsA) { + my $name = $arguments; + $name =~ s/ .*//; + $name =~ s/\/dev\///; + + if ( $found_disks_names{$name} == 0 ) { + # If no other devices, just name it after the base device. + $drive_lines = $drive_lines . $name . " " . $arguments . 
"\n"; + } else { + # if more than one, start at zero and increment, apennding comma number to the base device name + if ( defined( $current_disk{$name} ) ) { + $current_disk{$name}++; + } else { + $current_disk{$name} = 0; + } + $drive_lines = $drive_lines . $name . "," . $current_disk{$name} . " " . $arguments . "\n"; + } + + } ## end foreach my $arguments (@argumentsA) + + print "useSN=1\n" . 'smartctl=' . $smartctl . "\n" . $cache . $drive_lines; + + exit 0; +} ## end if ( defined( $opts{g} ) ) + +#get which config file to use +my $config = $0 . '.config'; +if ( defined( $opts{c} ) ) { + $config = $opts{c}; +} + +#reads the config file, optionally +my $config_file = ''; +open( my $readfh, "<", $config ) or die "Can't open '" . $config . "'"; +read( $readfh, $config_file, 1000000 ); +close($readfh); + +#parse the config file and remove comments and empty lines +my @configA = split( /\n/, $config_file ); +@configA = grep( !/^$/, @configA ); +@configA = grep( !/^\#/, @configA ); +@configA = grep( !/^[\s\t]*$/, @configA ); +my $configA_int = 0; +while ( defined( $configA[$configA_int] ) ) { + my $line = $configA[$configA_int]; + chomp($line); + $line =~ s/^[\t\s]+//; + $line =~ s/[\t\s]+$//; + + my ( $var, $val ) = split( /=/, $line, 2 ); + + my $matched; + if ( $var eq 'cache' ) { + $cache = $val; + $matched = 1; + } + + if ( $var eq 'smartctl' ) { + $smartctl = $val; + $matched = 1; + } + + if ( $var eq 'useSN' ) { + $useSN = $val; + $matched = 1; + } + + if ( !defined($val) ) { + push( @disks, $line ); + } + + $configA_int++; +} ## end while ( defined( $configA[$configA_int] ) ) + +#if set to 1, no cache will be written and it will be printed instead +my $noWrite = 0; + +# if no -u, it means we are being called from snmped +if ( !defined( $opts{u} ) ) { + # if the cache file exists, print it, otherwise assume one is not being used + if ( -f $cache ) { + my $old = ''; + open( my $readfh, "<", $cache ) or die "Can't open '" . $cache . 
"'"; + read( $readfh, $old, 1000000 ); + close($readfh); + print $old; + exit 0; + } else { + $opts{u} = 1; + $noWrite = 1; + } +} ## end if ( !defined( $opts{u} ) ) + +foreach my $line (@disks) { + my $disk; + my $name; + if ( $line =~ /\ / ) { + ( $name, $disk ) = split( /\ /, $line, 2 ); + } else { + $disk = $line; + $name = $line; + } + my $output; + if ( $disk !~ /\// ) { + $disk = '/dev/' . $disk; + } + $output = `$smartctl -A $disk`; + my %IDs = ( + '5' => 'null', + '10' => 'null', + '173' => 'null', + '177' => 'null', + '183' => 'null', + '184' => 'null', + '187' => 'null', + '188' => 'null', + '190' => 'null', + '194' => 'null', + '196' => 'null', + '197' => 'null', + '198' => 'null', + '199' => 'null', + '231' => 'null', + '233' => 'null', + '9' => 'null', + 'disk' => $disk, + 'serial' => undef, + 'selftest_log' => undef, + 'health_pass' => 0, + ); + $IDs{'disk'} =~ s/^\/dev\///; + + my @outputA; + + if ( $output =~ /NVMe Log/ ) { + # we have an NVMe drive with annoyingly different output + my %mappings = ( + 'Temperature' => 194, + 'Power Cycles' => 12, + 'Power On Hours' => 9, + 'Percentage Used' => 231, + ); + foreach ( split( /\n/, $output ) ) { + if (/:/) { + my ( $key, $val ) = split(/:/); + $val =~ s/^\s+|\s+$|\D+//g; + if ( exists( $mappings{$key} ) ) { + if ( $mappings{$key} == 231 ) { + $IDs{ $mappings{$key} } = 100 - $val; + } else { + $IDs{ $mappings{$key} } = $val; + } + } + } ## end if (/:/) + } ## end foreach ( split( /\n/, $output ) ) + + } else { + @outputA = split( /\n/, $output ); + my $outputAint = 0; + while ( defined( $outputA[$outputAint] ) ) { + my $line = $outputA[$outputAint]; + $line =~ s/^ +//; + $line =~ s/ +/ /g; + + if ( $line =~ /^[0123456789]+ / ) { + my @lineA = split( /\ /, $line, 10 ); + my $raw = $lineA[9]; + my $normalized = $lineA[3]; + my $id = $lineA[0]; + + # Crucial SSD + # 202, Percent_Lifetime_Remain, same as 231, SSD Life Left + if ( $id == 202 ) { + $IDs{231} = $raw; + } + + # single int raw values + if ( ( 
$id == 5 ) + || ( $id == 10 ) + || ( $id == 173 ) + || ( $id == 183 ) + || ( $id == 184 ) + || ( $id == 187 ) + || ( $id == 196 ) + || ( $id == 197 ) + || ( $id == 198 ) + || ( $id == 199 ) ) + { + my @rawA = split( /\ /, $raw ); + $IDs{$id} = $rawA[0]; + } ## end if ( ( $id == 5 ) || ( $id == 10 ) || ( $id...)) + + # single int normalized values + if ( ( $id == 177 ) + || ( $id == 231 ) + || ( $id == 233 ) ) + { + $IDs{$id} = int($normalized); + } + + # 9, power on hours + if ( $id == 9 ) { + my @runtime = split( /[\ h]/, $raw ); + $IDs{$id} = $runtime[0]; + } + + # 188, Command_Timeout + if ( $id == 188 ) { + my $total = 0; + my @rawA = split( /\ /, $raw ); + my $rawAint = 0; + while ( defined( $rawA[$rawAint] ) ) { + $total = $total + $rawA[$rawAint]; + $rawAint++; + } + $IDs{$id} = $total; + } ## end if ( $id == 188 ) + + # 190, airflow temp + # 194, temp + if ( ( $id == 190 ) + || ( $id == 194 ) ) + { + my ($temp) = split( /\ /, $raw ); + $IDs{$id} = $temp; + } + } ## end if ( $line =~ /^[0123456789]+ / ) + + # SAS Wrapping + # Section by Cameron Munroe (munroenet[at]gmail.com) + + # Elements in Grown Defect List. 
+ # Marking as 5 Reallocated_Sector_Ct + + if ( $line =~ "Elements in grown defect list:" ) { + + my @lineA = split( /\ /, $line, 10 ); + my $raw = $lineA[5]; + + # Reallocated Sector Count ID + $IDs{5} = $raw; + + } + + # Current Drive Temperature + # Marking as 194 Temperature_Celsius + + if ( $line =~ "Current Drive Temperature:" ) { + + my @lineA = split( /\ /, $line, 10 ); + my $raw = $lineA[3]; + + # Temperature C ID + $IDs{194} = $raw; + + } + + # End of SAS Wrapper + + $outputAint++; + } ## end while ( defined( $outputA[$outputAint] ) ) + } ## end else [ if ( $output =~ /NVMe Log/ ) ] + + #get the selftest logs + $output = `$smartctl -l selftest $disk`; + @outputA = split( /\n/, $output ); + my @completed = grep( /Completed without error/, @outputA ); + $IDs{'completed'} = scalar @completed; + my @interrupted = grep( /Interrupted/, @outputA ); + $IDs{'interrupted'} = scalar @interrupted; + my @read_failure = grep( /read failure/, @outputA ); + $IDs{'read_failure'} = scalar @read_failure; + my @unknown_failure = grep( /unknown failure/, @outputA ); + $IDs{'unknown_failure'} = scalar @unknown_failure; + my @extended = grep( /Extended/, @outputA ); + $IDs{'extended'} = scalar @extended; + my @short = grep( /Short/, @outputA ); + $IDs{'short'} = scalar @short; + my @conveyance = grep( /Conveyance/, @outputA ); + $IDs{'conveyance'} = scalar @conveyance; + my @selective = grep( /Selective/, @outputA ); + $IDs{'selective'} = scalar @selective; + + # if we have logs, actually grab the log output + if ( $IDs{'completed'} > 0 + || $IDs{'interrupted'} > 0 + || $IDs{'read_failure'} > 0 + || $IDs{'extended'} > 0 + || $IDs{'short'} > 0 + || $IDs{'conveyance'} > 0 + || $IDs{'selective'} > 0 ) + { + my @log_lines; + push( @log_lines, @extended, @short, @conveyance, @selective ); + $IDs{'selftest_log'} = join( "\n", sort(@log_lines) ); + } ## end if ( $IDs{'completed'} > 0 || $IDs{'interrupted'...}) + + # get the drive serial number, if needed + my $disk_id = $name; + 
$output=`$smartctl -i $disk`; + while ( $output =~ /(?i)Serial Number:(.*)/g ) { + $IDs{'serial'} = $1; + $IDs{'serial'} =~ s/^\s+|\s+$//g; + } + if ($useSN) { + $disk_id = $IDs{'serial'}; + } + + while ( $output =~ /(?i)Model Family:(.*)/g ) { + $IDs{'model_family'} = $1; + $IDs{'model_family'} =~ s/^\s+|\s+$//g; + } + + while ( $output =~ /(?i)Device Model:(.*)/g ) { + $IDs{'device_model'} = $1; + $IDs{'device_model'} =~ s/^\s+|\s+$//g; + } + + while ( $output =~ /(?i)Model Number:(.*)/g ) { + $IDs{'model_number'} = $1; + $IDs{'model_number'} =~ s/^\s+|\s+$//g; + } + + while ( $output =~ /(?i)Firmware Version:(.*)/g ) { + $IDs{'fw_version'} = $1; + $IDs{'fw_version'} =~ s/^\s+|\s+$//g; + } + + $output = `$smartctl -H $disk`; + if ( $output =~ /SMART\ overall\-health\ self\-assessment\ test\ result\:\ PASSED/ ) { + $IDs{'health_pass'} = 1; + } + + $to_return->{data}{disks}{$disk_id} = \%IDs; + +} ## end foreach my $line (@disks) + +my $toReturn = $json->encode($to_return); + +if ( !$opts{p} ) { + $toReturn = $toReturn . "\n"; +} + +if ($opts{Z}) { + my $compressed = encode_base64( gzip($toReturn) ); + $compressed =~ s/\n//g; + $compressed = $compressed . "\n"; + if ( length($compressed) < length($toReturn) ) { + $toReturn=$compressed; + } +} + +if ( !$noWrite ) { + open( my $writefh, ">", $cache ) or die "Can't open '" . $cache . "'"; + print $writefh $toReturn; + close($writefh); +} else { + print $toReturn; +} From a8cbebbcaf7c7671b4c589bc238763c5c52a10cd Mon Sep 17 00:00:00 2001 From: "Zane C. 
Bowers-Hadley" Date: Mon, 26 Jun 2023 22:33:44 -0500 Subject: [PATCH 379/497] update smart-v1 some more (#476) * add various HP specific bits for identity info * more HP related cleanup * add initial ccis guessing support * ccis -> cciss * rework cciss support some more * derp, fix qoute type * make useSN configuration with -g * rework self test logs to be more HP friendly * more test cleanup * more test cleanup * finally get the extended test playing nice with HP stuff * don't print needless error messages if cciss_vol_status is not found * cleanup a edge case, add a new edge case, and now find the max temp * add id 232 * make the scan modes selectable and begin reworking cciss forproperly checking all possible devices * rework how the cciss device path is generated * add exit status checking * improve cciss guess * cleanup the cciss checks some more * convert to IO::Compress::Gzip and update docs --- snmp/smart-v1 | 796 ++++++++++++++++++++++++++++++++++---------------- 1 file changed, 543 insertions(+), 253 deletions(-) diff --git a/snmp/smart-v1 b/snmp/smart-v1 index 9a42e175b..d3b9bbdd6 100755 --- a/snmp/smart-v1 +++ b/snmp/smart-v1 @@ -30,11 +30,11 @@ Add this to snmpd.conf like below. Then add to root's cron tab, if you have more than a few disks. - */3 * * * * /etc/snmp/smart -u + */5 * * * * /etc/snmp/extends/smart -u You will also need to create the config file, which defaults to the same path as the script, but with .config appended. So if the script is located at /etc/snmp/smart, the config file -will be /etc/snmp/smart.config. Alternatively you can also specific a config via -c. +will be /etc/snmp/extends/smart.config. Alternatively you can also specific a config via -c. Anything starting with a # is comment. The format for variables is $variable=$value. Empty lines are ignored. Spaces and tabes at either the start or end of a line are ignored. 
Any @@ -65,6 +65,31 @@ used for reporting and everything after that is used as the argument to be passe If you want to guess at the configuration, call it with -g and it will print out what it thinks it should be. + +Switches: + +-c The config file to use. +-u Update +-p Pretty print the JSON. +-Z GZip+Base64 compress the results. + +-g Guess at the config and print it to STDOUT +-C Enable manual checking for guess and cciss. +-S Set useSN to 0 when using -g +-G Guess modes to use. This is a comma seperated list. + Default :: scan-open,cciss-vol-status + +Guess Modes: + +- scan :: Use "--scan" with smartctl. "scan-open" will take presidence. + +- scan-open :: Call smartctl with "--scan-open". + +- cciss-vol-status :: Freebsd/Linux specific and if it sees /dev/sg0(on Linux) or + /dev/ciss0(on FreebSD) it will attempt to find drives via cciss-vol-status, + and then optionally checking for disks via smrtctl if -C is given. Should be noted + though that -C will not find drives that are currently missing/failed. + =cut ## @@ -75,7 +100,7 @@ use strict; use Getopt::Std; use JSON; use MIME::Base64; -use Gzip::Faster; +use IO::Compress::Gzip qw(gzip $GzipError); my $cache = '/var/cache/smart'; my $smartctl = '/usr/bin/env smartctl'; @@ -85,38 +110,92 @@ my $useSN = 1; $Getopt::Std::STANDARD_HELP_VERSION = 1; sub main::VERSION_MESSAGE { - print "SMART SNMP extend 0.1.0\n"; + print "SMART SNMP extend 0.2.0\n"; } sub main::HELP_MESSAGE { - print "\n" - . "-u Update '" - . $cache . "'\n" - . "-g Guess at the config and print it to STDOUT.\n" - . "-c The config file to use.\n" - . "-p Pretty print the JSON.\n" - . "-Z GZip+Base64 compress the results.\n"; + &VERSION_MESSAGE; + print "\n" . "-u Update '" . $cache . "'\n" . '-g Guess at the config and print it to STDOUT +-c The config file to use. +-p Pretty print the JSON. +-Z GZip+Base64 compress the results. +-C Enable manual checking for guess and cciss. +-S Set useSN to 0 when using -g +-G Guess modes to use. 
This is a comma seperated list. + Default :: scan-open,cciss-vol-status + +Scan Modes: + +- scan :: Use "--scan" with smartctl. "scan-open" will take presidence. + +- scan-open :: Call smartctl with "--scan-open". + +- cciss-vol-status :: Freebsd/Linux specific and if it sees /dev/sg0(on Linux) or + /dev/ciss0(on FreebSD) it will attempt to find drives via cciss-vol-status, + and then optionally checking for disks via smrtctl if -C is given. Should be noted + though that -C will not find drives that are currently missing/failed. +'; } ## end sub main::HELP_MESSAGE #gets the options my %opts = (); -getopts( 'ugc:pZ', \%opts ); +getopts( 'ugc:pZhvCSG', \%opts ); + +if ( $opts{h} ) { + &HELP_MESSAGE; + exit; +} +if ( $opts{v} ) { + &VERSION_MESSAGE; + exit; +} + +# +# figure out what scan modes to use if -g specified +# +my $scan_modes = { + 'scan-open' => 0, + 'scan' => 0, + 'cciss_vol_status' => 0, +}; +if ( $opts{g} ) { + if ( !defined( $opts{G} ) ) { + $opts{G} = 'scan-open,cciss_vol_status'; + } + $opts{G} =~ s/[\ \t]//g; + my @scan_modes_split = split( /,/, $opts{G} ); + foreach my $mode (@scan_modes_split) { + if ( !defined $scan_modes->{$mode} ) { + die( '"' . $mode . '" is not a recognized scan mode' ); + } + $scan_modes->{$mode} = 1; + } +} ## end if ( $opts{g} ) # configure JSON for later usage -my $json = JSON->new->allow_nonref->canonical(1); -if ( $opts{p} ) { - $json->pretty; +# only need to do this if actually running as in -g is not specified +my $json; +if ( !$opts{g} ) { + + $json = JSON->new->allow_nonref->canonical(1); + if ( $opts{p} ) { + $json->pretty; + } } my $to_return = { - data => { disks => {} }, + data => { disks => {}, exit_nonzero => 0, unhealthy => 0, }, version => 1, error => 0, errorString => '', }; +# +# # guess if asked +# +# if ( defined( $opts{g} ) ) { #get what path to use for smartctl @@ -136,67 +215,192 @@ if ( defined( $opts{g} ) ) { $cache = 'cache=' . $cache . 
"\n"; } - # used for checking if a disk has been found more than once - my %found_disks_names; - my @argumentsA; - - #have smartctl scan and see if it finds anythings not get found - my $scan_output = `$smartctl --scan-open`; - my @scan_outputA = split( /\n/, $scan_output ); - - # remove non-SMART devices sometimes returned - @scan_outputA = grep( !/ses[0-9]/, @scan_outputA ); # not a disk, but may or may not have SMART attributes - @scan_outputA = grep( !/pass[0-9]/, @scan_outputA ); # very likely a duplicate and a disk under another name - @scan_outputA = grep( !/cd[0-9]/, @scan_outputA ); # CD drive - if ( $^O eq 'freebsd' ) { - @scan_outputA = grep( !/sa[0-9]/, @scan_outputA ); # tape drive - @scan_outputA = grep( !/ctl[0-9]/, @scan_outputA ); # CAM target layer - } elsif ( $^O eq 'linux' ) { - @scan_outputA = grep( !/st[0-9]/, @scan_outputA ); # SCSI tape drive - @scan_outputA = grep( !/ht[0-9]/, @scan_outputA ); # ATA tape drive - } + my $drive_lines = ''; - # make the first pass, figuring out what all we have and trimming comments - foreach my $arguments (@scan_outputA) { - my $name = $arguments; + # + # + # scan-open and scan guess mode handling + # + # + if ( $scan_modes->{'scan-open'} || $scan_modes->{'scan'} ) { + # used for checking if a disk has been found more than once + my %found_disks_names; + my @argumentsA; + + # use scan-open if it is set, overriding scan if it is also set + my $mode = 'scan'; + if ( $scan_modes->{'scan-open'} ) { + $mode = 'scan-open'; + } - $arguments =~ s/ \#.*//; # trim the comment out of the argument - $name =~ s/ .*//; - $name =~ s/\/dev\///; - if ( defined( $found_disks_names{$name} ) ) { - $found_disks_names{$name}++; - } else { - $found_disks_names{$name} = 0; + #have smartctl scan and see if it finds anythings not get found + my $scan_output = `$smartctl --$mode`; + my @scan_outputA = split( /\n/, $scan_output ); + + # remove non-SMART devices sometimes returned + @scan_outputA = grep( !/ses[0-9]/, @scan_outputA ); # 
not a disk, but may or may not have SMART attributes + @scan_outputA = grep( !/pass[0-9]/, @scan_outputA ); # very likely a duplicate and a disk under another name + @scan_outputA = grep( !/cd[0-9]/, @scan_outputA ); # CD drive + if ( $^O eq 'freebsd' ) { + @scan_outputA = grep( !/sa[0-9]/, @scan_outputA ); # tape drive + @scan_outputA = grep( !/ctl[0-9]/, @scan_outputA ); # CAM target layer + } elsif ( $^O eq 'linux' ) { + @scan_outputA = grep( !/st[0-9]/, @scan_outputA ); # SCSI tape drive + @scan_outputA = grep( !/ht[0-9]/, @scan_outputA ); # ATA tape drive } - push( @argumentsA, $arguments ); + # make the first pass, figuring out what all we have and trimming comments + foreach my $arguments (@scan_outputA) { + my $name = $arguments; - } ## end foreach my $arguments (@scan_outputA) + $arguments =~ s/ \#.*//; # trim the comment out of the argument + $name =~ s/ .*//; + $name =~ s/\/dev\///; + if ( defined( $found_disks_names{$name} ) ) { + $found_disks_names{$name}++; + } else { + $found_disks_names{$name} = 0; + } - # second pass, putting the lines together - my %current_disk; - my $drive_lines = ''; - foreach my $arguments (@argumentsA) { - my $name = $arguments; - $name =~ s/ .*//; - $name =~ s/\/dev\///; - - if ( $found_disks_names{$name} == 0 ) { - # If no other devices, just name it after the base device. - $drive_lines = $drive_lines . $name . " " . $arguments . "\n"; - } else { - # if more than one, start at zero and increment, apennding comma number to the base device name - if ( defined( $current_disk{$name} ) ) { - $current_disk{$name}++; + push( @argumentsA, $arguments ); + + } ## end foreach my $arguments (@scan_outputA) + + # second pass, putting the lines together + my %current_disk; + foreach my $arguments (@argumentsA) { + my $name = $arguments; + $name =~ s/ .*//; + $name =~ s/\/dev\///; + + if ( $found_disks_names{$name} == 0 ) { + # If no other devices, just name it after the base device. + $drive_lines = $drive_lines . $name . " " . 
$arguments . "\n"; } else { - $current_disk{$name} = 0; + # if more than one, start at zero and increment, apennding comma number to the base device name + if ( defined( $current_disk{$name} ) ) { + $current_disk{$name}++; + } else { + $current_disk{$name} = 0; + } + $drive_lines = $drive_lines . $name . "," . $current_disk{$name} . " " . $arguments . "\n"; } - $drive_lines = $drive_lines . $name . "," . $current_disk{$name} . " " . $arguments . "\n"; + + } ## end foreach my $arguments (@argumentsA) + } ## end if ( $scan_modes->{'scan-open'} || $scan_modes...) + + # + # + # scan mode handler for cciss_vol_status + # /dev/sg* devices for cciss on Linux + # /dev/ccis* devices for cciss on FreeBSD + # + # + if ( $scan_modes->{'cciss_vol_status'} && ( $^O eq 'linux' || $^O eq 'freebsd' ) ) { + my $cciss; + if ( $^O eq 'freebsd' ) { + $cciss = 'ciss'; + } elsif ( $^O eq 'linux' ) { + $cciss = 'sg'; } - } ## end foreach my $arguments (@argumentsA) + # generate the initial device path that will be checked + my $sg_int = 0; + my $device = '/dev/' . $cciss . $sg_int; + + my $sg_process = 1; + if ( -e $device ) { + my $output = `which cciss_vol_status 2> /dev/null`; + if ( $? != 0 && !$opts{C} ) { + $sg_process = 0; + $drive_lines + = $drive_lines + . "# -C not given, but " + . $device + . " exists and cciss_vol_status is not present\n" + . "# in path or 'ccis_vol_status -V " + . $device + . "' is failing\n"; + } ## end if ( $? != 0 && !$opts{C} ) + } ## end if ( -e $device ) + my $seen_lines = {}; + while ( -e $device && $sg_process ) { + my $output = `cciss_vol_status -V $device 2> /dev/null`; + if ( $? != 0 && $output eq '' && !$opts{C} ) { + # just empty here as we just want to skip it if it fails and there is no C + # warning is above + } elsif ( $? != 0 && $output eq '' && $opts{C} ) { + my $drive_count = 0; + my $continue = 1; + while ($continue) { + my $output = `$smartctl -A $device -d cciss,$drive_count 2> /dev/null`; + if ( $? 
!= 0 ) { + $continue = 0; + } else { + $continue = 0; + my $add_it = 0; + # if we have smart data for this device, process it + while ( $output =~ /(?i)START OF READ SMART DATA SECTION(.*)/g && !$continue ) { + $continue = 1; + my $id; + while ( $output =~ /(?i)Serial Number:(.*)/g ) { + $id = $1; + $id =~ s/^\s+|\s+$//g; + } + if ( defined($id) && !defined( $seen_lines->{$id} ) ) { + $add_it = 1; + $seen_lines->{$id} = 1; + } + } ## end while ( $output =~ /(?i)START OF READ SMART DATA SECTION(.*)/g...) + if ( $continue && $add_it ) { + $drive_lines + = $drive_lines + . $cciss . '0-' + . $drive_count . ' ' + . $device + . ' -d cciss,' + . $drive_count . "\n"; + } + } ## end else [ if ( $? != 0 ) ] + $drive_count++; + } ## end while ($continue) + } else { + my $sg_drive_int = 0; + my $drive_count = 0; + # count the connector lines, this will make sure failed are founded as well + while ( $output =~ /(connector +\d.*box +\d.*bay +\d.*)/g ) { + if ( !defined( $seen_lines->{$1} ) ) { + $seen_lines->{$1} = 1; + $drive_count++; + } + } + my $drive_int = 0; + while ( $drive_int < $drive_count ) { + $drive_lines + = $drive_lines . $cciss . '0-' . $drive_int . ' ' . $device . ' -d cciss,' . $drive_int . "\n"; - print "useSN=1\n" . 'smartctl=' . $smartctl . "\n" . $cache . $drive_lines; + $drive_int++; + } + } ## end else [ if ( $? != 0 && $output eq '' && !$opts{C})] + + $sg_int++; + $device = '/dev/' . $cciss . $sg_int; + } ## end while ( -e $device && $sg_process ) + } ## end if ( $scan_modes->{'cciss_vol_status'} && ...) + + my $useSN = 1; + if ( $opts{S} ) { + $useSN = 0; + } + + print '# scan_modes=' + . $opts{G} + . "\nuseSN=" + . $useSN . "\n" + . 'smartctl=' + . $smartctl . "\n" + . $cache + . $drive_lines; exit 0; } ## end if ( defined( $opts{g} ) ) @@ -213,7 +417,11 @@ open( my $readfh, "<", $config ) or die "Can't open '" . $config . 
"'"; read( $readfh, $config_file, 1000000 ); close($readfh); -#parse the config file and remove comments and empty lines +# +# +# parse the config file and remove comments and empty lines +# +# my @configA = split( /\n/, $config_file ); @configA = grep( !/^$/, @configA ); @configA = grep( !/^\#/, @configA ); @@ -269,6 +477,11 @@ if ( !defined( $opts{u} ) ) { } } ## end if ( !defined( $opts{u} ) ) +# +# +# Process each disk +# +# foreach my $line (@disks) { my $disk; my $name; @@ -278,12 +491,11 @@ foreach my $line (@disks) { $disk = $line; $name = $line; } - my $output; if ( $disk !~ /\// ) { $disk = '/dev/' . $disk; } - $output = `$smartctl -A $disk`; - my %IDs = ( + my $output = `$smartctl -A $disk`; + my %IDs = ( '5' => 'null', '10' => 'null', '173' => 'null', @@ -299,218 +511,294 @@ foreach my $line (@disks) { '198' => 'null', '199' => 'null', '231' => 'null', + '232' => 'null', '233' => 'null', '9' => 'null', 'disk' => $disk, 'serial' => undef, 'selftest_log' => undef, 'health_pass' => 0, + max_temp => 'null', + exit => $?, ); $IDs{'disk'} =~ s/^\/dev\///; - my @outputA; - - if ( $output =~ /NVMe Log/ ) { - # we have an NVMe drive with annoyingly different output - my %mappings = ( - 'Temperature' => 194, - 'Power Cycles' => 12, - 'Power On Hours' => 9, - 'Percentage Used' => 231, - ); - foreach ( split( /\n/, $output ) ) { - if (/:/) { - my ( $key, $val ) = split(/:/); - $val =~ s/^\s+|\s+$|\D+//g; - if ( exists( $mappings{$key} ) ) { - if ( $mappings{$key} == 231 ) { - $IDs{ $mappings{$key} } = 100 - $val; - } else { - $IDs{ $mappings{$key} } = $val; - } - } - } ## end if (/:/) - } ## end foreach ( split( /\n/, $output ) ) - + # if polling exited non-zero above, no reason running the rest of the checks + my $disk_id = $name; + if ( $IDs{exit} != 0 ) { + $to_return->{data}{exit_nonzero}++; } else { - @outputA = split( /\n/, $output ); - my $outputAint = 0; - while ( defined( $outputA[$outputAint] ) ) { - my $line = $outputA[$outputAint]; - $line =~ s/^ +//; - 
$line =~ s/ +/ /g; - - if ( $line =~ /^[0123456789]+ / ) { - my @lineA = split( /\ /, $line, 10 ); - my $raw = $lineA[9]; - my $normalized = $lineA[3]; - my $id = $lineA[0]; - - # Crucial SSD - # 202, Percent_Lifetime_Remain, same as 231, SSD Life Left - if ( $id == 202 ) { - $IDs{231} = $raw; - } + my @outputA; + + if ( $output =~ /NVMe Log/ ) { + # we have an NVMe drive with annoyingly different output + my %mappings = ( + 'Temperature' => 194, + 'Power Cycles' => 12, + 'Power On Hours' => 9, + 'Percentage Used' => 231, + ); + foreach ( split( /\n/, $output ) ) { + if (/:/) { + my ( $key, $val ) = split(/:/); + $val =~ s/^\s+|\s+$|\D+//g; + if ( exists( $mappings{$key} ) ) { + if ( $mappings{$key} == 231 ) { + $IDs{ $mappings{$key} } = 100 - $val; + } else { + $IDs{ $mappings{$key} } = $val; + } + } + } ## end if (/:/) + } ## end foreach ( split( /\n/, $output ) ) - # single int raw values - if ( ( $id == 5 ) - || ( $id == 10 ) - || ( $id == 173 ) - || ( $id == 183 ) - || ( $id == 184 ) - || ( $id == 187 ) - || ( $id == 196 ) - || ( $id == 197 ) - || ( $id == 198 ) - || ( $id == 199 ) ) - { - my @rawA = split( /\ /, $raw ); - $IDs{$id} = $rawA[0]; - } ## end if ( ( $id == 5 ) || ( $id == 10 ) || ( $id...)) - - # single int normalized values - if ( ( $id == 177 ) - || ( $id == 231 ) - || ( $id == 233 ) ) - { - $IDs{$id} = int($normalized); - } + } else { + @outputA = split( /\n/, $output ); + my $outputAint = 0; + while ( defined( $outputA[$outputAint] ) ) { + my $line = $outputA[$outputAint]; + $line =~ s/^ +//; + $line =~ s/ +/ /g; + + if ( $line =~ /^[0123456789]+ / ) { + my @lineA = split( /\ /, $line, 10 ); + my $raw = $lineA[9]; + my $normalized = $lineA[3]; + my $id = $lineA[0]; + + # Crucial SSD + # 202, Percent_Lifetime_Remain, same as 231, SSD Life Left + if ( $id == 202 + && $line =~ /Percent_Lifetime_Remain/ ) + { + $IDs{231} = $raw; + } - # 9, power on hours - if ( $id == 9 ) { - my @runtime = split( /[\ h]/, $raw ); - $IDs{$id} = $runtime[0]; - } + # 
single int raw values + if ( ( $id == 5 ) + || ( $id == 10 ) + || ( $id == 173 ) + || ( $id == 183 ) + || ( $id == 184 ) + || ( $id == 187 ) + || ( $id == 196 ) + || ( $id == 197 ) + || ( $id == 198 ) + || ( $id == 199 ) ) + { + my @rawA = split( /\ /, $raw ); + $IDs{$id} = $rawA[0]; + } ## end if ( ( $id == 5 ) || ( $id == 10 ) || ( $id...)) + + # single int normalized values + if ( ( $id == 177 ) + || ( $id == 230 ) + || ( $id == 231 ) + || ( $id == 232 ) + || ( $id == 233 ) ) + { + # annoying non-standard disk + # WDC WDS500G2B0A + # 230 Media_Wearout_Indicator 0x0032 100 100 --- Old_age Always - 0x002e000a002e + # 232 Available_Reservd_Space 0x0033 100 100 004 Pre-fail Always - 100 + # 233 NAND_GB_Written_TLC 0x0032 100 100 --- Old_age Always - 9816 + + if ( $id == 230 + && $line =~ /Media_Wearout_Indicator/ ) + { + $IDs{233} = int($normalized); + } elsif ( $id == 232 + && $line =~ /Available_Reservd_Space/ ) + { + $IDs{232} = int($normalized); + } else { + # only set 233 if it has not been set yet + # if it was set already then the above did it and we don't want + # to overwrite it + if ( $id == 233 && $IDs{233} eq "null" ) { + $IDs{$id} = int($normalized); + } elsif ( $id != 233 ) { + $IDs{$id} = int($normalized); + } + } ## end else [ if ( $id == 230 && $line =~ /Media_Wearout_Indicator/)] + } ## end if ( ( $id == 177 ) || ( $id == 230 ) || (...)) + + # 9, power on hours + if ( $id == 9 ) { + my @runtime = split( /[\ h]/, $raw ); + $IDs{$id} = $runtime[0]; + } - # 188, Command_Timeout - if ( $id == 188 ) { - my $total = 0; - my @rawA = split( /\ /, $raw ); - my $rawAint = 0; - while ( defined( $rawA[$rawAint] ) ) { - $total = $total + $rawA[$rawAint]; - $rawAint++; + # 188, Command_Timeout + if ( $id == 188 ) { + my $total = 0; + my @rawA = split( /\ /, $raw ); + my $rawAint = 0; + while ( defined( $rawA[$rawAint] ) ) { + $total = $total + $rawA[$rawAint]; + $rawAint++; + } + $IDs{$id} = $total; + } ## end if ( $id == 188 ) + + # 190, airflow temp + # 194, 
temp + if ( ( $id == 190 ) + || ( $id == 194 ) ) + { + my ($temp) = split( /\ /, $raw ); + $IDs{$id} = $temp; } - $IDs{$id} = $total; - } ## end if ( $id == 188 ) - - # 190, airflow temp - # 194, temp - if ( ( $id == 190 ) - || ( $id == 194 ) ) - { - my ($temp) = split( /\ /, $raw ); - $IDs{$id} = $temp; - } - } ## end if ( $line =~ /^[0123456789]+ / ) + } ## end if ( $line =~ /^[0123456789]+ / ) - # SAS Wrapping - # Section by Cameron Munroe (munroenet[at]gmail.com) + # SAS Wrapping + # Section by Cameron Munroe (munroenet[at]gmail.com) - # Elements in Grown Defect List. - # Marking as 5 Reallocated_Sector_Ct + # Elements in Grown Defect List. + # Marking as 5 Reallocated_Sector_Ct + if ( $line =~ "Elements in grown defect list:" ) { - if ( $line =~ "Elements in grown defect list:" ) { + my @lineA = split( /\ /, $line, 10 ); + my $raw = $lineA[5]; - my @lineA = split( /\ /, $line, 10 ); - my $raw = $lineA[5]; + # Reallocated Sector Count ID + $IDs{5} = $raw; - # Reallocated Sector Count ID - $IDs{5} = $raw; + } - } + # Current Drive Temperature + # Marking as 194 Temperature_Celsius + if ( $line =~ "Current Drive Temperature:" ) { - # Current Drive Temperature - # Marking as 194 Temperature_Celsius + my @lineA = split( /\ /, $line, 10 ); + my $raw = $lineA[3]; - if ( $line =~ "Current Drive Temperature:" ) { + # Temperature C ID + $IDs{194} = $raw; - my @lineA = split( /\ /, $line, 10 ); - my $raw = $lineA[3]; + } - # Temperature C ID - $IDs{194} = $raw; + # End of SAS Wrapper - } + $outputAint++; + } ## end while ( defined( $outputA[$outputAint] ) ) + } ## end else [ if ( $output =~ /NVMe Log/ ) ] - # End of SAS Wrapper - - $outputAint++; - } ## end while ( defined( $outputA[$outputAint] ) ) - } ## end else [ if ( $output =~ /NVMe Log/ ) ] - - #get the selftest logs - $output = `$smartctl -l selftest $disk`; - @outputA = split( /\n/, $output ); - my @completed = grep( /Completed without error/, @outputA ); - $IDs{'completed'} = scalar @completed; - my 
@interrupted = grep( /Interrupted/, @outputA ); - $IDs{'interrupted'} = scalar @interrupted; - my @read_failure = grep( /read failure/, @outputA ); - $IDs{'read_failure'} = scalar @read_failure; - my @unknown_failure = grep( /unknown failure/, @outputA ); - $IDs{'unknown_failure'} = scalar @unknown_failure; - my @extended = grep( /Extended/, @outputA ); - $IDs{'extended'} = scalar @extended; - my @short = grep( /Short/, @outputA ); - $IDs{'short'} = scalar @short; - my @conveyance = grep( /Conveyance/, @outputA ); - $IDs{'conveyance'} = scalar @conveyance; - my @selective = grep( /Selective/, @outputA ); - $IDs{'selective'} = scalar @selective; - - # if we have logs, actually grab the log output - if ( $IDs{'completed'} > 0 - || $IDs{'interrupted'} > 0 - || $IDs{'read_failure'} > 0 - || $IDs{'extended'} > 0 - || $IDs{'short'} > 0 - || $IDs{'conveyance'} > 0 - || $IDs{'selective'} > 0 ) - { - my @log_lines; - push( @log_lines, @extended, @short, @conveyance, @selective ); - $IDs{'selftest_log'} = join( "\n", sort(@log_lines) ); - } ## end if ( $IDs{'completed'} > 0 || $IDs{'interrupted'...}) - - # get the drive serial number, if needed - my $disk_id = $name; - $output=`$smartctl -i $disk`; - while ( $output =~ /(?i)Serial Number:(.*)/g ) { - $IDs{'serial'} = $1; - $IDs{'serial'} =~ s/^\s+|\s+$//g; - } - if ($useSN) { - $disk_id = $IDs{'serial'}; - } + #get the selftest logs + $output = `$smartctl -l selftest $disk`; + @outputA = split( /\n/, $output ); + my @completed = grep( /Completed/, @outputA ); + $IDs{'completed'} = scalar @completed; + my @interrupted = grep( /Interrupted/, @outputA ); + $IDs{'interrupted'} = scalar @interrupted; + my @read_failure = grep( /read failure/, @outputA ); + $IDs{'read_failure'} = scalar @read_failure; + my @unknown_failure = grep( /unknown failure/, @outputA ); + $IDs{'unknown_failure'} = scalar @unknown_failure; + my @extended = grep( /\d.*\ ([Ee]xtended|[Ll]ong).*(?![Dd]uration)/, @outputA ); + $IDs{'extended'} = scalar 
@extended; + my @short = grep( /[Ss]hort/, @outputA ); + $IDs{'short'} = scalar @short; + my @conveyance = grep( /[Cc]onveyance/, @outputA ); + $IDs{'conveyance'} = scalar @conveyance; + my @selective = grep( /[Ss]elective/, @outputA ); + $IDs{'selective'} = scalar @selective; + my @offline = grep( /(\d|[Bb]ackground|[Ff]oreground)+\ +[Oo]ffline/, @outputA ); + $IDs{'offline'} = scalar @offline; + + # if we have logs, actually grab the log output + if ( $IDs{'completed'} > 0 + || $IDs{'interrupted'} > 0 + || $IDs{'read_failure'} > 0 + || $IDs{'extended'} > 0 + || $IDs{'short'} > 0 + || $IDs{'conveyance'} > 0 + || $IDs{'selective'} > 0 + || $IDs{'offline'} > 0 ) + { + my @headers = grep( /(Num\ +Test.*LBA| Description .*[Hh]ours)/, @outputA ); + + my @log_lines; + push( @log_lines, @extended, @short, @conveyance, @selective, @offline ); + $IDs{'selftest_log'} = join( "\n", @headers, sort(@log_lines) ); + } ## end if ( $IDs{'completed'} > 0 || $IDs{'interrupted'...}) + + # get the drive serial number, if needed + $disk_id = $name; + $output = `$smartctl -i $disk`; + # generally upper case, HP branded drives seem to report with lower case n + while ( $output =~ /(?i)Serial Number:(.*)/g ) { + $IDs{'serial'} = $1; + $IDs{'serial'} =~ s/^\s+|\s+$//g; + } + if ($useSN) { + $disk_id = $IDs{'serial'}; + } - while ( $output =~ /(?i)Model Family:(.*)/g ) { - $IDs{'model_family'} = $1; - $IDs{'model_family'} =~ s/^\s+|\s+$//g; - } + while ( $output =~ /(?i)Model Family:(.*)/g ) { + $IDs{'model_family'} = $1; + $IDs{'model_family'} =~ s/^\s+|\s+$//g; + } - while ( $output =~ /(?i)Device Model:(.*)/g ) { - $IDs{'device_model'} = $1; - $IDs{'device_model'} =~ s/^\s+|\s+$//g; - } + while ( $output =~ /(?i)Device Model:(.*)/g ) { + $IDs{'device_model'} = $1; + $IDs{'device_model'} =~ s/^\s+|\s+$//g; + } - while ( $output =~ /(?i)Model Number:(.*)/g ) { - $IDs{'model_number'} = $1; - $IDs{'model_number'} =~ s/^\s+|\s+$//g; - } + while ( $output =~ /(?i)Model Number:(.*)/g ) { + 
$IDs{'model_number'} = $1; + $IDs{'model_number'} =~ s/^\s+|\s+$//g; + } - while ( $output =~ /(?i)Firmware Version:(.*)/g ) { - $IDs{'fw_version'} = $1; - $IDs{'fw_version'} =~ s/^\s+|\s+$//g; - } + while ( $output =~ /(?i)Firmware Version:(.*)/g ) { + $IDs{'fw_version'} = $1; + $IDs{'fw_version'} =~ s/^\s+|\s+$//g; + } - $output = `$smartctl -H $disk`; - if ( $output =~ /SMART\ overall\-health\ self\-assessment\ test\ result\:\ PASSED/ ) { - $IDs{'health_pass'} = 1; - } + # mainly HP drives + while ( $output =~ /(?i)Vendor:(.*)/g ) { + $IDs{'vendor'} = $1; + $IDs{'vendor'} =~ s/^\s+|\s+$//g; + } + + # mainly HP drives + while ( $output =~ /(?i)Product:(.*)/g ) { + $IDs{'product'} = $1; + $IDs{'product'} =~ s/^\s+|\s+$//g; + } - $to_return->{data}{disks}{$disk_id} = \%IDs; + # mainly HP drives + while ( $output =~ /(?i)Revision:(.*)/g ) { + $IDs{'revision'} = $1; + $IDs{'revision'} =~ s/^\s+|\s+$//g; + } + + # figure out what to use for the max temp, if there is one + if ( $IDs{'190'} =~ /^\d+$/ ) { + $IDs{max_temp} = $IDs{'190'}; + } elsif ( $IDs{'194'} =~ /^\d+$/ ) { + $IDs{max_temp} = $IDs{'194'}; + } + if ( $IDs{'194'} =~ /^\d+$/ && defined( $IDs{max_temp} ) && $IDs{'194'} > $IDs{max_temp} ) { + $IDs{max_temp} = $IDs{'194'}; + } + $output = `$smartctl -H $disk`; + if ( $output =~ /SMART\ overall\-health\ self\-assessment\ test\ result\:\ PASSED/ ) { + $IDs{'health_pass'} = 1; + } elsif ( $output =~ /SMART\ Health\ Status\:\ OK/ ) { + $IDs{'health_pass'} = 1; + } + + if ( !$IDs{'health_pass'} ) { + $to_return->{data}{unhealthy}++; + } + } ## end else [ if ( $IDs{exit} != 0 ) ] + + # only bother to save this if useSN is not being used + if ( !$useSN ) { + $to_return->{data}{disks}{$disk_id} = \%IDs; + } } ## end foreach my $line (@disks) my $toReturn = $json->encode($to_return); @@ -519,14 +807,16 @@ if ( !$opts{p} ) { $toReturn = $toReturn . 
"\n"; } -if ($opts{Z}) { - my $compressed = encode_base64( gzip($toReturn) ); +if ( $opts{Z} ) { + my $toReturnCompressed; + gzip \$toReturn => \$toReturnCompressed; + my $compressed = encode_base64($toReturnCompressed); $compressed =~ s/\n//g; $compressed = $compressed . "\n"; if ( length($compressed) < length($toReturn) ) { - $toReturn=$compressed; + $toReturn = $compressed; } -} +} ## end if ( $opts{Z} ) if ( !$noWrite ) { open( my $writefh, ">", $cache ) or die "Can't open '" . $cache . "'"; From 94e4a3d6862c183f95a365a87355e68b040872c9 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Wed, 28 Jun 2023 08:37:37 -0500 Subject: [PATCH 380/497] various smart-v1 fixes (#477) * use $sg_int when composing lines for cciss devs * more cleanup for cciss stuff for when it does not recognize a device * use -i with smart for the cciss -C test * add in virt checking * fix regex typos for virt check --- snmp/smart-v1 | 72 ++++++++++++++++++++++++++++++--------------------- 1 file changed, 43 insertions(+), 29 deletions(-) diff --git a/snmp/smart-v1 b/snmp/smart-v1 index d3b9bbdd6..9e58e1d96 100755 --- a/snmp/smart-v1 +++ b/snmp/smart-v1 @@ -269,22 +269,36 @@ if ( defined( $opts{g} ) ) { # second pass, putting the lines together my %current_disk; foreach my $arguments (@argumentsA) { + my $not_virt = 1; + + # check to see if we have a virtual device + my @virt_check = split( /\n/, `smartctl -i $arguments 2> /dev/null` ); + foreach my $virt_check_line (@virt_check) { + if ( $virt_check_line =~ /(?i)Product\:.*LOGICAL VOLUME/ ) { + $not_virt = 0; + } + } + my $name = $arguments; $name =~ s/ .*//; $name =~ s/\/dev\///; - if ( $found_disks_names{$name} == 0 ) { - # If no other devices, just name it after the base device. - $drive_lines = $drive_lines . $name . " " . $arguments . 
"\n"; - } else { - # if more than one, start at zero and increment, apennding comma number to the base device name - if ( defined( $current_disk{$name} ) ) { - $current_disk{$name}++; + # only add it if not a virtual RAID drive + # HP RAID virtual disks will show up with very basical but totally useless smart data + if ($not_virt) { + if ( $found_disks_names{$name} == 0 ) { + # If no other devices, just name it after the base device. + $drive_lines = $drive_lines . $name . " " . $arguments . "\n"; } else { - $current_disk{$name} = 0; + # if more than one, start at zero and increment, apennding comma number to the base device name + if ( defined( $current_disk{$name} ) ) { + $current_disk{$name}++; + } else { + $current_disk{$name} = 0; + } + $drive_lines = $drive_lines . $name . "," . $current_disk{$name} . " " . $arguments . "\n"; } - $drive_lines = $drive_lines . $name . "," . $current_disk{$name} . " " . $arguments . "\n"; - } + } ## end if ($not_virt) } ## end foreach my $arguments (@argumentsA) } ## end if ( $scan_modes->{'scan-open'} || $scan_modes...) @@ -333,25 +347,20 @@ if ( defined( $opts{g} ) ) { my $drive_count = 0; my $continue = 1; while ($continue) { - my $output = `$smartctl -A $device -d cciss,$drive_count 2> /dev/null`; + my $output = `$smartctl -i $device -d cciss,$drive_count 2> /dev/null`; if ( $? != 0 ) { $continue = 0; } else { - $continue = 0; my $add_it = 0; - # if we have smart data for this device, process it - while ( $output =~ /(?i)START OF READ SMART DATA SECTION(.*)/g && !$continue ) { - $continue = 1; - my $id; - while ( $output =~ /(?i)Serial Number:(.*)/g ) { - $id = $1; - $id =~ s/^\s+|\s+$//g; - } - if ( defined($id) && !defined( $seen_lines->{$id} ) ) { - $add_it = 1; - $seen_lines->{$id} = 1; - } - } ## end while ( $output =~ /(?i)START OF READ SMART DATA SECTION(.*)/g...) 
+ my $id; + while ( $output =~ /(?i)Serial Number:(.*)/g ) { + $id = $1; + $id =~ s/^\s+|\s+$//g; + } + if ( defined($id) && !defined( $seen_lines->{$id} ) ) { + $add_it = 1; + $seen_lines->{$id} = 1; + } if ( $continue && $add_it ) { $drive_lines = $drive_lines @@ -365,8 +374,7 @@ if ( defined( $opts{g} ) ) { $drive_count++; } ## end while ($continue) } else { - my $sg_drive_int = 0; - my $drive_count = 0; + my $drive_count = 0; # count the connector lines, this will make sure failed are founded as well while ( $output =~ /(connector +\d.*box +\d.*bay +\d.*)/g ) { if ( !defined( $seen_lines->{$1} ) ) { @@ -377,10 +385,16 @@ if ( defined( $opts{g} ) ) { my $drive_int = 0; while ( $drive_int < $drive_count ) { $drive_lines - = $drive_lines . $cciss . '0-' . $drive_int . ' ' . $device . ' -d cciss,' . $drive_int . "\n"; + = $drive_lines + . $cciss + . $sg_int . '-' + . $drive_int . ' ' + . $device + . ' -d cciss,' + . $drive_int . "\n"; $drive_int++; - } + } ## end while ( $drive_int < $drive_count ) } ## end else [ if ( $? != 0 && $output eq '' && !$opts{C})] $sg_int++; From a8bdb282a14071da1f80ecd4cafa02292dd16aa3 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Wed, 28 Jun 2023 11:23:39 -0500 Subject: [PATCH 381/497] add the ability to run tests on all specified devices via -t for smart-v1 (#478) --- snmp/smart-v1 | 56 ++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 55 insertions(+), 1 deletion(-) diff --git a/snmp/smart-v1 b/snmp/smart-v1 index 9e58e1d96..20da1a944 100755 --- a/snmp/smart-v1 +++ b/snmp/smart-v1 @@ -76,6 +76,7 @@ Switches: -g Guess at the config and print it to STDOUT -C Enable manual checking for guess and cciss. -S Set useSN to 0 when using -g +-t Run the specified smart self test on all the devices. -G Guess modes to use. This is a comma seperated list. Default :: scan-open,cciss-vol-status @@ -121,9 +122,11 @@ sub main::HELP_MESSAGE { -Z GZip+Base64 compress the results. 
-C Enable manual checking for guess and cciss. -S Set useSN to 0 when using -g +-t Run the specified smart self test on all the devices. -G Guess modes to use. This is a comma seperated list. Default :: scan-open,cciss-vol-status + Scan Modes: - scan :: Use "--scan" with smartctl. "scan-open" will take presidence. @@ -140,7 +143,7 @@ Scan Modes: #gets the options my %opts = (); -getopts( 'ugc:pZhvCSG', \%opts ); +getopts( 'ugc:pZhvCSGt:', \%opts ); if ( $opts{h} ) { &HELP_MESSAGE; @@ -472,10 +475,61 @@ while ( defined( $configA[$configA_int] ) ) { $configA_int++; } ## end while ( defined( $configA[$configA_int] ) ) +# +# +# run the specified self test on all disks if asked +# +# +if ( defined( $opts{t} ) ) { + + # make sure we have something that atleast appears sane for the test name + my $valid_tesks = { + 'offline' => 1, + 'short' => 1, + 'long' => 1, + 'conveyance' => 1, + 'afterselect,on' => 1, + }; + if ( !defined( $valid_tesks->{ $opts{t} } ) && $opts{t} !~ /select,(\d+[\-\+]\d+|next|next\+\d+|redo\+\d+)/ ) { + print '"' . $opts{t} . "\" does not appear to be a valid test\n"; + exit 1; + } + + print "Running the SMART $opts{t} on all devices in the config...\n\n"; + + foreach my $line (@disks) { + my $disk; + my $name; + if ( $line =~ /\ / ) { + ( $name, $disk ) = split( /\ /, $line, 2 ); + } else { + $disk = $line; + $name = $line; + } + if ( $disk !~ /\// ) { + $disk = '/dev/' . $disk; + } + + print "\n------------------------------------------------------------------\nDoing " + . $smartctl . ' -t ' + . $opts{t} . ' ' + . $disk + . " ...\n\n"; + print `$smartctl -t $opts{t} $disk` . 
"\n"; + + } ## end foreach my $line (@disks) + + exit 0; +} ## end if ( defined( $opts{t} ) ) + #if set to 1, no cache will be written and it will be printed instead my $noWrite = 0; +# +# # if no -u, it means we are being called from snmped +# +# if ( !defined( $opts{u} ) ) { # if the cache file exists, print it, otherwise assume one is not being used if ( -f $cache ) { From a510a76c8dd4d0079747136977b41934997aca6e Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Sun, 2 Jul 2023 19:01:13 -0500 Subject: [PATCH 382/497] smart-v1: add the ability to run tests on all configured devs via the extend and fix when useSN=1 (#479) * add the ability to run tests on all specified devices via -t * properly save the results when exit is non-zero and useSN=1 --- snmp/smart-v1 | 2 ++ 1 file changed, 2 insertions(+) diff --git a/snmp/smart-v1 b/snmp/smart-v1 index 20da1a944..0df9abd73 100755 --- a/snmp/smart-v1 +++ b/snmp/smart-v1 @@ -866,6 +866,8 @@ foreach my $line (@disks) { # only bother to save this if useSN is not being used if ( !$useSN ) { $to_return->{data}{disks}{$disk_id} = \%IDs; + } elsif ( $IDs{exit} == 0 ) { + $to_return->{data}{disks}{$disk_id} = \%IDs; } } ## end foreach my $line (@disks) From a1a53da8c3eee6999e13c871bcf73b347efd3b8c Mon Sep 17 00:00:00 2001 From: "Zane C. 
Bowers-Hadley" Date: Mon, 3 Jul 2023 15:49:18 -0500 Subject: [PATCH 383/497] smart-v1: send useSN value along in the data (#480) * send useSN along in the data * remove a extra , --- snmp/smart-v1 | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/snmp/smart-v1 b/snmp/smart-v1 index 0df9abd73..91c2710f9 100755 --- a/snmp/smart-v1 +++ b/snmp/smart-v1 @@ -187,13 +187,6 @@ if ( !$opts{g} ) { } } -my $to_return = { - data => { disks => {}, exit_nonzero => 0, unhealthy => 0, }, - version => 1, - error => 0, - errorString => '', -}; - # # # guess if asked @@ -550,6 +543,12 @@ if ( !defined( $opts{u} ) ) { # Process each disk # # +my $to_return = { + data => { disks => {}, exit_nonzero => 0, unhealthy => 0, useSN => $useSN }, + version => 1, + error => 0, + errorString => '', +}; foreach my $line (@disks) { my $disk; my $name; From 0347897a9fd1e3ffb66e41c92ad7b74fec59d590 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Tue, 4 Jul 2023 23:32:30 -0500 Subject: [PATCH 384/497] add the logsize extend (#481) * add initial logsize extend * add long chomp * pretty it and now make save the return file even if -b was not specified * errpr->error * clean up the code a bit more * more cleanup * add some more set bits * de-fuck it * add total size as well as the max and min stats between alls ets * add no_minus_d * no_minus_d fix * add .json * add log * minor logic tweaks * make no_minus_d the default * completely rework it and make it overall more stable * fix set size handling * no longer include no_minus_d and also fix date chomping * add old cache file removal and docs --- snmp/logsize | 489 +++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 489 insertions(+) create mode 100755 snmp/logsize diff --git a/snmp/logsize b/snmp/logsize new file mode 100755 index 000000000..cecb7ea31 --- /dev/null +++ b/snmp/logsize @@ -0,0 +1,489 @@ +#!/usr/bin/env perl + +=head1 NAME + +logsize - LinbreNMS JSON extend for getting log file 
size monitoring. + +=head1 SYNOPSIS + +logsize [B<-b>] [B<-f> ] + +=head1 SWITCHES + +=head2 -b + +Compress the return via GZip+Base64. + +=head2 -f + +The config file to use. + +=head1 SETUP + +Install the depends. + + # FreeBSD + pkg install p5-File-Find-Rule p5-JSON p5-TOML p5-Time-Piece p5-MIME-Base64 p5-File-Slurp p5-Statistics-Lite + # Debian + apt-get install cpanminus + cpanm File::Find::Rule JSON TOML Time::Piece MIME::Base64 File::Slurp Statistics::Lite + +Create the cache dir, by default "/var/cache/logsize_extend/". + +Either make sure SNMPD can write to the cache dir, by default "/var/cache/logsize_extend/", or +set it up in cron and make sure SNMPD can write to it. + +Then set it up in SNMPD. + + + # if running it via cron + extend logsize /usr/local/etc/snmp/extends/logsize -b + + # if using cron + extend logsize /bin/cat /var/cache/logsize_extend/extend_return + +=head1 CONFIG + +The config format used is TOML. + +Please note that variable part of log_end and log_chomp is dynamically generated at +run time only if those various are undef. log_end and log_chomp if you want to custamize +them are better placed in dir specific sections. + +In general best to leave these defaults alone. + + - .cache_dir :: The cache dir to use. + - Default :: /var/cache/logsize_extend/ + + - .log_end :: Log file ends to look for. $today_name is '%F' and + $today_name_alt1 is '%Y%m%d'. + - Default :: [ '*.log', '*.today', '*.json', '*log', + '*-$today_name', '*-$today_name_alt1' ] + + - .max_age :: How long to keep a file in the cache in days. + - Default :: 30 + + - .log_chomp :: The regexp to use for chomping the the logfiles to get the base + log file name to use for reporting. $today_name is '%F' and + $today_name_alt1 is '%Y%m%d'. + - Default :: ((\-\d\d\d\d\d\d\d\d)*\.log|\.today|\.json|\-$today_name|\-$today_name_alt1)$ + +The log specific sections resize under .set so if we want to create a set named var_log, the hash +would be .set.var_log . 
+ + [sets.var_log] + dir="/var/log/" + +Sets inherit all the configured .log_end and the .log_chomp variables. Each set must have +the value dir defined. + + - .sets.*.dir :: The directory to look under for logs. + - Default :: undef + +So if we want to create a set named foobar that looks under /var/log/foo for files ending in foo or bar, +it would be like below. + + [sets.foobar] + dir="/var/log/foo/" + log_end=["*.foo", "*.bar"] + log_chomp="\.(foo|bar)$" + +Multiple sets may be defined. Below creates var_log, suricata, and suricata_flows. + + [sets.var_log] + dir="/var/log/" + [sets.suricata] + dir="/var/log/suricata/" + [sets.suricata_flows] + dir="/var/log/suricata/flows/current" + +=head1 RETURNED DATA + +This is in in reference to .data in the returned JSON. + + - .failes_sets :: A hash where the keys are they name of the failed set + and values are the error in question. + - .max :: Max size of all log files. + - .mean :: Mean size of all log files. + - .median :: Median size of all log files. + - .min :: Min size of all log files. + - .sets.*.files :: A hash where the keys are the names of the log files found for the current + set and the value is the size of the file. + - .sets.*.mode :: Mode size of log files in the current set. + - .sets.*.max :: Max size of log files in the current set. + - .sets.*.mean :: Mean size of log files in the current set. + - .sets.*.median :: Median size of log files in the current set. + - .sets.*.min :: Min size of log files in the current set. + - .sets.*.mode :: Mode size of log files in the current set. + - .sets.*.size :: Total size of the current set. + - .sets.*.unseen :: A list of files seen in the past 7 days but not currently present. + - .size :: Total size of all sets. 
+ +=cut + +use warnings; +use strict; +use File::Find::Rule; +use JSON; +use Getopt::Std; +use TOML; +use Time::Piece; +use MIME::Base64; +use IO::Compress::Gzip qw(gzip $GzipError); +use File::Slurp; +use Statistics::Lite qw(:all); + +$Getopt::Std::STANDARD_HELP_VERSION = 1; + +sub main::VERSION_MESSAGE { + print "LibreNMS logsize extend 0.0.1\n"; +} + +sub main::HELP_MESSAGE { + print ' + +-f Path to the config file. + Default :: /usr/local/etc/logsize.conf + +-b Gzip+Base64 compress the output. +'; +} + +my $return_json = { + error => 0, + errorString => '', + version => 1, + data => { + sets => {}, + failed_sets => {}, + max => undef, + mean => undef, + median => undef, + mode => undef, + min => undef, + size => 0, + }, +}; + +# get current time and time stamp of today +my $t = localtime; +my $today_name = $t->strftime('%F'); +my $today_name_alt1 = $t->strftime('%Y%m%d'); + +#gets the options +my %opts = (); +getopts( 'f:b', \%opts ); +if ( !defined( $opts{f} ) ) { + $opts{f} = '/usr/local/etc/logsize.conf'; +} + +# if the config does not exist or is not readable, no point in continuing +if ( !-f $opts{f} ) { + $return_json->{error} = 1; + $return_json->{errorString} = $opts{f} . ' is not a file or does not eixst'; + print encode_json($return_json) . "\n"; + exit 1; +} elsif ( !-r $opts{f} ) { + $return_json->{error} = 2; + $return_json->{errorString} = $opts{f} . ' is not readable'; + print encode_json($return_json) . "\n"; + exit 2; +} + +# reads in the config +my $config; +my $err; +eval { + my $raw_toml = read_file( $opts{f} ); + ( $config, $err ) = from_toml($raw_toml); +}; +if ($@) { + $return_json->{error} = 3; + $return_json->{errorString} = $opts{f} . ' errored reading or parsing... ' . $@; + print encode_json($return_json) . "\n"; + exit 3; +} elsif ( !$config ) { + $return_json->{error} = 4; + $return_json->{errorString} = $opts{f} . ' errored parsing... ' . $err; + print encode_json($return_json) . 
"\n"; + exit 4; +} + +# can't do anything if there are no sets +if ( !defined( $config->{sets} ) ) { + $return_json->{error} = 5; + $return_json->{errorString} = $opts{f} . ' does not contain any defined sets'; + print encode_json($return_json) . "\n"; + exit 5; +} + +# set the default cache dir +if ( !defined( $config->{cache_dir} ) ) { + $config->{cache_dir} = '/var/cache/logsize_extend/'; +} + +# make sure we have something we can use for log end +if ( !defined( $config->{log_end} ) ) { + $config->{log_end} = [ '*.log', '*.today', '*.json', '*log', '*-' . $today_name, '*-' . $today_name_alt1 ]; +} else { + if ( ref( $config->{log_end} ) ne 'ARRAY' ) { + $return_json->{error} = 8; + $return_json->{errorString} = 'The cache_dir, "' . $config->{cache_dir} . '", is not a '; + print encode_json($return_json) . "\n"; + exit 8; + } +} + +# set the default log chomp +if ( !defined( $config->{log_chomp} ) ) { + $config->{log_chomp} + = '((\-\d\d\d\d\d\d\d\d)*\.log|\.today|\.json|\-' . $today_name . '|\-' . $today_name_alt1 . ')$'; +} + +# how long to keep a file in the cache +if ( !defined( $config->{max_age} ) ) { + $config->{max_age} = 30; +} + +# if it exists, make sure it is a directory +if ( -e $config->{cache_dir} && !-d $config->{cache_dir} ) { + $return_json->{error} = 6; + $return_json->{errorString} = 'The cache_dir, "' . $config->{cache_dir} . '", is not a '; + print encode_json($return_json) . "\n"; + exit 6; +} elsif ( !-e $config->{cache_dir} ) { + eval { mkdir( $config->{cache_dir} ) or die('failed'); }; + if ($@) { + $return_json->{error} = 7; + $return_json->{errorString} = 'The cache_dir, "' . $config->{cache_dir} . '", could not be created. '; + print encode_json($return_json) . 
"\n"; + exit 7; + } +} + +## +## load the cache now +## + +# gets time objects for now and a day ago +my $t_minus_1d = localtime; +my $t_minus_2d = localtime; +my $t_minus_3d = localtime; +my $t_minus_4d = localtime; +my $t_minus_5d = localtime; +my $t_minus_6d = localtime; +my $t_minus_7d = localtime; +$t_minus_1d -= 86400; +$t_minus_2d -= ( 86400 * 2 ); +$t_minus_3d -= ( 86400 * 3 ); +$t_minus_4d -= ( 86400 * 4 ); +$t_minus_5d -= ( 86400 * 5 ); +$t_minus_6d -= ( 86400 * 6 ); +$t_minus_7d -= ( 86400 * 7 ); + +my $today_cache_file = $config->{cache_dir} . '/' . $today_name; + +my $today_minus_1d_name = $t_minus_1d->strftime('%F'); +my $today_minus_2d_name = $t_minus_2d->strftime('%F'); +my $today_minus_3d_name = $t_minus_3d->strftime('%F'); +my $today_minus_4d_name = $t_minus_4d->strftime('%F'); +my $today_minus_5d_name = $t_minus_5d->strftime('%F'); +my $today_minus_6d_name = $t_minus_6d->strftime('%F'); +my $today_minus_7d_name = $t_minus_7d->strftime('%F'); + +my $minus_d_hash = { + today_minus_1d_file => $config->{cache_dir} . '/' . $today_minus_1d_name, + today_minus_2d_file => $config->{cache_dir} . '/' . $today_minus_2d_name, + today_minus_3d_file => $config->{cache_dir} . '/' . $today_minus_3d_name, + today_minus_4d_file => $config->{cache_dir} . '/' . $today_minus_4d_name, + today_minus_5d_file => $config->{cache_dir} . '/' . $today_minus_5d_name, + today_minus_6d_file => $config->{cache_dir} . '/' . $today_minus_6d_name, + today_minus_7d_file => $config->{cache_dir} . '/' . $today_minus_7d_name, +}; + +my $today_cache = { sets => {} }; + +my $today_minus_cache = {}; +my @minus_d = ( '1d', '2d', '3d', '4d', '5d', '6d', '7d' ); +foreach my $d (@minus_d) { + eval { $today_minus_cache->{$d} = decode_json( read_file( $minus_d_hash->{ 'today_minus_' . $d . 
'_file' } ) ); }; + if ($@) { + $today_minus_cache->{$d} = { sets => {} }; + } +} + +## +## process each set +## +my @sets = keys( %{ $config->{sets} } ); +my $found_sets = 0; +my @set_sizes; +foreach my $set (@sets) { + + # if any set fails, add it to the list of failed sets + eval { + if ( ref( $config->{sets}{$set} ) ne 'HASH' ) { + die( 'set "' . $set . '" is a ' . ref( $config->{sets}{$set} ) . ' and not a HASH' ); + } + if ( !defined( $config->{sets}{$set}{dir} ) ) { + die( 'set "' . $set . '" has no directory specified' ); + } + + if ( !defined( $config->{sets}{$set}{log_end} ) ) { + $config->{sets}{$set}{log_end} = $config->{log_end}; + } + + if ( !defined( $config->{sets}{$set}{log_chomp} ) ) { + $config->{sets}{$set}{log_chomp} = $config->{log_chomp}; + } + my $chomp = $config->{sets}{$set}{log_chomp}; + + my @files = File::Find::Rule->canonpath()->maxdepth(1)->file()->name( @{ $config->{sets}{$set}{log_end} } ) + ->in( $config->{sets}{$set}{dir} ); + + $return_json->{data}{sets}{$set} = { + files => {}, + max => undef, + mean => undef, + median => undef, + mode => undef, + min => undef, + size => 0, + unseen => [], + }; + + $today_cache->{sets}{$set}{files} = {}; + + # will later be used for regexp for chomping the start of the full path + my $quoted_dir = quotemeta( $config->{sets}{$set}{dir} ); + + my %m_times; + my %seen; + my %log_sizes; # make sure we don't have any twice + foreach my $log (@files) { + my ( $dev, $ino, $mode, $nlink, $uid, $gid, $rdev, $size, $atime, $mtime, $ctime, $blksize, $blocks ) + = stat($log); + + $log =~ s/^$quoted_dir//; + $log =~ s/^\///; + $log =~ s/$chomp//; + + # if we find a log twice, make sure it is the new one + if ( !defined( $m_times{$log} ) || $mtime > $m_times{$log} ) { + $seen{$log} = 1; + $m_times{$log} = $mtime; + $log_sizes{$log} = $size; + $return_json->{data}{sets}{$set}{files}{$log} = $size; + + # save the basic info for currently + $today_cache->{sets}{$set}{files}{$log} = { + dev => $dev, + ino => 
$ino, + rdev => $rdev, + size => $size, + mode => $mode, + nlink => $nlink, + uid => $uid, + gid => $gid, + atime => $atime, + mtime => $mtime, + ctime => $ctime, + blksize => $blksize, + blocks => $blocks + }; + + } ## end if ( !defined( $m_times{$log} ) || $mtime ...) + } ## end foreach my $log (@files) + + # compute the stats for log sizes + my @size_keys = keys(%log_sizes); + my @sizes; + foreach my $item (@size_keys) { + push( @sizes, $return_json->{data}{sets}{$set}{files}{$item} ); + } + $return_json->{data}{sets}{$set}{max} = max(@sizes); + $return_json->{data}{sets}{$set}{mean} = mean(@sizes); + $return_json->{data}{sets}{$set}{median} = median(@sizes); + $return_json->{data}{sets}{$set}{mode} = mode(@sizes); + $return_json->{data}{sets}{$set}{min} = min(@sizes); + $return_json->{data}{sets}{$set}{size} = sum(@sizes); + + push( @set_sizes, $return_json->{data}{sets}{$set}{size} ); + + # looks for missing files and adds them to unseen + my %unseen; + foreach my $d (@minus_d) { + my @old_logs = keys( %{ $today_minus_cache->{$d}{sets}{$set}{files} } ); + foreach my $item (@old_logs) { + if ( !defined( $return_json->{data}{sets}{$set}{files}{$item} ) && !defined( $unseen{$item} ) ) { + $unseen{$item} = 1; + push( @{ $return_json->{data}{sets}{$set}{unseen} }, $item ); + } + + } + } ## end foreach my $d (@minus_d) + }; + + # if the above died, add it to a list of failed sets + if ($@) { + $return_json->{data}{failed_sets}{$set} = $@; + } + + $found_sets++; +} ## end foreach my $set (@sets) + +# compute the over all stats +$return_json->{data}{max} = max(@set_sizes); +$return_json->{data}{mean} = mean(@set_sizes); +$return_json->{data}{median} = median(@set_sizes); +$return_json->{data}{mode} = mode(@set_sizes); +$return_json->{data}{min} = min(@set_sizes); +$return_json->{data}{size} = sum(@set_sizes); + +# if this is not atleast one, then no sets are defined, even if the hash exists +if ( $found_sets < 1 ) { + $return_json->{error} = 8; + 
$return_json->{errorString} = $opts{f} . ' lacks defined log sets'; + print encode_json($return_json) . "\n"; + exit 8; +} + +## +## encode the return and print it +## +my $return_string = encode_json($return_json) . "\n"; +eval { write_file( $config->{cache_dir} . "/extend_raw", $return_string ); }; +if ( !$opts{b} ) { + eval { write_file( $config->{cache_dir} . "/extend_return", $return_string ); }; + print $return_string; +} else { + my $toReturnCompressed; + gzip \$return_string => \$toReturnCompressed; + my $compressed = encode_base64($toReturnCompressed); + $compressed =~ s/\n//g; + $compressed = $compressed . "\n"; + if ( length($compressed) > length($return_string) ) { + eval { write_file( $config->{cache_dir} . "/extend_return", $return_string ); }; + print $return_string; + } else { + eval { write_file( $config->{cache_dir} . "/extend_return", $compressed ); }; + print $compressed; + } +} ## end else [ if ( !$opts{b} ) ] + +## +## save the cache +## +eval { write_file( $today_cache_file, encode_json($today_cache) . "\n" ); }; + +## +## remove old cache files +## +my $older_than = $t->epoch - ( $config->{max_age} * 86400 ); +my @old_cache_files + = File::Find::Rule->canonpath()->maxdepth(1)->file()->mtime( '<' . $older_than )->in( $config->{cache_dir} ); + +#use Data::Dumper; print Dumper(@old_cache_files); +foreach my $old_file (@old_cache_files) { + unlink($old_file); +} From 8babee70d971550cf85ff5a5a718d39deb3eafb7 Mon Sep 17 00:00:00 2001 From: "Zane C. 
Bowers-Hadley" Date: Thu, 6 Jul 2023 13:27:56 -0500 Subject: [PATCH 385/497] smart-v1: add handling unknown cciss devs (#482) * add -u option * add a small fix for when smart fails and useSN is false * some more possible cciss error handling * rename -u for with guess to -U to avoid conflict * add U to getopts --- snmp/smart-v1 | 29 +++++++++++++++++++++++------ 1 file changed, 23 insertions(+), 6 deletions(-) diff --git a/snmp/smart-v1 b/snmp/smart-v1 index 91c2710f9..fab2af3bd 100755 --- a/snmp/smart-v1 +++ b/snmp/smart-v1 @@ -77,6 +77,7 @@ Switches: -C Enable manual checking for guess and cciss. -S Set useSN to 0 when using -g -t Run the specified smart self test on all the devices. +-U When calling cciss_vol_status, call it with -u. -G Guess modes to use. This is a comma seperated list. Default :: scan-open,cciss-vol-status @@ -89,7 +90,8 @@ Guess Modes: - cciss-vol-status :: Freebsd/Linux specific and if it sees /dev/sg0(on Linux) or /dev/ciss0(on FreebSD) it will attempt to find drives via cciss-vol-status, and then optionally checking for disks via smrtctl if -C is given. Should be noted - though that -C will not find drives that are currently missing/failed. + though that -C will not find drives that are currently missing/failed. If -U is given, + cciss_vol_status will be called with -u. =cut @@ -111,7 +113,7 @@ my $useSN = 1; $Getopt::Std::STANDARD_HELP_VERSION = 1; sub main::VERSION_MESSAGE { - print "SMART SNMP extend 0.2.0\n"; + print "SMART SNMP extend 0.3.0\n"; } sub main::HELP_MESSAGE { @@ -123,6 +125,7 @@ sub main::HELP_MESSAGE { -C Enable manual checking for guess and cciss. -S Set useSN to 0 when using -g -t Run the specified smart self test on all the devices. +-U When calling cciss_vol_status, call it with -u. -G Guess modes to use. This is a comma seperated list. 
Default :: scan-open,cciss-vol-status @@ -136,14 +139,15 @@ Scan Modes: - cciss-vol-status :: Freebsd/Linux specific and if it sees /dev/sg0(on Linux) or /dev/ciss0(on FreebSD) it will attempt to find drives via cciss-vol-status, and then optionally checking for disks via smrtctl if -C is given. Should be noted - though that -C will not find drives that are currently missing/failed. + though that -C will not find drives that are currently missing/failed. If -U is given, + cciss_vol_status will be called with -u. '; } ## end sub main::HELP_MESSAGE #gets the options my %opts = (); -getopts( 'ugc:pZhvCSGt:', \%opts ); +getopts( 'ugc:pZhvCSGt:U', \%opts ); if ( $opts{h} ) { &HELP_MESSAGE; @@ -314,6 +318,11 @@ if ( defined( $opts{g} ) ) { $cciss = 'sg'; } + my $uarg = ''; + if ( $opts{U} ) { + $uarg = '-u'; + } + # generate the initial device path that will be checked my $sg_int = 0; my $device = '/dev/' . $cciss . $sg_int; @@ -335,7 +344,7 @@ if ( defined( $opts{g} ) ) { } ## end if ( -e $device ) my $seen_lines = {}; while ( -e $device && $sg_process ) { - my $output = `cciss_vol_status -V $device 2> /dev/null`; + my $output = `cciss_vol_status -V $uarg $device 2> /dev/null`; if ( $? != 0 && $output eq '' && !$opts{C} ) { # just empty here as we just want to skip it if it fails and there is no C # warning is above @@ -561,6 +570,7 @@ foreach my $line (@disks) { if ( $disk !~ /\// ) { $disk = '/dev/' . 
$disk; } + my $output = `$smartctl -A $disk`; my %IDs = ( '5' => 'null', @@ -865,9 +875,16 @@ foreach my $line (@disks) { # only bother to save this if useSN is not being used if ( !$useSN ) { $to_return->{data}{disks}{$disk_id} = \%IDs; - } elsif ( $IDs{exit} == 0 ) { + } elsif ( $IDs{exit} == 0 && defined($disk_id) ) { $to_return->{data}{disks}{$disk_id} = \%IDs; } + + # smartctl will in some cases exit zero when it can't pull data for cciss + # so if we get a zero exit, but no serial then it means something errored + # and the device is likely dead + if ( $IDs{exit} == 0 && !defined( $IDs{serial} ) ) { + $to_return->{data}{unhealthy}++; + } } ## end foreach my $line (@disks) my $toReturn = $json->encode($to_return); From a219c063fe7810453e6924770bfa3f1de20a92a4 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Thu, 6 Jul 2023 16:36:29 -0500 Subject: [PATCH 386/497] smart-v1: fix handling of cciss when it a disk is being replaced (#483) * only add connectors that have not been seen for cciss * rework seen logic to also have a ignore logic as well for cciss * add a comment explaining why it is being ignored * minor version bump --- snmp/smart-v1 | 26 ++++++++++++++++++++------ 1 file changed, 20 insertions(+), 6 deletions(-) diff --git a/snmp/smart-v1 b/snmp/smart-v1 index fab2af3bd..545282b99 100755 --- a/snmp/smart-v1 +++ b/snmp/smart-v1 @@ -113,7 +113,7 @@ my $useSN = 1; $Getopt::Std::STANDARD_HELP_VERSION = 1; sub main::VERSION_MESSAGE { - print "SMART SNMP extend 0.3.0\n"; + print "SMART SNMP extend 0.3.1\n"; } sub main::HELP_MESSAGE { @@ -342,7 +342,8 @@ if ( defined( $opts{g} ) ) { . "' is failing\n"; } ## end if ( $? != 0 && !$opts{C} ) } ## end if ( -e $device ) - my $seen_lines = {}; + my $seen_lines = {}; + my $ignore_lines = {}; while ( -e $device && $sg_process ) { my $output = `cciss_vol_status -V $uarg $device 2> /dev/null`; if ( $? 
!= 0 && $output eq '' && !$opts{C} ) { @@ -381,12 +382,25 @@ if ( defined( $opts{g} ) ) { } else { my $drive_count = 0; # count the connector lines, this will make sure failed are founded as well - while ( $output =~ /(connector +\d.*box +\d.*bay +\d.*)/g ) { - if ( !defined( $seen_lines->{$1} ) ) { - $seen_lines->{$1} = 1; + my $seen_conectors = {}; + while ( $output =~ /(connector +\d+[IA]\ +box +\d+\ +bay +\d+.*)/g ) { + my $cciss_drive_line = $1; + my $connector = $cciss_drive_line; + $connector =~ s/(.*\ bay +\d+).*/$1/; + if ( !defined( $seen_lines->{$cciss_drive_line} ) + && !defined( $seen_conectors->{$connector} ) + && !defined( $ignore_lines->{$cciss_drive_line} ) ) + { + $seen_lines->{$cciss_drive_line} = 1; + $seen_conectors->{$connector} = 1; $drive_count++; + } else { + # going to be a connector we've already seen + # which will happen when it is processing replacement drives + # so save this as a device to ignore + $ignore_lines->{$cciss_drive_line} = 1; } - } + } ## end while ( $output =~ /(connector +\d+[IA]\ +box +\d+\ +bay +\d+.*)/g) my $drive_int = 0; while ( $drive_int < $drive_count ) { $drive_lines From 224a7f6f2820ef3a5c3b406af977b52cf5ce0426 Mon Sep 17 00:00:00 2001 From: SourceDoctor Date: Wed, 2 Aug 2023 23:21:39 +0200 Subject: [PATCH 387/497] check for puppet v7 summary file (#485) --- snmp/puppet_agent.py | 1 + 1 file changed, 1 insertion(+) diff --git a/snmp/puppet_agent.py b/snmp/puppet_agent.py index 8afa87d1e..a44683451 100755 --- a/snmp/puppet_agent.py +++ b/snmp/puppet_agent.py @@ -21,6 +21,7 @@ summary_files = [ + "/var/cache/puppet/public/last_run_summary.yaml", "/var/cache/puppet/state/last_run_summary.yaml", "/opt/puppetlabs/puppet/cache/state/last_run_summary.yaml", "/opt/puppetlabs/puppet/public/last_run_summary.yaml", From 28ca0993bc2e739d946f1ff637f9cb6b31ab9038 Mon Sep 17 00:00:00 2001 From: "Zane C. 
Bowers-Hadley" Date: Thu, 21 Sep 2023 23:46:40 -0500 Subject: [PATCH 388/497] add new dhcp extend (#488) --- snmp/dhcp | 319 ++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 319 insertions(+) create mode 100755 snmp/dhcp diff --git a/snmp/dhcp b/snmp/dhcp new file mode 100755 index 000000000..67ad5dc2f --- /dev/null +++ b/snmp/dhcp @@ -0,0 +1,319 @@ +#!/usr/bin/env perl +#Copyright (c) 2023, Zane C. Bowers-Hadley +#All rights reserved. +# +#Redistribution and use in source and binary forms, with or without modification, +#are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +#ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +#WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +#IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, +#INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +#BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +#LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +#OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +#THE POSSIBILITY OF SUCH DAMAGE. + +=head1 NAME + +dhcp - LibreNMS ISC-DHCPD stats extend + +=head1 SYNOPSIS + +dhcp [B<-Z>] [B<-d>] [B<-p>] [B<-l> ] + +=head1 FLAGS + +=head2 -l + +Path to the lease file. + +=head2 -Z + +Enable GZip+Base64 compression. + +=head2 -d + +Do not de-dup. 
+ +This is done via making sure the combination of UID, CLTT, IP, HW address, +client hostname, and state are unique. + +=head1 Return JSON Data Hash + + - .all_networks.cur :: Current leases for all networks + - .all_networks.max :: Max possible leases for all networks + - .all_networks.percent :: Percent of total pool usage. + + - .networks.[].cur :: Current leases for the network. + - .networks.[].max :: Max possible leases for thenetworks + - .networks.[].network :: Subnet of the network. + - .networks.[].percent :: Percent of network usage. + + - .pools.[].cur :: Current leases for the pool. + - .pools.[].max :: Max possible leases for pool. + - .pools.[].first_ip :: First IP of the pool. + - .pools.[].last_ip :: Last IP of the pool. + - .pools.[].percent :: Percent of pool usage. + + - .found_leases.[].client_hostname :: Hostname the client passed during the request. + - .found_leases.[].cltt :: The CLTT for the requist. + - .found_leases.[].ends :: Unix time of of when the lease ends. + - .found_leases.[].hw_address :: Hardware address for the client that made the request. + - .found_leases.[].ip :: IP address of the client that made the request. + - .found_leases.[].starts :: Unix time of of when the lease starts. + - .found_leases.[].state :: State of the lease. + - .found_leases.[].uid :: UID passed during the request. 
+ +=cut + +use strict; +use warnings; +use Getopt::Std; +use JSON -convert_blessed_universally; +use MIME::Base64; +use IO::Compress::Gzip qw(gzip $GzipError); +use Net::ISC::DHCPd::Leases; + +my %opts; +getopts( 'l:Zdp', \%opts ); + +if ( !defined( $opts{l} ) ) { + # if freebsd, set it to the default path as used by the version installed via ports + # + # additional elsifs should be added as they become known, but default works for most Linux distros + if ( $^O eq 'freebsd' ) { + $opts{l} = '/var/db/dhcpd/dhcpd.leases'; + } else { + $opts{l} = '/var/lib/dhcpd/dhcpd.leases'; + } +} ## end if ( !defined( $opts{l} ) ) + +$Getopt::Std::STANDARD_HELP_VERSION = 1; + +sub main::VERSION_MESSAGE { + print "LibreNMS ISC-DHCPD extend 0.0.1\n"; +} + +sub main::HELP_MESSAGE { + print ' +-l Path to the lease file. +-Z Enable GZip+Base64 compression. +-d Do not de-dup. +'; + + exit; +} + +my $to_return = { + data => { + lease_file => $opts{l}, + found_leases => [], + leases => { + abandoned => 0, + active => 0, + backup => 0, + bootp => 0, + expired => 0, + free => 0, + released => 0, + reset => 0, + total => 0, + }, + networks => [], + pools => [], + all_networks => {}, + }, + version => 3, + error => 0, + errorString => '', +}; + +if ( !-f $opts{l} && !-r $opts{l} ) { + $to_return->{error} = 2; + $to_return->{errorString} = '"' . $opts{l} . '" does not exist, is not a file, or is not readable'; + print decode_json($to_return) . "\n"; + exit; +} + +my $found_leases = {}; + +my $leases; +eval { + my $leases_obj = Net::ISC::DHCPd::Leases->new( file => $opts{l} ); + $leases_obj->parse; + $leases = $leases_obj->leases; +}; +if ($@) { + $to_return->{error} = 1; + $to_return->{errorString} = 'Reading leases failed... ' . $@; + print decode_json($to_return) . 
"\n"; + exit; +} + +use Data::Dumper; + +foreach my $lease ( @{$leases} ) { + if ( !defined( $lease->{uid} ) ) { + $lease->{uid} = ''; + } + if ( !defined( $lease->{vendor_class_identifier} ) ) { + $lease->{vendor_class_identifier} = ''; + } + if ( !defined( $lease->{cltt} ) ) { + $lease->{cltt} = ''; + } + if ( !defined( $lease->{state} ) ) { + $lease->{state} = ''; + } + if ( !defined( $lease->{ip_address} ) ) { + $lease->{ip_address} = ''; + } + if ( !defined( $lease->{hardware_address} ) ) { + $lease->{hardware_address} = ''; + } + if ( !defined( $lease->{client_hostname} ) ) { + $lease->{client_hostname} = ''; + } +} ## end foreach my $lease ( @{$leases} ) + +# dedup or copy lease info as is +if ( !$opts{d} ) { + foreach my $lease ( @{$leases} ) { + $found_leases->{ $lease->{uid} + . $lease->{cltt} + . $lease->{uid} + . $lease->{ip_address} + . $lease->{client_hostname} + . $lease->{state} + . $lease->{hardware_address} } = $lease; + } + foreach my $lease_key ( keys( %{$found_leases} ) ) { + push( + @{ $to_return->{data}{found_leases} }, + { + uid => $found_leases->{$lease_key}{uid}, + cltt => $found_leases->{$lease_key}{cltt}, + state => $found_leases->{$lease_key}{state}, + ip => $found_leases->{$lease_key}{ip_address}, + hw_address => $found_leases->{$lease_key}{hardware_address}, + starts => $found_leases->{$lease_key}{starts}, + ends => $found_leases->{$lease_key}{ends}, + client_hostname => $found_leases->{$lease_key}{client_hostname}, + } + ); + } ## end foreach my $lease_key ( keys( %{$found_leases} ...)) +} else { + foreach my $lease ( @{$leases} ) { + push( + @{ $to_return->{data}{found_leases} }, + { + uid => $lease->{uid}, + cltt => $lease->{cltt}, + state => $lease->{state}, + ip => $lease->{ip_address}, + hw_address => $lease->{hardware_address}, + starts => $lease->{starts}, + ends => $lease->{ends}, + client_hostname => $lease->{client_hostname}, + } + ); + } ## end foreach my $lease ( @{$leases} ) +} ## end else [ if ( !$opts{d} ) ] + +#print 
Dumper($leases); + +# total the lease info types +foreach my $lease ( @{ $to_return->{data}{found_leases} } ) { + $to_return->{data}{leases}{total}++; + if ( $lease->{state} eq 'free' ) { + $to_return->{data}{leases}{free}++; + } elsif ( $lease->{state} eq 'abandoned' ) { + $to_return->{data}{leases}{abandoned}++; + } elsif ( $lease->{state} eq 'active' ) { + $to_return->{data}{leases}{active}++; + } elsif ( $lease->{state} eq 'backup' ) { + $to_return->{data}{leases}{backup}++; + } elsif ( $lease->{state} eq 'bootp' ) { + $to_return->{data}{leases}{bootp}++; + } elsif ( $lease->{state} eq 'expired' ) { + $to_return->{data}{leases}{expired}++; + } elsif ( $lease->{state} eq 'released' ) { + $to_return->{data}{leases}{released}++; + } elsif ( $lease->{state} eq 'reset' ) { + $to_return->{data}{leases}{reset}++; + } +} ## end foreach my $lease ( @{ $to_return->{data}{found_leases...}}) + +my $cmd_output = `dhcpd-pools -s i -A -l $opts{l} 2> /dev/null`; +my $category = ''; +for my $line ( split( /\n/, $cmd_output ) ) { + $line =~ s/^ +//; + my @line_split = split( /[\ \t]+/, $line ); + if ( $line =~ /^Ranges\:/ ) { + $category = 'pools'; + } elsif ( $line =~ /^Shared\ networks\:/ ) { + $category = 'networks'; + } elsif ( $line =~ /^Sum\ of\ all\ ranges\:/ ) { + $category = 'all_networks'; + } elsif ( $category eq 'pools' && defined( $line_split[4] ) && $line_split[4] =~ /^\d+$/ ) { + push( + @{ $to_return->{data}{pools} }, + { + first_ip => $line_split[1], + last_ip => $line_split[3], + max => $line_split[4], + cur => $line_split[5], + percent => $line_split[6], + } + ); + } elsif ( $category eq 'networks' + && defined( $line_split[1] ) + && $line_split[1] =~ /^\d+$/ + && defined( $line_split[2] ) + && $line_split[2] =~ /^\d+$/ ) + { + push( + @{ $to_return->{data}{networks} }, + { + network => $line_split[0], + max => $line_split[1], + cur => $line_split[2], + percent => $line_split[3], + } + ); + } elsif ( $category eq 'all_networks' ) { + 
$to_return->{data}{all_networks}{max} = $line_split[2]; + $to_return->{data}{all_networks}{cur} = $line_split[3]; + $to_return->{data}{all_networks}{percent} = $line_split[4]; + } +} ## end for my $line ( split( /\n/, $cmd_output ) ) + +my $json = JSON->new->allow_nonref->canonical(1); +if ( $opts{p} ) { + $json->pretty; +} +my $toReturn = $json->encode($to_return) . "\n"; +if ( $opts{Z} ) { + my $toReturnCompressed; + gzip \$toReturn => \$toReturnCompressed; + my $compressed = encode_base64($toReturnCompressed); + $compressed =~ s/\n//g; + $compressed = $compressed . "\n"; + if ( length($compressed) < length($toReturn) ) { + $toReturn = $compressed; + } +} ## end if ( $opts{Z} ) + +print $toReturn; + +exit; + From d36cdac68ce8ad666f520254817d30a043fc8d0c Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Fri, 22 Sep 2023 01:51:14 -0500 Subject: [PATCH 389/497] base64 encode possible fields for DHCP that may possibly include binary data (#489) * base64 encode uid and client hostname for dhcp lease info * cleanup vendor_class_identifier as well and note the changes --- snmp/dhcp | 75 ++++++++++++++++++++++++++++++++++++++++++------------- 1 file changed, 57 insertions(+), 18 deletions(-) diff --git a/snmp/dhcp b/snmp/dhcp index 67ad5dc2f..97e4e8e6a 100755 --- a/snmp/dhcp +++ b/snmp/dhcp @@ -72,6 +72,14 @@ client hostname, and state are unique. - .found_leases.[].starts :: Unix time of of when the lease starts. - .found_leases.[].state :: State of the lease. - .found_leases.[].uid :: UID passed during the request. + - .found_leases.[].vendor_class_identifier :: Vendor class identifier passed during the request. + +The following are Base64 encoded as they may include binary that breaks either SNMP or +the PHP JSON decoder. + + - .found_leases.[].vendor_class_identifier + - .found_leases.[].uid :: UID passed during the request. 
+ - .found_leases.[].vendor_class_identifier =cut @@ -100,7 +108,7 @@ if ( !defined( $opts{l} ) ) { $Getopt::Std::STANDARD_HELP_VERSION = 1; sub main::VERSION_MESSAGE { - print "LibreNMS ISC-DHCPD extend 0.0.1\n"; + print "LibreNMS ISC-DHCPD extend 0.0.2\n"; } sub main::HELP_MESSAGE { @@ -197,33 +205,65 @@ if ( !$opts{d} ) { . $lease->{hardware_address} } = $lease; } foreach my $lease_key ( keys( %{$found_leases} ) ) { + my $uid = $found_leases->{$lease_key}{uid}; + if ( $uid ne '' ) { + $uid = encode_base64($uid); + chomp($uid); + } + my $client_hostname = $found_leases->{$lease_key}{client_hostname}; + if ( $client_hostname ne '' ) { + $client_hostname = encode_base64($client_hostname); + chomp($client_hostname); + } + my $vendor_class_identifier = $found_leases->{$lease_key}{vendor_class_identifier}; + if ( $vendor_class_identifier ne '' ) { + $vendor_class_identifier = encode_base64($vendor_class_identifier); + chomp($vendor_class_identifier); + } push( @{ $to_return->{data}{found_leases} }, { - uid => $found_leases->{$lease_key}{uid}, - cltt => $found_leases->{$lease_key}{cltt}, - state => $found_leases->{$lease_key}{state}, - ip => $found_leases->{$lease_key}{ip_address}, - hw_address => $found_leases->{$lease_key}{hardware_address}, - starts => $found_leases->{$lease_key}{starts}, - ends => $found_leases->{$lease_key}{ends}, - client_hostname => $found_leases->{$lease_key}{client_hostname}, + uid => $uid, + cltt => $found_leases->{$lease_key}{cltt}, + state => $found_leases->{$lease_key}{state}, + ip => $found_leases->{$lease_key}{ip_address}, + hw_address => $found_leases->{$lease_key}{hardware_address}, + starts => $found_leases->{$lease_key}{starts}, + ends => $found_leases->{$lease_key}{ends}, + client_hostname => $client_hostname, + vendor_class_identifier => $vendor_class_identifier, } ); } ## end foreach my $lease_key ( keys( %{$found_leases} ...)) } else { foreach my $lease ( @{$leases} ) { + my $uid = $lease->{uid}; + if ( $uid ne '' ) { + $uid = 
encode_base64($uid); + chomp($uid); + } + my $client_hostname = $lease->{client_hostname}; + if ( $client_hostname ne '' ) { + $client_hostname = encode_base64($client_hostname); + chomp($client_hostname); + } + my $vendor_class_identifier = $lease->{vendor_class_identifier}; + if ( $vendor_class_identifier ne '' ) { + $vendor_class_identifier = encode_base64($vendor_class_identifier); + chomp($vendor_class_identifier); + } push( @{ $to_return->{data}{found_leases} }, { - uid => $lease->{uid}, - cltt => $lease->{cltt}, - state => $lease->{state}, - ip => $lease->{ip_address}, - hw_address => $lease->{hardware_address}, - starts => $lease->{starts}, - ends => $lease->{ends}, - client_hostname => $lease->{client_hostname}, + uid => $uid, + cltt => $lease->{cltt}, + state => $lease->{state}, + ip => $lease->{ip_address}, + hw_address => $lease->{hardware_address}, + starts => $lease->{starts}, + ends => $lease->{ends}, + client_hostname => $client_hostname, + vendor_class_identifier => $vendor_class_identifier, } ); } ## end foreach my $lease ( @{$leases} ) @@ -316,4 +356,3 @@ if ( $opts{Z} ) { print $toReturn; exit; - From 2a671ff05d68d46aa13e5199895095c14eb5b356 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Sun, 24 Sep 2023 13:49:07 -0500 Subject: [PATCH 390/497] for dhcp dedup, don't use uid twice and don't use cltt (#490) --- snmp/dhcp | 2 -- 1 file changed, 2 deletions(-) diff --git a/snmp/dhcp b/snmp/dhcp index 97e4e8e6a..3a8da3939 100755 --- a/snmp/dhcp +++ b/snmp/dhcp @@ -197,8 +197,6 @@ foreach my $lease ( @{$leases} ) { if ( !$opts{d} ) { foreach my $lease ( @{$leases} ) { $found_leases->{ $lease->{uid} - . $lease->{cltt} - . $lease->{uid} . $lease->{ip_address} . $lease->{client_hostname} . $lease->{state} From bd5892e79769e181d77d772bc3a65835c46e06a0 Mon Sep 17 00:00:00 2001 From: "Zane C. 
Bowers-Hadley" Date: Sun, 1 Oct 2023 12:17:01 -0500 Subject: [PATCH 391/497] dhcp extend nolonger requires dhcpd-pools (#491) * more work on implementing dhcp config parsing * no longer require non-perl depends --- snmp/dhcp | 282 ++++++++++++++++++++++++++++++++++++++++++++---------- 1 file changed, 231 insertions(+), 51 deletions(-) diff --git a/snmp/dhcp b/snmp/dhcp index 3a8da3939..66466c2d8 100755 --- a/snmp/dhcp +++ b/snmp/dhcp @@ -47,6 +47,17 @@ Do not de-dup. This is done via making sure the combination of UID, CLTT, IP, HW address, client hostname, and state are unique. +=head2 -n + +If no shared networks are defined, what to use for generating the network names +for reporting purposes. + + - cidr :: Use the cidr for the defined subnets. + + - cidr+range :: Use the cidr+range for the defined subnets. + +Default is 'cidr'. + =head1 Return JSON Data Hash - .all_networks.cur :: Current leases for all networks @@ -54,15 +65,19 @@ client hostname, and state are unique. - .all_networks.percent :: Percent of total pool usage. - .networks.[].cur :: Current leases for the network. - - .networks.[].max :: Max possible leases for thenetworks - - .networks.[].network :: Subnet of the network. + - .networks.[].max :: Max possible leases for the networks + - .networks.[].network :: Name of the network. + - .networks.[].subnets :: Array of subnets on the network. - .networks.[].percent :: Percent of network usage. + - .networks.[].pools :: Pool ranges used. - .pools.[].cur :: Current leases for the pool. - .pools.[].max :: Max possible leases for pool. - .pools.[].first_ip :: First IP of the pool. - .pools.[].last_ip :: Last IP of the pool. - .pools.[].percent :: Percent of pool usage. + - .pools.[].cidr :: CIDR for this subnet. + - .pools.[].$option :: Additional possible DHCP subnet option. - .found_leases.[].client_hostname :: Hostname the client passed during the request. - .found_leases.[].cltt :: The CLTT for the requist. 
@@ -90,9 +105,18 @@ use JSON -convert_blessed_universally; use MIME::Base64; use IO::Compress::Gzip qw(gzip $GzipError); use Net::ISC::DHCPd::Leases; +use Net::ISC::DHCPd::Config; my %opts; -getopts( 'l:Zdp', \%opts ); +getopts( 'l:Zdpc:n:', \%opts ); + +if ( !defined( $opts{n} ) ) { + $opts{n} = 'cidr'; +} else { + if ( $opts{n} ne 'cidr' && $opts{n} ne 'cidr+range' ) { + $opts{n} = 'cidr'; + } +} if ( !defined( $opts{l} ) ) { # if freebsd, set it to the default path as used by the version installed via ports @@ -105,6 +129,17 @@ if ( !defined( $opts{l} ) ) { } } ## end if ( !defined( $opts{l} ) ) +if ( !defined( $opts{c} ) ) { + # if freebsd, set it to the default path as used by the version installed via ports + # + # additional elsifs should be added as they become known, but default works for most Linux distros + if ( $^O eq 'freebsd' ) { + $opts{c} = '/usr/local/etc/dhcpd.conf'; + } else { + $opts{c} = '/etc/dhcp/dhcpd.conf'; + } +} ## end if ( !defined( $opts{c} ) ) + $Getopt::Std::STANDARD_HELP_VERSION = 1; sub main::VERSION_MESSAGE { @@ -114,12 +149,13 @@ sub main::VERSION_MESSAGE { sub main::HELP_MESSAGE { print ' -l Path to the lease file. +-c Path to the dhcpd.conf file. -Z Enable GZip+Base64 compression. -d Do not de-dup. 
'; exit; -} +} ## end sub main::HELP_MESSAGE my $to_return = { data => { @@ -138,7 +174,7 @@ my $to_return = { }, networks => [], pools => [], - all_networks => {}, + all_networks => { cur => 0, max => 0, percent => 0, }, }, version => 3, error => 0, @@ -152,8 +188,14 @@ if ( !-f $opts{l} && !-r $opts{l} ) { exit; } +# hash for storing found leases for later deduping my $found_leases = {}; +## +## +## read in the leases +## +## my $leases; eval { my $leases_obj = Net::ISC::DHCPd::Leases->new( file => $opts{l} ); @@ -167,8 +209,11 @@ if ($@) { exit; } -use Data::Dumper; - +## +## +## process found leases +## +## foreach my $lease ( @{$leases} ) { if ( !defined( $lease->{uid} ) ) { $lease->{uid} = ''; @@ -193,7 +238,11 @@ foreach my $lease ( @{$leases} ) { } } ## end foreach my $lease ( @{$leases} ) -# dedup or copy lease info as is +## +## +## dedup or copy lease info as is +## +## if ( !$opts{d} ) { foreach my $lease ( @{$leases} ) { $found_leases->{ $lease->{uid} @@ -267,9 +316,11 @@ if ( !$opts{d} ) { } ## end foreach my $lease ( @{$leases} ) } ## end else [ if ( !$opts{d} ) ] -#print Dumper($leases); - -# total the lease info types +## +## +## total the lease info types +## +## foreach my $lease ( @{ $to_return->{data}{found_leases} } ) { $to_return->{data}{leases}{total}++; if ( $lease->{state} eq 'free' ) { @@ -291,50 +342,179 @@ foreach my $lease ( @{ $to_return->{data}{found_leases} } ) { } } ## end foreach my $lease ( @{ $to_return->{data}{found_leases...}}) -my $cmd_output = `dhcpd-pools -s i -A -l $opts{l} 2> /dev/null`; -my $category = ''; -for my $line ( split( /\n/, $cmd_output ) ) { - $line =~ s/^ +//; - my @line_split = split( /[\ \t]+/, $line ); - if ( $line =~ /^Ranges\:/ ) { - $category = 'pools'; - } elsif ( $line =~ /^Shared\ networks\:/ ) { - $category = 'networks'; - } elsif ( $line =~ /^Sum\ of\ all\ ranges\:/ ) { - $category = 'all_networks'; - } elsif ( $category eq 'pools' && defined( $line_split[4] ) && $line_split[4] =~ /^\d+$/ ) { - 
push( - @{ $to_return->{data}{pools} }, - { - first_ip => $line_split[1], - last_ip => $line_split[3], - max => $line_split[4], - cur => $line_split[5], - percent => $line_split[6], +## +## +## read in the config +## +## +my $config_obj; +eval { + $config_obj = Net::ISC::DHCPd::Config->new( file => $opts{c} ); + $config_obj->parse; +}; +if ($@) { + $to_return->{error} = 3; + $to_return->{errorString} = 'Reading leases failed... ' . $@; + print decode_json($to_return) . "\n"; + exit; +} + +## +## +## process found subnets +## +## +my $pools = {}; +my @subnets = $config_obj->subnets; +foreach my $subnet (@subnets) { + my @ranges = $subnet->ranges; + my $subnet_cidr_obj = $subnet->address; + my $subnet_cidr = $subnet_cidr_obj->addr; + foreach my $range (@ranges) { + my $lower = $range->lower; + my $upper = $range->upper; + my $pool_name = $lower->addr . '-' . $upper->addr; + my $subnet_addr = $subnet_cidr_obj->addr; + my $subnet_cidr = $subnet_cidr_obj->cidr; + my $max = $upper->bigint - $lower->bigint; + $pools->{$pool_name} = { + first_ip => $lower->addr, + lower => $lower, + last_ip => $upper->addr, + upper => $upper, + subnet => $subnet_addr, + cidr => $subnet_cidr, + max => $max, + cur => 0, + percent => 0, + }; + my @options = $subnet->options; + + foreach my $option (@options) { + $pools->{$pool_name}{ $option->name } = $option->value; + } + } ## end foreach my $range (@ranges) +} ## end foreach my $subnet (@subnets) + +## +## +## process found networks and subnets contained on in +## +## +my $networks = {}; +my @found_subnets = $config_obj->sharednetworks; +my $undef_network_name_int = 0; +foreach my $network (@found_subnets) { + my $name = $network->name; + if ( !defined($name) || $name eq '' ) { + $name = 'undef' . 
$undef_network_name_int; + $undef_network_name_int++; + } + if ( !defined( $networks->{$name} ) ) { + $networks->{$name} = []; + } + + @subnets = $network->subnets; + foreach my $subnet (@subnets) { + my @ranges = $subnet->ranges; + my $subnet_cidr_obj = $subnet->address; + my $subnet_cidr = $subnet_cidr_obj->addr; + foreach my $range (@ranges) { + my $lower = $range->lower; + my $upper = $range->upper; + my $pool_name = $lower->addr . '-' . $upper->addr; + my $max = $upper->bigint - $lower->bigint; + $pools->{$pool_name} = { + first_ip => $lower->addr, + lower => $lower, + last_ip => $upper->addr, + upper => $upper, + subnet => $subnet_cidr, + max => $max, + cur => 0, + percent => 0, + }; + my @options = $subnet->options; + foreach my $option (@options) { + $pools->{$pool_name}{ $option->name } = $option->value; } - ); - } elsif ( $category eq 'networks' - && defined( $line_split[1] ) - && $line_split[1] =~ /^\d+$/ - && defined( $line_split[2] ) - && $line_split[2] =~ /^\d+$/ ) - { - push( - @{ $to_return->{data}{networks} }, - { - network => $line_split[0], - max => $line_split[1], - cur => $line_split[2], - percent => $line_split[3], + + push( @{ $networks->{$name} }, $pool_name ); + } ## end foreach my $range (@ranges) + } ## end foreach my $subnet (@subnets) +} ## end foreach my $network (@found_subnets) + +## +## +## puts the pools array together +## +## +foreach my $pool_key ( keys( %{$pools} ) ) { + my $lower = $pools->{$pool_key}{lower}; + delete( $pools->{$pool_key}{lower} ); + my $upper = $pools->{$pool_key}{upper}; + delete( $pools->{$pool_key}{upper} ); + + # check each lease for if it is between the upper and lower IPs + # then increment current if the state is active + foreach my $lease ( @{ $to_return->{data}{found_leases} } ) { + my $lease_ip = NetAddr::IP->new( $lease->{ip} ); + if ( $lower <= $lease_ip && $lease_ip <= $upper ) { + if ( $lease->{state} eq 'active' ) { + $pools->{$pool_key}{cur}++; } - ); - } elsif ( $category eq 'all_networks' ) { 
- $to_return->{data}{all_networks}{max} = $line_split[2]; - $to_return->{data}{all_networks}{cur} = $line_split[3]; - $to_return->{data}{all_networks}{percent} = $line_split[4]; + } } -} ## end for my $line ( split( /\n/, $cmd_output ) ) + $pools->{$pool_key}{percent} = ( $pools->{$pool_key}{cur} / $pools->{$pool_key}{max}->numify() ) * 100; + $pools->{$pool_key}{max} = $pools->{$pool_key}{max}->bstr; + + # add the current and max to all_networks(reall all subnets)... + $to_return->{data}{all_networks}{cur} = $to_return->{data}{all_networks}{cur} + $pools->{$pool_key}{cur}; + $to_return->{data}{all_networks}{max} = $to_return->{data}{all_networks}{max} + $pools->{$pool_key}{max}; + + push( @{ $to_return->{data}{pools} }, $pools->{$pool_key} ); +} ## end foreach my $pool_key ( keys( %{$pools} ) ) +$to_return->{data}{all_networks}{percent} + = ( $to_return->{data}{all_networks}{cur} / $to_return->{data}{all_networks}{max} ) * 100; + +## +## +## put the networks section together +## +## +my @network_keys = keys( %{$networks} ); +if ( !defined( $network_keys[0] ) ) { + foreach my $pool_key ( keys( %{$pools} ) ) { + $networks->{ $pools->{$pool_key}{cidr} } = [$pool_key]; + } + @network_keys = keys( %{$networks} ); +} +foreach my $network (@network_keys) { + my $cur = 0; + my $max = 0; + foreach my $pool_name ( @{ $networks->{$network} } ) { + $cur = $cur + $pools->{$pool_name}{cur}; + $max = $max + $pools->{$pool_name}{max}; + } + my $percent = ( $cur / $max ) * 100; + push( + @{ $to_return->{data}{networks} }, + { + cur => $cur, + max => $max, + network => $network, + percent => $percent, + pools => $networks->{$network}, + } + ); +} ## end foreach my $network (@network_keys) + +## +## +## handle printing the output +## +## my $json = JSON->new->allow_nonref->canonical(1); if ( $opts{p} ) { $json->pretty; From 0704b427775afff4adcdd1cdfbb3782528213cd2 Mon Sep 17 00:00:00 2001 From: "Zane C. 
Bowers-Hadley" Date: Wed, 4 Oct 2023 19:30:54 -0500 Subject: [PATCH 392/497] add -w to dhcp (#492) --- snmp/dhcp | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/snmp/dhcp b/snmp/dhcp index 66466c2d8..461790389 100755 --- a/snmp/dhcp +++ b/snmp/dhcp @@ -58,6 +58,10 @@ for reporting purposes. Default is 'cidr'. +=head2 -w + +Write the the output to this file. + =head1 Return JSON Data Hash - .all_networks.cur :: Current leases for all networks @@ -106,9 +110,10 @@ use MIME::Base64; use IO::Compress::Gzip qw(gzip $GzipError); use Net::ISC::DHCPd::Leases; use Net::ISC::DHCPd::Config; +use File::Slurp; my %opts; -getopts( 'l:Zdpc:n:', \%opts ); +getopts( 'l:Zdpc:n:w:', \%opts ); if ( !defined( $opts{n} ) ) { $opts{n} = 'cidr'; @@ -533,4 +538,8 @@ if ( $opts{Z} ) { print $toReturn; +if ($opts{w}) { + write_file($opts{w}, $toReturn); +} + exit; From 31f8315c622a1e384f712871efa1f3c130a99c2c Mon Sep 17 00:00:00 2001 From: Tony Murray Date: Sat, 7 Oct 2023 21:36:37 -0500 Subject: [PATCH 393/497] Add pacman packages support (#493) --- agent-local/pacman | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) create mode 100755 agent-local/pacman diff --git a/agent-local/pacman b/agent-local/pacman new file mode 100755 index 000000000..d0b4f4ae0 --- /dev/null +++ b/agent-local/pacman @@ -0,0 +1,22 @@ +#!/bin/bash +# Cache the file for 30 minutes +# If you want to override this, put the command in cron. +# We cache because it is a 1sec delay, which is painful for the poller +if [ -x /usr/bin/pacman ]; then + DATE=$(date +%s) + FILE=/var/cache/librenms/agent-local-pacman + + [ -d /var/cache/librenms ] || mkdir -p /var/cache/librenms + + if [ ! 
-e $FILE ]; then + pacman -Qi | awk '/^Name/{name=$3} /^Version/{version=$3} /^Architecture/{arch=$3} /^Installed Size/{print name, version, arch, $4$5}' > $FILE + fi + FILEMTIME=$(stat -c %Y $FILE) + FILEAGE=$(($DATE-$FILEMTIME)) + if [ $FILEAGE -gt 1800 ]; then + pacman -Qi | awk '/^Name/{name=$3} /^Version/{version=$3} /^Architecture/{arch=$3} /^Installed Size/{print name, version, arch, $4$5}' > $FILE + fi + echo "<<>>" + cat $FILE +fi + From c5f02e848d2420098e84286b56e0d81eb2e3cf23 Mon Sep 17 00:00:00 2001 From: calvin_thefreak <5560381+calvinthefreak@users.noreply.github.com> Date: Wed, 8 Nov 2023 05:16:49 +0100 Subject: [PATCH 394/497] Quick and dirty fix for snmp extend to respond correctly. (#494) - Added "grep -v" to the output command. - Added a note for Users that don't understand why LibreNMS can't get the values when snmp user is not in docker group. --- snmp/mailcow-dockerized-postfix | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/snmp/mailcow-dockerized-postfix b/snmp/mailcow-dockerized-postfix index 8fd536481..473321e6c 100644 --- a/snmp/mailcow-dockerized-postfix +++ b/snmp/mailcow-dockerized-postfix @@ -20,6 +20,9 @@ # please adjust librenms_poller_interval according to your LibreNMS setup - default to 5 minutes # requirements: mailcow-dockerized and pflogsumm # +# Note to users that struggle with the setup: Make sure, that your SNMP Daemon can use the docker command +# So please make sure, that the e.G. Debian-snmp user is added to the docker group! 
+# import json import re @@ -47,7 +50,7 @@ def cli_command(): + libre_to_mcd_postfix(librenms_poller_interval) + "m " + cli_get_docker_container() - + "| pflogsumm --smtpd-stats" + + "| pflogsumm --smtpd-stats 2>&1 | grep -v 'Use of uninitialized value'" ) return cli_part From ca8e3b5addba0add6651254c01e731371694b074 Mon Sep 17 00:00:00 2001 From: Garcia MICHEL Date: Wed, 8 Nov 2023 05:21:53 +0100 Subject: [PATCH 395/497] Add OpenMediaVault detection to distro script (#484) --- snmp/distro | 3 +++ 1 file changed, 3 insertions(+) diff --git a/snmp/distro b/snmp/distro index da06f943a..843eef062 100755 --- a/snmp/distro +++ b/snmp/distro @@ -78,6 +78,9 @@ elif [ "${OS}" = "Linux" ] ; then if [ -f /etc/dogtag ]; then DIST=$(cat /etc/dogtag) fi + if [ -f /usr/sbin/omv-sysinfo ]; then + DIST="${DIST}/OpenMediaVault $(/usr/sbin/omv-sysinfo 00-omv-version | grep Release | cut -d: -f2 | sed 's/\s//g')" + fi elif [ -f /etc/gentoo-release ] ; then DIST="Gentoo" From 4b03a1f07e1f1c4f313a704d56d241cdcaaa12e0 Mon Sep 17 00:00:00 2001 From: bnerickson Date: Tue, 7 Nov 2023 20:25:48 -0800 Subject: [PATCH 396/497] Adding Socket Statistics Script (#486) * Adding Socket Statistics Script * Adding the no-header argument to the ss command. Fixing a bug where the unknown netid type was discarded before it was converted from question-marks to unknown * Now that headers are removed from the ss command, moving the datastructure logic out of command execution logic so it executes at least once even if there's no output. * Moved 'always-on' global arguments into a GLOBAL_ARGS constant list. 
--- snmp/ss.py | 370 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 370 insertions(+) create mode 100644 snmp/ss.py diff --git a/snmp/ss.py b/snmp/ss.py new file mode 100644 index 000000000..638a24079 --- /dev/null +++ b/snmp/ss.py @@ -0,0 +1,370 @@ +#!/usr/bin/env python +# +# Name: Socket Statistics Script +# Author: bnerickson w/SourceDoctor's certificate.py script forming +# the base of the vast majority of this one. +# Version: 1.0 +# Description: This is a simple script to parse "ss" output for ingestion into +# LibreNMS via the ss application. +# Installation: +# 1. Copy this script to /etc/snmp/ and make it executable: +# chmod +x /etc/snmp/ss.py +# 2. Edit your snmpd.conf and include: +# extend ss /etc/snmp/ss.py +# 3. (Optional) Create a /etc/snmp/ss.json file and specify: +# a.) "ss_cmd" - String path to the ss binary: ["/sbin/ss"] +# b.) "socket_types" - A comma-delimited list of socket types to include. +# The following socket types are valid: dccp, icmp6, +# mptcp, p_dgr, p_raw, raw, sctp, tcp, ti_dg, ti_rd, +# ti_sq, ti_st, u_dgr, u_seq, u_str, udp, unknown, +# v_dgr, v_dgr, xdp. Please note that the "unknown" +# socket type is represented in ss output with the +# netid "???". Please also note that the p_dgr and +# p_raw socket types are specific to the "link" +# address family; the ti_dg, ti_rd, ti_sq, and ti_st +# socket types are specific to the "tipc" address +# family; the u_dgr, u_seq, and u_str socket types +# are specific to the "unix" address family; and the +# v_dgr and v_str socket types are specific to the +# "vsock" address family. Filtering out the parent +# address families for the aforementioned will also +# filter out their specific socket types. Specifying +# "all" includes all of the socket types. For +# example: to include only tcp, udp, icmp6 sockets, +# you would specify "tcp,udp,icmp6": ["all"] +# c.) "addr_families" - A comma-delimited list of address families to +# include. 
The following families are valid: +# inet, inet6, link, netlink, tipc, unix, vsock. As +# mentioned above under (b), filtering out the link, +# tipc, unix, or vsock address families will also +# filter out their respective socket types. +# Specifying "all" includes all of the families. +# For example: to include only inet and inet6 +# families, you would specify "inet,inet6": ["all"] +# ``` +# { +# "ss_cmd": "/sbin/ss", +# "socket_types": "all", +# "addr_families": "all" +# } +# ``` +# 4. Restart snmpd and activate the app for desired host. + +import json +import subprocess +import sys + +CONFIG_FILE = "/etc/snmp/ss.json" +SOCKET_MAPPINGS = { + "dccp": { + "args": ["--dccp"], + "netids": [], + "addr_family": False, + "socket_type": True, + }, + "inet": { + "args": ["--family", "inet"], + "netids": ["dccp", "mptcp", "raw", "sctp", "tcp", "udp", "unknown"], + "addr_family": True, + "socket_type": False, + }, + "inet6": { + "args": ["--family", "inet6"], + "netids": ["dccp", "icmp6", "mptcp", "raw", "sctp", "tcp", "udp", "unknown"], + "addr_family": True, + "socket_type": False, + }, + "link": { + "args": ["--family", "link"], + "netids": ["p_dgr", "p_raw", "unknown"], + "addr_family": True, + "socket_type": False, + }, + "mptcp": { + "args": ["--mptcp"], + "netids": [], + "addr_family": False, + "socket_type": True, + }, + "netlink": { + "args": ["--family", "netlink"], + "netids": [], + "addr_family": True, + "socket_type": False, + }, + "raw": { + "args": ["--raw"], + "netids": [], + "addr_family": False, + "socket_type": True, + }, + "sctp": { + "args": ["--sctp"], + "netids": [], + "addr_family": False, + "socket_type": True, + }, + "tcp": { + "args": ["--tcp"], + "netids": [], + "addr_family": False, + "socket_type": True, + }, + "tipc": { + "args": ["--family", "tipc"], + "netids": ["ti_dg", "ti_rd", "ti_sq", "ti_st", "unknown"], + "addr_family": True, + "socket_type": False, + }, + "udp": { + "args": ["--udp"], + "netids": [], + "addr_family": False, + 
"socket_type": True, + }, + "unix": { + "args": ["--family", "unix"], + "netids": ["u_dgr", "u_seq", "u_str"], + "addr_family": True, + "socket_type": False, + }, + "vsock": { + "args": ["--family", "vsock"], + "netids": ["v_dgr", "v_str", "unknown"], + "addr_family": True, + "socket_type": False, + }, + "xdp": { + "args": ["--xdp"], + "netids": [], + "addr_family": False, + "socket_type": True, + }, +} +GLOBAL_ARGS = ["--all", "--no-header"] +ADDR_FAMILY_ALLOW_LIST = [] +SOCKET_ALLOW_LIST = [] + +# Populate the state allow lists. +for gentype_key, gentype_values in SOCKET_MAPPINGS.items(): + if gentype_values["socket_type"]: + SOCKET_ALLOW_LIST.append(gentype_key) + if gentype_values["addr_family"]: + ADDR_FAMILY_ALLOW_LIST.append(gentype_key) + for gentype_netid in gentype_values["netids"]: + SOCKET_ALLOW_LIST.append(gentype_netid) + +SS_CMD = ["/sbin/ss"] + + +def error_handler(error_name, err): + """ + error_handler(): Common error handler for config/output parsing and + command execution. + Inputs: + error_name: String describing the error handled. + err: The error message in its entirety. + Outputs: + None + """ + output_data = { + "errorString": f"{error_name}: '{err}'", + "error": 1, + "version": 1, + "data": [], + } + print(json.dumps(output_data)) + sys.exit(1) + + +def config_file_parser(): + """ + config_file_parser(): Parses the config file (if it exists) and extracts the + necessary parameters. + + Inputs: + None + Outputs: + ss_cmd: The full ss command to execute. + socket_allow_list: A list of the socket types to parse output for. 
+ """ + ss_cmd = SS_CMD.copy() + socket_allow_list = SOCKET_ALLOW_LIST.copy() + addr_family_allow_list = ADDR_FAMILY_ALLOW_LIST.copy() + + # Load configuration file if it exists + try: + with open(CONFIG_FILE, "r", encoding="utf-8") as json_file: + config_file = json.load(json_file) + ss_cmd = [config_file["ss_cmd"]] + socket_allow_list_clean = list( + map(str.lower, config_file["socket_types"].split(",")) + ) + addr_family_allow_list_clean = list( + map(str.lower, config_file["addr_families"].split(",")) + ) + if "all" not in socket_allow_list_clean: + socket_allow_list = socket_allow_list_clean + if "all" not in addr_family_allow_list_clean: + addr_family_allow_list = addr_family_allow_list_clean + except FileNotFoundError: + pass + except (KeyError, PermissionError, OSError, json.decoder.JSONDecodeError) as err: + error_handler("Config File Error", err) + + # Verify the socket types specified by the user are valid. + err = "" + for socket_type in socket_allow_list: + if socket_type in SOCKET_ALLOW_LIST: + continue + if not err: + err = "Invalid socket types specified: " + err += socket_type + " " + if err: + error_handler("Configuration File Error", err.strip()) + + # Verify the address families specified by the user are valid. + err = "" + for addr_family in addr_family_allow_list: + if addr_family in ADDR_FAMILY_ALLOW_LIST: + continue + if not err: + err = "Invalid address families specified: " + err += addr_family + " " + if err: + error_handler("Configuration File Error", err.strip()) + + # Create and return full ss command and allow lists. + return ss_cmd, socket_allow_list, addr_family_allow_list + + +def command_executor(ss_cmd, socket_type): + """ + command_executor(): Execute the ss command and return the output. + + Inputs: + ss_cmd: The full ss command to execute. + socket_type: The type of socket to collect data for. + Outputs: + poutput: The stdout of the executed command (empty byte-string if error). 
+ """ + ss_socket_cmd = ss_cmd.copy() + ss_socket_cmd.extend(SOCKET_MAPPINGS[socket_type]["args"]) + ss_socket_cmd.extend(GLOBAL_ARGS) + + try: + # Execute ss command + poutput = subprocess.check_output( + ss_socket_cmd, + stdin=None, + stderr=subprocess.PIPE, + ) + except (subprocess.CalledProcessError, OSError) as err: + error_handler("Command Execution Error", err) + return poutput + + +def socket_parser(line, gentype, ss_data, socket_allow_list): + """ + socket_parser(): Parses a socket line for its current status. + That status type is added to the global ss_data + variable if it does not exist or incremented if + it does. The totals for the socket type are + incremented as well. + + Inputs: + line: The sockets's status line from the ss stdout. + gentype: The socket or address family to parse data for. + ss_data: All of the socket data as a dictionary. + socket_allow_list: List of sockets to parse data for. + Outputs: + None + """ + line_parsed = line.strip().split() + + netid = None + state = None + + try: + if SOCKET_MAPPINGS[gentype]["netids"]: + netid = line_parsed[0] + state = line_parsed[1] + else: + state = line_parsed[0] + except IndexError as err: + error_handler("Command Output Parsing Error", err) + + if SOCKET_MAPPINGS[gentype]["netids"]: + # Special case to convert the question-marks symbol + # to a safe string. + if netid == "???": + netid = "unknown" + + # Omit filtered sockets from the address families. 
+ if netid not in socket_allow_list: + return ss_data + + ss_data[netid][state] = ( + 1 if state not in ss_data[netid] else (ss_data[netid][state] + 1) + ) + ss_data[netid]["TOTAL"] = ( + 1 if "TOTAL" not in ss_data[netid] else (ss_data[netid]["TOTAL"] + 1) + ) + else: + ss_data[state] = 1 if state not in ss_data else (ss_data[state] + 1) + ss_data["TOTAL"] = 1 if "TOTAL" not in ss_data else (ss_data["TOTAL"] + 1) + + return ss_data + + +def main(): + """ + main(): main function that delegates config file parsing, command execution, + and socket stdout parsing. Then it prints out the expected json output + for the ss application. + + Inputs: + None + Outputs: + None + """ + output_data = {"errorString": "", "error": 0, "version": 1, "data": {}} + + # Parse configuration file. + ss_cmd, socket_allow_list, addr_family_allow_list = config_file_parser() + + # Execute ss command for socket types. + for gentype in list(SOCKET_MAPPINGS.keys()): + # Skip socket types and address families disabled by the user. + if ( + SOCKET_MAPPINGS[gentype]["socket_type"] and gentype not in socket_allow_list + ) or ( + SOCKET_MAPPINGS[gentype]["addr_family"] + and gentype not in addr_family_allow_list + ): + continue + + # Build the initial output_data datastructures. + output_data["data"][gentype] = {} + for netid in SOCKET_MAPPINGS[gentype]["netids"]: + # Skip the netid if the socket is not allowed. + if netid not in socket_allow_list: + continue + output_data["data"][gentype][netid] = {} + + for line in command_executor(ss_cmd, gentype).decode("utf-8").split("\n"): + if not line: + continue + + output_data["data"][gentype] = socket_parser( + line, + gentype, + output_data["data"][gentype], + socket_allow_list, + ) + + print(json.dumps(output_data)) + + +if __name__ == "__main__": + main() From 9979517470c579f06821027d0170cd17f632566c Mon Sep 17 00:00:00 2001 From: "Zane C. 
Bowers-Hadley" Date: Sun, 19 Nov 2023 04:33:41 -0600 Subject: [PATCH 397/497] borg backup extend (#495) --- snmp/borgbackup | 446 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 446 insertions(+) create mode 100755 snmp/borgbackup diff --git a/snmp/borgbackup b/snmp/borgbackup new file mode 100755 index 000000000..d20c49ce7 --- /dev/null +++ b/snmp/borgbackup @@ -0,0 +1,446 @@ +#!/usr/bin/env perl + +#Copyright (c) 2023, Zane C. Bowers-Hadley +#All rights reserved. +# +#Redistribution and use in source and binary forms, with or without modification, +#are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +#ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +#WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +#IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, +#INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +#BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +#LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +#OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +#THE POSSIBILITY OF SUCH DAMAGE. 
+ +=head1 NAME + +borgbackup - LibreNMS JSON SNMP extend for gathering backups for borg + +=head1 VERSION + +0.0.1 + +=head1 SYNOPSIS + +borgbackup [B<-c> ] [B<-o> ] + +borgbackup [B<--help>|B<-h>] + +borgbackup [B<--version>|B<-v>] + +=head1 DESCRIPTION + +This uses 'borg info $repo --json' to fetch info on the specified borg repos +and write the info out to files. + +The information is then writen out two two files under the output directory. + + - extend_return :: This file contains the data for the extend in + gzip+base64 compressed format if applicable. + + - pretty :: Pretty printed and sorted JSON. + +This is done for three reasons. The first is SNMPD and the users with read perms +for the repos are likely to be different. The second is lock time out, even with +1 second, means the command likely won't complete in a timely manner for larger +repos. + +For SNMPD generally going to be setup like this. + + extend borgbackup /bin/cat /var/cache/borgbackup_extend + +Then the extend is set to be ran via cron. + + */5 * * * * /etc/snmp/extends/borgbackup + +=head1 FLAGS + +=head2 -c + +The config file to use for the extend. + +Default :: /usr/local/etc/borgbackup_extend.ini + +=head2 -o + +The output directory write the pretty JSON file to and the file to use +for the SNMP extend. + +Default :: /var/cache/borgbackup_extend + +=head2 -h|--help + +Print help info. + +=head2 -v|--version + +Print version info. + +=head1 CONFIG + +The config file is a ini file and handled by L. + + - mode :: single or multi, for if this is a single repo or for + multiple repos. + - Default :: single + + - repo :: Directory for the borg backup repo. + - Default :: undef + + - passphrase :: Passphrase for the borg backup repo. + - Default :: undef + + - passcommand :: Passcommand for the borg backup repo. + - Default :: undef + +For single repos all those variables are in the root section of the config, +so lets the repo is at '/backup/borg' with a passphrase of '1234abc'. 
+ + repo=/backup/borg + repo=1234abc + +For multi, each section outside of the root represents a repo. So if there is +'/backup/borg1' with a passphrase of 'foobar' and '/backup/derp' with a passcommand +of 'pass show backup' it would be like below. + + mode=multi + + [borg1] + repo=/backup/borg1 + passphrase=foobar + + [derp] + repo=/backup/derp + passcommand=pass show backup + +If 'passphrase' and 'passcommand' are both specified, then passcommand is used. + +=head1 JSON RETURN + +The return is a LibreNMS JSON style SNMP extend as defined at +L + +The following key info is relevant to the .data . + + - .mode :: The mode it was ran in, either single or multi. + +Totaled info is in the hash .totals. + + - .totals.errored :: Total number of repos that info could not be fetched for. + - Type :: repos + + - .totals.locked :: Total number of locked repos + - Type :: repos + + - .totals.locked_for :: Longest time any repo has been locked. + - Type :: seconds + + - .totals.time_since_last_modified :: Largest time - mtime for the repo directory + - Type :: seconds + + - .total.total_chunks :: Total number of checks between all repos. + - Type :: chunks + + - .total.total_csize :: Total compressed size of all archives in all repos. + - Type :: bytes + + - .total.total_size :: Total uncompressed size of all archives in all repos. + - Type :: bytes + + - .total.total_unique_chunks :: Total number of unique chuckes in all repos. + - Type :: chunks + + - .total.unique_csize :: Total deduplicated size of all archives in all repos. + - Type :: bytes + + - .total.unique_size :: Total number of chunks in all repos. + - Type :: chunks + +Each repo then has it's own hash under .repo . + + - .repo.$repo.error :: If defined, this is the error encounted when + attempting to get repo info. + - Type :: string + + - .repo.$repo.locked_for :: How long the repo has been locked for if + locked. If it is not locked this is undef. 
+ - Type :: seconds + + - .repo.$repo.time_since_last_modified :: time - mtime for the repo directory + - Type :: seconds + + - .repo.$repo.total_chunks :: Total number of checks for the repo. + - Type :: chunks + + - .repo.$repo.total_csize :: Total compressed size of all archives for the repo. + - Type :: bytes + + - .repo.$repo.total_size :: Total uncompressed size of all archives the repo. + - Type :: bytes + + - .repo.$repo.total_unique_chunks :: Total number of unique chuckes the repo. + - Type :: chunks + + - .repo.$repo.unique_csize :: Total deduplicated size of all archives the repo. + - Type :: bytes + + - .repo.$repo.unique_size :: Total number of chunks in the repo. + - Type :: chunks + +=cut + +use strict; +use warnings; +use Config::Tiny; +use JSON; +use Getopt::Long; +use File::Slurp; +use File::Path qw(make_path); +use MIME::Base64; +use IO::Compress::Gzip qw(gzip $GzipError); +use String::ShellQuote; +use Pod::Usage; + +our $output_dir = '/var/cache/borgbackup_extend'; +my $config_file = '/usr/local/etc/borgbackup_extend.ini'; +my $version; +my $help; +GetOptions( + 'c=s' => \$config_file, + 'o=s' => \$output_dir, + v => \$version, + version => \$version, + h => \$help, + help => \$help, +); + +if ($version) { + pod2usage( -exitval => 255, -verbose => 99, -sections => qw(VERSION), -output => \*STDOUT, ); +} + +if ($help) { + pod2usage( -exitval => 255, -verbose => 2, -output => \*STDOUT, ); +} + +# save the return +sub finish { + my (%opts) = @_; + + if ( !-e $output_dir ) { + make_path($output_dir) or die( 'could not create the output dir, "' . $output_dir . '",' ); + } elsif ( -e $output_dir && !-d $output_dir ) { + die( '"' . $output_dir . '" exists, but is not a directory' ); + } + + my $j = JSON->new; + + my $return_string = $j->encode( $opts{to_return} ); + + my $compressed_string; + gzip \$return_string => \$compressed_string; + my $compressed = encode_base64($compressed_string); + $compressed =~ s/\n//g; + $compressed = $compressed . 
"\n"; + if ( length($compressed) > length($return_string) ) { + write_file( $output_dir . '/extend_return', $return_string ); + } else { + write_file( $output_dir . '/extend_return', $compressed ); + } + + $j->pretty(1); + $j->canonical(1); + $return_string = $j->encode( $opts{to_return} ); + + write_file( $output_dir . '/pretty', $return_string ); + + print $return_string; + + exit $opts{to_return}->{error}; +} ## end sub finish + +my $to_return = { + data => { + mode => 'single', + totals => { + total_chunks => 0, + total_csize => 0, + total_size => 0, + total_unique_chunks => 0, + unique_csize => 0, + unique_size => 0, + locked => 0, + time_since_last_modified => undef, + errored => 0, + locked_for => undef, + }, + repos => {}, + }, + version => 1, + error => 0, + errorString => '', +}; + +# attempt to read in the config +my $config; +eval { + my $raw_config = read_file($config_file); + ($config) = Config::Tiny->read_string($raw_config); +}; +if ($@) { + $to_return->{error} = 1; + $to_return->{errorString} = 'Failed reading config file "' . $config_file . '"... ' . $@; + finish( to_return => $to_return ); +} + +if ( !defined( $config->{_}{mode} ) ) { + $config->{_}{mode} = 'single'; +} elsif ( $config->{_}{mode} ne 'single' && $config->{_}{mode} ne 'multi' ) { + $to_return->{error} = 2; + $to_return->{errorString} = '"' . $config->{_}{mode} . 
'" mode is not set to single or multi'; + finish( to_return => $to_return ); +} + +# get a list of repos to use +my @repos; +if ( $config->{_}{mode} eq 'single' ) { + # if single, just create a single repo + push( @repos, 'single' ); + $config->{single} = {}; + + # make sure we have passcommand or passphrase with passphrase being used as the default + if ( !defined( $config->{_}{passcommand} ) && !defined( $config->{_}{passphrase} ) ) { + $to_return->{error} = 3; + $to_return->{errorString} = 'Neither passcommand or passphrase defined'; + finish( to_return => $to_return ); + } elsif ( $config->{_}{passphrase} ) { + $config->{single}{passphrase} = $config->{_}{passphrase}; + } elsif ( $config->{_}{passcommand} ) { + $config->{single}{passcommand} = $config->{_}{passcommand}; + } + + # make sure have a repo specified + if ( !defined( $config->{_}{repo} ) ) { + $to_return->{error} = 4; + $to_return->{errorString} = 'repo is not defined'; + finish( to_return => $to_return ); + } + $config->{single}{repo} = $config->{_}{repo}; + +} else { + # we don't want _ as that is the root of the ini file + @repos = grep( !/^\_$/, keys( %{$config} ) ); +} + +my @totals + = ( 'total_chunks', 'total_csize', 'total_size', 'total_unique_chunks', 'unique_csize', 'unique_size', 'locked' ); +my @stats = ( 'total_chunks', 'total_csize', 'total_size', 'total_unique_chunks', 'unique_csize', 'unique_size' ); + +foreach my $repo (@repos) { + my $process = 1; + + # unset borg pass bits + delete( $ENV{BORG_PASSPHRASE} ); + delete( $ENV{BORG_PASSCOMMAND} ); + + my $repo_info = { + total_chunks => 0, + total_csize => 0, + total_size => 0, + total_unique_chunks => 0, + unique_csize => 0, + unique_size => 0, + locked => 0, + time_since_last_modified => undef, + error => undef, + locked_for => undef, + }; + + if ( !defined( $config->{$repo}{passcommand} ) && !defined( $config->{$repo}{passphrase} ) ) { + $to_return->{error} = 3; + $to_return->{errorString} + = $to_return->{errorString} . "\n" . 
'Neither passcommand or passphrase defined for ' . $repo; + $process = 0; + } + + if ( !defined( $config->{$repo}{repo} ) ) { + $to_return->{error} = 4; + $to_return->{errorString} = $to_return->{errorString} . "\n" . 'repo is not defined for ' . $repo; + $process = 0; + } + + if ($process) { + my ( $dev, $ino, $mode, $nlink, $uid, $gid, $rdev, $size, $atime, $mtime, $ctime, $blksize, $blocks ) + = stat( $config->{$repo}{repo} ); + + my $time_diff = time - $mtime; + $repo_info->{time_since_last_modified} = $time_diff; + + # if we don't have a largest time diff or if it is larger than then + # the old one save the time diff + if ( !defined( $to_return->{data}{totals}{time_since_last_modified} ) + || $to_return->{data}{totals}{time_since_last_modified} < $time_diff ) + { + $to_return->{data}{totals}{time_since_last_modified} = $time_diff; + } + + if ( defined( $config->{$repo}{passcommand} ) ) { + $ENV{BORG_PASSCOMMAND} = $config->{$repo}{passcommand}; + } else { + $ENV{BORG_PASSPHRASE} = $config->{$repo}{passphrase}; + } + + my $command = 'borg info ' . shell_quote( $config->{$repo}{repo} ) . ' --json 2>&1'; + my $output_raw = `$command`; + + my $info; + eval { $info = decode_json($output_raw); }; + if ($@) { + my $error = $@; + if ( $output_raw =~ /lock.*lock\.exclusive/ ) { + $repo_info->{locked} = 1; + + my $lock_file = $config->{$repo}{repo} . 
'/lock.exclusive'; + ( $dev, $ino, $mode, $nlink, $uid, $gid, $rdev, $size, $atime, $mtime, $ctime, $blksize, $blocks ) + = stat($lock_file); + $repo_info->{locked_for} = time - $ctime; + } else { + $repo_info->{error} = $error; + } + } else { + if ( defined( $info->{cache} ) && defined( $info->{cache}{stats} ) ) { + for my $stat (@stats) { + $repo_info->{$stat} = $info->{cache}{stats}{$stat}; + } + } + } + + for my $total (@totals) { + $to_return->{data}{totals}{$total} = $to_return->{data}{totals}{$total} + $repo_info->{$total}; + } + + if ( defined( $repo_info->{error} ) ) { + $to_return->{data}{totals}{errored}++; + } + + if ( !defined( $to_return->{data}{totals}{locked_for} ) + || $to_return->{data}{totals}{locked_for} < $repo_info->{locked_for} ) + { + $to_return->{data}{totals}{locked_for} = $repo_info->{locked_for}; + } + } ## end if ($process) + + $to_return->{data}{repos}{$repo} = $repo_info; +} ## end foreach my $repo (@repos) + +finish( to_return => $to_return ); From 335629dd6208a614558908b27afa26449447c2d5 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Sun, 19 Nov 2023 15:31:32 -0600 Subject: [PATCH 398/497] borgbackup mtime fix for repo (#496) --- snmp/borgbackup | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/borgbackup b/snmp/borgbackup index d20c49ce7..c6c88fb11 100755 --- a/snmp/borgbackup +++ b/snmp/borgbackup @@ -381,7 +381,7 @@ foreach my $repo (@repos) { if ($process) { my ( $dev, $ino, $mode, $nlink, $uid, $gid, $rdev, $size, $atime, $mtime, $ctime, $blksize, $blocks ) - = stat( $config->{$repo}{repo} ); + = stat( $config->{$repo}{repo} . 
'/nonce' ); my $time_diff = time - $mtime; $repo_info->{time_since_last_modified} = $time_diff; From 0bea0a69f4010daa2c2a33a76e1ebc05b93de4d4 Mon Sep 17 00:00:00 2001 From: Dan Langille Date: Sat, 16 Dec 2023 21:53:04 -0500 Subject: [PATCH 399/497] Update URL for ZFS (#498) Correct the URL --- snmp/zfs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/zfs b/snmp/zfs index d80e73e2e..32800e688 100755 --- a/snmp/zfs +++ b/snmp/zfs @@ -4,7 +4,7 @@ This is a SNMP extend for ZFS for use with LibreNMS. -For more information, see L. +For more information, see L. =head1 SWITCHES From 14795a7a062bcc760cc13ef6b0979802882f5b83 Mon Sep 17 00:00:00 2001 From: Peter Childs Date: Sun, 17 Dec 2023 13:26:21 +1030 Subject: [PATCH 400/497] ensure returned values do not exceed max of RRD file (#497) --- snmp/mysql | 2 ++ 1 file changed, 2 insertions(+) diff --git a/snmp/mysql b/snmp/mysql index 530637352..d7ac4f09a 100755 --- a/snmp/mysql +++ b/snmp/mysql @@ -158,6 +158,8 @@ if (!isset($called_by_script_server)) { $output[] = $item; } list($short, $val) = explode(":", $item); + # ensure returned values do not exceed max limits in RRD + $val = $val % ( 124999999999 + 1 ); echo(strtolower($short).":".strtolower($val)."\n"); } debug(array("Final result", $output)); From 7a392acb356f42e3fe6d1ed050501be0552d6b53 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Tue, 19 Dec 2023 21:10:57 -0600 Subject: [PATCH 401/497] fix for issues/501 and do assorted cleanup while there (#502) --- snmp/zfs | 197 ++++++++++++++++++++++++++++++++----------------------- 1 file changed, 116 insertions(+), 81 deletions(-) diff --git a/snmp/zfs b/snmp/zfs index 32800e688..6a14acdad 100755 --- a/snmp/zfs +++ b/snmp/zfs @@ -1,8 +1,14 @@ #!/usr/bin/env perl -=head1 DESCRIPTION +=head1 NAME + +zfs - LibreNMS JSON SNMP extend for gathering backups for ZFS -This is a SNMP extend for ZFS for use with LibreNMS. +=head1 VERSION + +0.1.0 + +=head1 DESCRIPTION For more information, see L. 
@@ -29,11 +35,11 @@ in the return. The requirements may be installed via CPAN like below for Linux. - apt-get install cpanminus zlib1g-dev + apt-get install cpanminus File::Slurp MIME::Base64 JSON Or on FreeBSD via pkg... - pkg install p5-JSON p5-File-Slurp p5-MIME-Base64 p5-Gzip-Faster + pkg install p5-JSON p5-File-Slurp p5-MIME-Base64 =cut @@ -62,31 +68,59 @@ Or on FreeBSD via pkg... # Many thanks to Ben Rockwood, Jason J. Hellenthal, and Martin Matuska # for zfs-stats and figuring out the math for all the stats +# +# Thanks to dlangille for pointing out the issues on 14 and Bobzikwick figuring out the fix in issues/501 use strict; use warnings; use JSON; -use Getopt::Std; +use Getopt::Long; use File::Slurp; use MIME::Base64; -use Gzip::Faster; +use IO::Compress::Gzip qw(gzip $GzipError); +use Pod::Usage; -$Getopt::Std::STANDARD_HELP_VERSION = 1; +#$Getopt::Std::STANDARD_HELP_VERSION = 1; sub main::VERSION_MESSAGE { - print "FreeBSD ZFS v3 stats extend 0.0.1\n"; + pod2usage( -exitval => 255, -verbose => 99, -sections => qw(VERSION), -output => \*STDOUT, ); } sub main::HELP_MESSAGE { - + pod2usage( -exitval => 255, -verbose => 2, -output => \*STDOUT, ); } #this will be dumped to json at the end my %tojson; #gets the options -my %opts = (); -getopts( 'pbs', \%opts ); +my %opts; +my $opts_p; +my $opts_b; +my $opts_s; +my $version; +my $help; +#getopts( 'pbs', \%opts ); +GetOptions( + p => \$opts_p, + b => \$opts_b, + s => \$opts_s, + v => \$version, + version => \$version, + h => \$help, + help => \$help, +); +$opts{p} = $opts_p; +$opts{b} = $opts_b; +$opts{s} = $opts_s; + +if ($version) { + pod2usage( -exitval => 255, -verbose => 99, -sections => qw(VERSION), -output => \*STDOUT, ); +} + +if ($help) { + pod2usage( -exitval => 255, -verbose => 2, -output => \*STDOUT, ); +} #process each pool and shove them into JSON my $zpool_output = `/sbin/zpool list -pH`; @@ -118,39 +152,33 @@ while ( defined( $pools[$pools_int] ) ) { $newPool{dedup}, $newPool{health}, 
$newPool{altroot} ) = split( /\,/, $pool ); - if ($opts{s}) { + if ( $opts{s} ) { $newPool{status} = `zpool status $newPool{name}`; } if ( $newPool{health} eq 'ONLINE' ) { $newPool{health} = 0; $tojson{online}++; - } - elsif ( $newPool{health} eq 'DEGRADED' ) { + } elsif ( $newPool{health} eq 'DEGRADED' ) { $newPool{health} = 1; $tojson{health} = 0; $tojson{degraded}++; - } - elsif ( $newPool{health} eq 'OFFLINE' ) { + } elsif ( $newPool{health} eq 'OFFLINE' ) { $newPool{health} = 2; $tojson{offline}++; - } - elsif ( $newPool{health} eq 'FAULTED' ) { + } elsif ( $newPool{health} eq 'FAULTED' ) { $newPool{health} = 3; $tojson{health} = 0; $tojson{faulted}++; - } - elsif ( $newPool{health} eq 'UNAVAIL' ) { + } elsif ( $newPool{health} eq 'UNAVAIL' ) { $newPool{health} = 4; $tojson{health} = 0; $tojson{unavail}++; - } - elsif ( $newPool{health} eq 'REMOVED' ) { + } elsif ( $newPool{health} eq 'REMOVED' ) { $newPool{health} = 5; $tojson{health} = 0; $tojson{removed}++; - } - else { + } else { $newPool{health} = 6; $tojson{health} = 0; $tojson{unknown}++; @@ -188,7 +216,7 @@ while ( defined( $pools[$pools_int] ) ) { push( @toShoveIntoJSON, \%newPool ); $pools_int++; -} +} ## end while ( defined( $pools[$pools_int] ) ) $tojson{pools} = \@toShoveIntoJSON; # @@ -209,10 +237,9 @@ if ( $^O eq 'freebsd' ) { $var =~ s/^.*\.arcstats\.//; $stats_stuff->{$var} = $val; } - } + } ## end foreach my $stat (@sysctls_pull) -} -elsif ( $^O eq 'linux' ) { +} elsif ( $^O eq 'linux' ) { my @arcstats_lines = read_file('/proc/spl/kstat/zfs/arcstats'); foreach my $line (@arcstats_lines) { chomp($line); @@ -222,30 +249,30 @@ elsif ( $^O eq 'linux' ) { } # does not seem to exist for me, but some of these don't seem to be created till needed -if ( !defined( $stats_stuff->{"recycle_miss"} ) ) { - $stats_stuff->{"recycle_miss"} = 0; +if ( !defined( $stats_stuff->{recycle_miss} ) ) { + $stats_stuff->{recycle_miss} = 0; } ## ## ARC misc ## -$tojson{deleted} = $stats_stuff->{"deleted"}; 
-$tojson{evict_skip} = $stats_stuff->{"evict_skip"}; -$tojson{mutex_skip} = $stats_stuff->{'mutex_miss'}; -$tojson{recycle_miss} = $stats_stuff->{"recycle_miss"}; +$tojson{deleted} = $stats_stuff->{deleted}; +$tojson{evict_skip} = $stats_stuff->{evict_skip}; +$tojson{mutex_skip} = $stats_stuff->{mutex_miss}; +$tojson{recycle_miss} = $stats_stuff->{recycle_miss}; ## ## ARC size ## -my $target_size_percent = $stats_stuff->{"c"} / $stats_stuff->{"c_max"} * 100; -my $arc_size_percent = $stats_stuff->{"size"} / $stats_stuff->{"c_max"} * 100; -my $target_size_adaptive_ratio = $stats_stuff->{"c"} / $stats_stuff->{"c_max"}; -my $min_size_percent = $stats_stuff->{"c_min"} / $stats_stuff->{"c_max"} * 100; - -$tojson{arc_size} = $stats_stuff->{"size"}; -$tojson{target_size_max} = $stats_stuff->{"c_max"}; -$tojson{target_size_min} = $stats_stuff->{"c_min"}; -$tojson{target_size} = $stats_stuff->{"c"}; +my $target_size_percent = $stats_stuff->{c} / $stats_stuff->{c_max} * 100; +my $arc_size_percent = $stats_stuff->{size} / $stats_stuff->{c_max} * 100; +my $target_size_adaptive_ratio = $stats_stuff->{c} / $stats_stuff->{c_max}; +my $min_size_percent = $stats_stuff->{c_min} / $stats_stuff->{c_max} * 100; + +$tojson{arc_size} = $stats_stuff->{size}; +$tojson{target_size_max} = $stats_stuff->{c_max}; +$tojson{target_size_min} = $stats_stuff->{c_min}; +$tojson{target_size} = $stats_stuff->{c}; $tojson{target_size_per} = $target_size_percent; $tojson{arc_size_per} = $arc_size_percent; $tojson{target_size_arat} = $target_size_adaptive_ratio; @@ -255,39 +282,47 @@ $tojson{min_size_per} = $min_size_percent; ## ARC size breakdown ## my $mfu_size; +if ( defined( $stats_stuff->{mfu_size} ) ) { + $mfu_size = $stats_stuff->{mfu_size}; +} my $recently_used_percent; my $frequently_used_percent; -if ( $stats_stuff->{"size"} >= $stats_stuff->{"c"} ) { - $mfu_size = $stats_stuff->{"size"} - $stats_stuff->{"p"}; - $recently_used_percent = $stats_stuff->{"p"} / $stats_stuff->{"size"} * 100; - 
$frequently_used_percent = $mfu_size / $stats_stuff->{"size"} * 100; +if ( !defined( $stats_stuff->{p} ) && defined( $stats_stuff->{mfu_size} ) ) { + $stats_stuff->{p} = $stats_stuff->{size} - $stats_stuff->{mfu_size}; } -else { - $mfu_size = $stats_stuff->{"c"} - $stats_stuff->{"p"}; - $recently_used_percent = $stats_stuff->{"p"} / $stats_stuff->{"c"} * 100; - $frequently_used_percent = $mfu_size / $stats_stuff->{"c"} * 100; +if ( $stats_stuff->{size} >= $stats_stuff->{c} ) { + if ( !defined($mfu_size) ) { + $mfu_size = $stats_stuff->{size} - $stats_stuff->{p}; + } + $recently_used_percent = $stats_stuff->{p} / $stats_stuff->{size} * 100; + $frequently_used_percent = $mfu_size / $stats_stuff->{size} * 100; +} else { + if ( !defined($mfu_size) ) { + $mfu_size = $stats_stuff->{c} - $stats_stuff->{p}; + } + $recently_used_percent = $stats_stuff->{p} / $stats_stuff->{c} * 100; + $frequently_used_percent = $mfu_size / $stats_stuff->{c} * 100; } -$tojson{p} = $stats_stuff->{"p"}; +$tojson{p} = $stats_stuff->{p}; ## ## ARC efficiency ## -my $arc_hits = $stats_stuff->{"hits"}; -my $arc_misses = $stats_stuff->{"misses"}; -my $demand_data_hits = $stats_stuff->{"demand_data_hits"}; -my $demand_data_misses = $stats_stuff->{"demand_data_misses"}; -my $demand_metadata_hits = $stats_stuff->{"demand_metadata_hits"}; -my $demand_metadata_misses = $stats_stuff->{"demand_metadata_misses"}; -my $mfu_ghost_hits = $stats_stuff->{"mfu_ghost_hits"}; -my $mfu_hits = $stats_stuff->{"mfu_hits"}; -my $mru_ghost_hits = $stats_stuff->{"mru_ghost_hits"}; -my $mru_hits = $stats_stuff->{"mru_hits"}; -my $prefetch_data_hits = $stats_stuff->{"prefetch_data_hits"}; -my $prefetch_data_misses = $stats_stuff->{"prefetch_data_misses"}; -my $prefetch_metadata_hits = $stats_stuff->{"prefetch_metadata_hits"}; -my $prefetch_metadata_misses = $stats_stuff->{"prefetch_metadata_misses"}; - +my $arc_hits = $stats_stuff->{hits}; +my $arc_misses = $stats_stuff->{misses}; +my $demand_data_hits = 
$stats_stuff->{demand_data_hits}; +my $demand_data_misses = $stats_stuff->{demand_data_misses}; +my $demand_metadata_hits = $stats_stuff->{demand_metadata_hits}; +my $demand_metadata_misses = $stats_stuff->{demand_metadata_misses}; +my $mfu_ghost_hits = $stats_stuff->{mfu_ghost_hits}; +my $mfu_hits = $stats_stuff->{mfu_hits}; +my $mru_ghost_hits = $stats_stuff->{mru_ghost_hits}; +my $mru_hits = $stats_stuff->{mru_hits}; +my $prefetch_data_hits = $stats_stuff->{prefetch_data_hits}; +my $prefetch_data_misses = $stats_stuff->{prefetch_data_misses}; +my $prefetch_metadata_hits = $stats_stuff->{prefetch_metadata_hits}; +my $prefetch_metadata_misses = $stats_stuff->{prefetch_metadata_misses}; ## ## ARC efficiency, common ## @@ -315,8 +350,7 @@ if ( $prefetch_data_total != 0 ) { my $anon_hits_percent; if ( $anon_hits != 0 ) { $anon_hits_percent = $anon_hits / $arc_hits * 100; -} -else { +} else { $anon_hits_percent = 0; } @@ -395,34 +429,35 @@ $tojson{l2_access_total} = $tojson{l2_hits} + $tojson{l2_misses}; ## my %head_hash; -$head_hash{'data'} = \%tojson; -$head_hash{'version'} = 3; -$head_hash{'error'} = 0; -$head_hash{'errorString'} = ''; +$head_hash{data} = \%tojson; +$head_hash{version} = 3; +$head_hash{error} = 0; +$head_hash{errorString} = ''; my $j = JSON->new; -if ( $opts{p} && ! $opts{b} ) { +if ( $opts{p} && !$opts{b} ) { $j->pretty(1); } my $return_string = $j->encode( \%head_hash ); -if ( !$opts{p} && ! $opts{b} ) { - print $return_string."\n"; +if ( !$opts{p} && !$opts{b} ) { + print $return_string. "\n"; exit 0; -}elsif (!$opts{b}) { +} elsif ( !$opts{b} ) { print $return_string; exit 0; } -my $compressed = encode_base64( gzip($return_string) ); +my $compressed_string; +gzip \$return_string => \$compressed_string; +my $compressed = encode_base64($compressed_string); $compressed =~ s/\n//g; $compressed = $compressed . "\n"; if ( length($compressed) > length($return_string) ) { - print $return_string."\n"; -} -else { + print $return_string. 
"\n"; +} else { print $compressed; } From 75ef911f56e09a72bca1bdd483c029b2036b23eb Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Mon, 8 Jan 2024 16:59:01 -0600 Subject: [PATCH 402/497] add ifAlias (#503) --- snmp/ifAlias | 187 +++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 187 insertions(+) create mode 100755 snmp/ifAlias diff --git a/snmp/ifAlias b/snmp/ifAlias new file mode 100755 index 000000000..e31e09dce --- /dev/null +++ b/snmp/ifAlias @@ -0,0 +1,187 @@ +#!/bin/sh +# (c) 2013-2017, f0o@devilcode.org, olb@nebkha.net +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program. If not, see . + +DISTRO_BIN="/usr/bin/distro" +BASE='.1.3.6.1.2.1.31.1.1.1.18' +GET_TYPE="$1" +GET_OID="$2" + +UNAME="$(/usr/bin/uname)" + +if [ "$(echo "${UNAME}" | grep -ci 'bsd$')" -eq 1 ]; then + UNAME="BSD" +fi + +# cache ip link output +if [ "${UNAME}" = 'Linux' ]; then + IP_LINK="$(ip link)" +else + IFCONFIG="$(ifconfig)" +fi + +# Get interface id from GET_OID script parameter depending on the get type -g +# or -n. +# +# snmpd specify two behaviors: GETNEXT and GET. +# +# script -g +# +# : Should return OID value +# +# script -n +# +# : Should return next OID value +# +# Note that interface id are not necessarly following incrementally. +# We need tho find the next interface id (which is not necessarily n+1). 
+# +interface_id() +{ + N= + L= + ID="${GET_OID#"$BASE".}" + + case "$GET_TYPE" in + -g) + echo "$ID" + return 0 + ;; + -n) + if [ "$ID" = "$BASE" ] + then + if [ "${UNAME}" = 'Linux' ]; then + # find the first iface_id + echo "$IP_LINK" | grep -oE "^[0-9]+:" | head -n 1 | cut -d':' -f 1 + return 0 + else + echo "${IFCONFIG}" | head -n 1 | cut -d: -f 1 + return 0 + fi + else + # find the next iface_id + if [ "${UNAME}" = 'Linux' ]; then + for N in $(echo "$IP_LINK" | grep -oE "^[0-9]+:" | cut -d':' -f 1) + do + if [ "$L" = "$ID" ] || [ -z "$ID" ]; then + printf '%s' "$N" + return 0 + fi + L="$N" + done + else + for N in $(echo "${IFCONFIG}" | grep -E '^[A-Za-z]+' | cut -d: -f1 | cat -n -b | sed 's/^ *//' | sed 's/[\t\ ].*//'); do + if [ "$L" = "$ID" ] || [ -z "$ID" ]; then + printf '%s' "$N" + return 0 + fi + L="$N" + done + fi + fi + ;; + esac + return 1 +} + +interface_name() +{ + if [ "${UNAME}" = 'Linux' ]; then + echo "$IP_LINK" | grep -oE "^$1: [^:@ ]*" | cut -d " " -f 2 + else + echo "${IFCONFIG}" | grep -E '^[A-Za-z]+' | cut -d: -f1 | head -n "$1" | tail -n 1 + fi +} + +alias_from_interfaces_config_file() +{ + CONFIG_FILE= + + if [ -x "$DISTRO_BIN" ]; then + if [ "${UNAME}" = 'Linux' ]; then + DISTRO_VAR="$($DISTRO_BIN | cut -d " " -f 1)" + else + DISTRO_VAR="${UNAME}" + fi + + case "${DISTRO_VAR}" in + Debian) + CONFIG_FILE="/etc/network/interfaces" + ;; + Ubuntu) + CONFIG_FILE="/etc/network/interfaces" + ;; + Gentoo) + CONFIG_FILE="/etc/conf.d/net" + ;; + CentOS|RedHat|SuSE|Mandriva|Mandrake) + CONFIG_FILE="/etc/sysconfig/network-scripts/ifcfg-$1" + ;; + Archlinux) + CONFIG_FILE="/etc/conf.d/net-conf-$1" + ;; + BSD) + CONFIG_FILE="/etc/rc.conf" + ;; + esac + fi + if [ "$CONFIG_FILE" ]; then + # echo squashes possible multi line replies to a single line + FOUND_LINES="$(grep -i "^# $1:" $CONFIG_FILE | sed "s/^# $1: //i")" + if [ "$(echo "${FOUND_LINES}" | wc -l)" -ge 1 ]; then + echo "${FOUND_LINES}" + return 0 + fi + fi + if [ "${UNAME}" = "Linux" ] && [ 
-d '/etc/network/interfaces.d' ]; then + if [ "$(find /etc/network/interfaces.d/ -type f | wc -l)" -ge 1 ]; then + # echo squashes possible multi line replies to a single line + TO_ECHO_AND_MAKE_LINT_HAPPY="$(grep -r -i "^# $1:" '/etc/network/interfaces.d/' | sed "s/^# $1: //i")" + echo "${TO_ECHO_AND_MAKE_LINT_HAPPY}" + fi + fi +} + +alias_from_ip_link() +{ + case "${UNAME}" in + Linux) + ip link show "$1" | grep -e "^[[:space:]]*alias[[:space:]]" | sed -e 's/^[[:space:]]*alias //' + ;; + BSD) + if [ "$(ifconfig "$1" | grep 'description:' | head -n 1 | cut -d: -f 2- | wc -l)" -eq 1 ]; then + ifconfig "$1" | grep 'description:' | head -n 1 | cut -d: -f 2- | sed 's/^ //' + else + echo "$1" + fi + ;; + *) echo "$1" ;; + esac +} + +IFACE_ID="$(interface_id)" +[ "$IFACE_ID" ] || exit + +IFACE="$(interface_name "$IFACE_ID")" + +VALUE= +# we first try to get alias from interface config file +[ "$VALUE" ] || VALUE="$(alias_from_interfaces_config_file "$IFACE")" +# then from ip link show $IFACE output +[ "$VALUE" ] || VALUE="$(alias_from_ip_link "$IFACE")" + +echo "${BASE}.${IFACE_ID}" +echo "string" +echo "$VALUE" +exit 0 From 1b332ab584e9e98128a989058c3768d5c0832b23 Mon Sep 17 00:00:00 2001 From: SourceDoctor Date: Thu, 18 Jan 2024 02:09:24 +0100 Subject: [PATCH 403/497] replace nan with 0 (#505) * replace nan with 0 replace nan with 0 of pool has a sice of zero * . --- snmp/dhcp.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/dhcp.py b/snmp/dhcp.py index a43b38760..b95747722 100755 --- a/snmp/dhcp.py +++ b/snmp/dhcp.py @@ -121,7 +121,7 @@ "network": p[0], "max": p[1], "cur": p[2], - "percent": p[3], + "percent": 0 if p[3] == "nan" else p[3], } ) continue From 7585501502501f38785eddda9220b1928e1940d8 Mon Sep 17 00:00:00 2001 From: tevkar Date: Thu, 22 Feb 2024 16:27:36 +0100 Subject: [PATCH 404/497] Deliver output for a specific memcached instance (#504) To fix issues around 'no memcache output'. Associated with the relevant branch for librenms. 
--- snmp/memcached | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/snmp/memcached b/snmp/memcached index f0d7844ee..c8f79d07e 100755 --- a/snmp/memcached +++ b/snmp/memcached @@ -11,12 +11,16 @@ if (! class_exists('Memcached')) { exit; } +$server='localhost'; +$port=11211; $m = new Memcached(); -$m->addServer('localhost', 11211); +$m->addServer($server, $port); echo json_encode(array( - 'data' => $m->getStats(), + // 'data' => $m->getStats(), + 'data' => ($m->getStats())["$server:$port"], 'error' => $m->getLastErrorCode(), 'errorString' => $m->getLastErrorMessage(), 'version' => '1.1', )); + From 01708e0e96aa7e392f26cd0ebf490c1c638c8866 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?F=C3=A9lix=20Bouynot?= Date: Thu, 22 Feb 2024 16:32:24 +0100 Subject: [PATCH 405/497] Fix for systems with more than 4 GPU and recent nvidia-smi version (#506) --- snmp/nvidia | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/snmp/nvidia b/snmp/nvidia index 8bb900f35..0495dc78a 100644 --- a/snmp/nvidia +++ b/snmp/nvidia @@ -17,14 +17,13 @@ sed='/usr/bin/env sed' # 0 1 43 3 2 0 0 2700 862 0 0 462 4 - - 0 26 3 $nvidiasmi dmon -c 1 -s pucvmet | $grep -v ^# | $sed 's/^ *//' | $sed 's/ */,/g' | $sed 's/-/0/g' -lines=$($nvidiasmi dmon -c 1 -s pucvmet | $grep -v ^# | $sed 's/^ *//' | $sed 's/ */,/g' | $sed 's/-/0/g' | wc -l) +gpu=$($nvidiasmi dmon -c 1 -s pucvmet | $grep -v ^# | $sed 's/^ *//' | $sed 's/ */,/g' | $sed 's/-/0/g' | wc -l) # if we are less than 5 then all GPUs were printed -if [ "$lines" -lt 5 ]; then +if [ "$gpu" -lt 5 ]; then exit 0; fi -gpu=5 loop=1 while [ $loop -eq 1 ] do From 46a1e8c03768d6d60b015f492003b676057a0c9e Mon Sep 17 00:00:00 2001 From: Peca Nesovanovic <59750439+Npeca75@users.noreply.github.com> Date: Mon, 26 Feb 2024 17:48:21 +0100 Subject: [PATCH 406/497] [ups-nut] Add temperature readout (#508) * Add temperature readout expose Battery Temperature value * Update ups-nut.sh --- snmp/ups-nut.sh | 4 ++++ 1 file changed, 4 
insertions(+) diff --git a/snmp/ups-nut.sh b/snmp/ups-nut.sh index e8dd3a824..b75580a4b 100755 --- a/snmp/ups-nut.sh +++ b/snmp/ups-nut.sh @@ -39,3 +39,7 @@ do fi done +UPSTEMP="ups\.temperature: [0-9.]+" +OUT=$(echo "$TMP" | grep -Eo "$UPSTEMP" | awk '{print $2}' | LANG=C sort | head -n 1) +[ -n "$OUT" ] && echo "$OUT" || echo "Unknown" + From d7c3e850bc5a2a4a89adafb73b07337e951e9a8b Mon Sep 17 00:00:00 2001 From: Anton Lundin Date: Tue, 27 Feb 2024 12:52:14 +0100 Subject: [PATCH 407/497] Add a plain bash version of memcached helper This is for servers which don't have php installed. --- snmp/memcached.sh | 49 +++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 49 insertions(+) create mode 100755 snmp/memcached.sh diff --git a/snmp/memcached.sh b/snmp/memcached.sh new file mode 100755 index 000000000..627a2f730 --- /dev/null +++ b/snmp/memcached.sh @@ -0,0 +1,49 @@ +#!/bin/bash + +declare -A stats + +exec 200<>/dev/tcp/localhost/11211 +echo "stats" >&200 +echo "quit" >&200 + +while read -r pre var val ; do + if [ "$pre" = "END" ] ; then + break + elif [ "$pre" = "STAT" ] ; then + val="${val/$'\r'/}" + if [ "$var" = "rusage_system" ] || [ "$var" = "rusage_user" ] ; then + val=$(bc -l <<< "scale=0 ; ($val * 1000)/1") + var+="_microseconds" + fi + stats["$var"]=$val + fi +done <&200 + +exec 200>&- + +cat </dev/null ; then + #echo -nE "s:${#var}:\"$var\";i:$val;" + echo "\"$var\": $val," + else + #echo -nE "s:${#var}:\"$var\";s:${#val}:\"$val\";" + echo "\"$var\": \"$val\"," + fi +done +echo '"dummy":"value"' + +cat < Date: Sat, 2 Mar 2024 09:31:40 -0600 Subject: [PATCH 408/497] initial nfs stuff for freebsd done --- snmp/nfs | 622 +++++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 622 insertions(+) create mode 100755 snmp/nfs diff --git a/snmp/nfs b/snmp/nfs new file mode 100755 index 000000000..93e88ceca --- /dev/null +++ b/snmp/nfs @@ -0,0 +1,622 @@ +#!/usr/bin/env perl + +## +## +## General Notes +## +## +# +# FreeBSD used as the 
design basis given better stats produced and as well +# as actually documented. + +### +### +### Linux Notes +### +### +# +# What the following map to if if there is a FreeBSD equivalent is not clear. +# +# fs_locations +# test_stateid +# fsid_present +# open_conf +# confirm +# null + +use strict; +use warnings; +use Getopt::Std; +use Getopt::Long; +use File::Slurp; +use MIME::Base64; +use IO::Compress::Gzip qw(gzip $GzipError); +use Pod::Usage; +use JSON; + +#the version of returned data +my $VERSION = 1; + +my $pretty; +my $cache_base = '/var/cache/nfs.json'; +my $write; +my $compress = 1; +my $version; +my $help; +GetOptions( + p => \$pretty, + 'b=s' => \$compress, + 'o=s' => \$cache_base, + 'w' => \$write, + v => \$version, + version => \$version, + h => \$help, + help => \$help, +); + +if ($version) { + pod2usage( -exitval => 255, -verbose => 99, -sections => qw(VERSION), -output => \*STDOUT, ); + exit 255; +} + +if ($help) { + pod2usage( -exitval => 255, -verbose => 2, -output => \*STDOUT, ); + exit 255; +} + +#the data to return +my $to_return = { + 'version' => $VERSION, + 'error' => '0', + 'errorString' => '', +}; +my $data = { + is_client => 0, + is_server => 0, + mounts => [], + mounted_by => [], + stats => { + client_rpc_null => 0, + client_rpc_Getattr => 0, + client_rpc_Setattr => 0, + client_rpc_Lookup => 0, + client_rpc_Readlink => 0, + client_rpc_Read => 0, + client_rpc_Write => 0, + client_rpc_Create => 0, + client_rpc_Remove => 0, + client_rpc_Rename => 0, + client_rpc_Link => 0, + client_rpc_Symlink => 0, + client_rpc_Mkdir => 0, + client_rpc_Rmdir => 0, + client_rpc_Readdir => 0, + client_rpc_RdirPlus => 0, + client_rpc_Access => 0, + client_rpc_Mknod => 0, + client_rpc_Fsstat => 0, + client_rpc_FSinfo => 0, + client_rpc_pathConf => 0, + client_rpc_Commit => 0, + client_rpc_SetClId => 0, + client_rpc_SetClIdCf => 0, + client_rpc_Lock => 0, + client_rpc_LockT => 0, + client_rpc_LockU => 0, + client_rpc_Open => 0, + client_rpc_OpenCfr => 0, + 
client_rpc_OpenDownGr => 0, + client_rpc_Close => 0, + client_rpc_RelLckOwn => 0, + client_rpc_FreeStateID => 0, + client_rpc_PutRootFH => 0, + client_rpc_DelegRet => 0, + client_rpc_GetAcl => 0, + client_rpc_SetAcl => 0, + client_rpc_ExchangeId => 0, + client_rpc_CreateSess => 0, + client_rpc_DestroySess => 0, + client_rpc_DestroyClId => 0, + client_rpc_LayoutGet => 0, + client_rpc_GetDevInfo => 0, + client_rpc_LayoutCommit => 0, + client_rpc_LayoutReturn => 0, + client_rpc_ReclaimCompl => 0, + client_rpc_ReadDataS => 0, + client_rpc_WriteDataS => 0, + client_rpc_CommitDataS => 0, + client_rpc_OpenLayout => 0, + client_rpc_CreateLayout => 0, + client_rpc_BindConnSess => 0, + client_rpc_LookupOpen => 0, + client_rpc_IOAdvise => 0, + client_rpc_Allocate => 0, + client_rpc_Copy => 0, + client_rpc_Seek => 0, + client_rpc_SeekDataS => 0, + client_rpc_GetExtattr => 0, + client_rpc_SetExtattr => 0, + client_rpc_RmExtattr => 0, + client_rpc_ListExtattr => 0, + client_rpc_Deallocate => 0, + client_rpc_LayoutError => 0, + client_OpenOwner => 0, + client_Opens => 0, + client_LockOwner => 0, + client_Locks => 0, + client_Delegs => 0, + client_LocalOwn => 0, + client_LocalOpen => 0, + client_LocalLown => 0, + client_LocalLock => 0, + client_Layouts => 0, + client_rpc_info_TimedOut => 0, + client_rpc_info_Invalid => 0, + client_rpc_info_X_Replies => 0, + client_rpc_info_Retries => 0, + client_rpc_info_Requests => 0, + client_cache_Attr_Hits => 0, + client_cache_Attr_Misses => 0, + client_cache_Lkup_Hits => 0, + client_cache_Lkup_Misses => 0, + client_cache_BioR_Hits => 0, + client_cache_BioR_Misses => 0, + client_cache_BioW_Hits => 0, + client_cache_BioW_Misses => 0, + client_cache_BioRL_Hits => 0, + client_cache_BioRL_Misses => 0, + client_cache_BioD_Hits => 0, + client_cache_BioD_Misses => 0, + client_cache_DirE_Hits => 0, + client_cache_DirE_Misses => 0, + server_Getattr => 0, + server_Setattr => 0, + server_Lookup => 0, + server_Readlink => 0, + server_Read => 0, + 
server_Write => 0, + server_Create => 0, + server_Remove => 0, + server_Rename => 0, + server_Link => 0, + server_Symlink => 0, + server_Mkdir => 0, + server_Rmdir => 0, + server_Readdir => 0, + server_RdirPlus => 0, + server_Access => 0, + server_Mknod => 0, + server_Fsstat => 0, + server_FSinfo => 0, + server_pathConf => 0, + server_Commit => 0, + server_LookupP => 0, + server_SetClId => 0, + server_SetClIdCf => 0, + server_Open => 0, + server_OpenAttr => 0, + server_OpenDwnGr => 0, + server_OpenCfrm => 0, + server_DelePurge => 0, + server_DelRet => 0, + server_GetFH => 0, + server_Lock => 0, + server_LockT => 0, + server_LockU => 0, + server_Close => 0, + server_Verify => 0, + server_NVerify => 0, + server_PutFH => 0, + server_PutPubFH => 0, + server_PutRootFH => 0, + server_Renew => 0, + server_RestoreFH => 0, + server_SaveFH => 0, + server_Secinfo => 0, + server_RelLockOwn => 0, + server_V4Create => 0, + server_BackChannelCt => 0, + server_BindConnToSes => 0, + server_ExchangeID => 0, + server_CreateSess => 0, + server_DestroySess => 0, + server_FreeStateID => 0, + server_GetDirDeleg => 0, + server_GetDevInfo => 0, + server_GetDevList => 0, + server_layoutCommit => 0, + server_LayoutGet => 0, + server_LayoutReturn => 0, + server_GetDirDeleg => 0, + server_GetDevInfo => 0, + server_GetDevList => 0, + server_layoutCommit => 0, + server_LayoutGet => 0, + server_LayoutReturn => 0, + server_SecInfNoName => 0, + server_Sequence => 0, + server_SetSSV => 0, + server_TestStateID => 0, + server_WantDeleg => 0, + server_DestroyClId => 0, + server_ReclaimCompl => 0, + server_Allocate => 0, + server_Copy => 0, + server_CopyNotify => 0, + server_Deallocate => 0, + server_IOAdvise => 0, + server_LayoutError => 0, + server_LayoutStats => 0, + server_OffloadCncl => 0, + server_OffloadStat => 0, + server_ReadPlus => 0, + server_Seek => 0, + server_WriteSame => 0, + server_Clone => 0, + server_GetExtattr => 0, + server_SetExtattr => 0, + server_ListExtattr => 0, + 
server_RmExtattr => 0, + server_Clients => 0, + server_OpenOwner => 0, + server_Opens => 0, + server_LockOwner => 0, + server_Locks => 0, + server_Delegs => 0, + server_Layouts => 0, + server_cache_Inprog => 0, + 'server_cache_Non-idem' => 0, + server_cache_Misses => 0, + server_cache_CacheSize => 0, + server_cache_TCPPeak => 0, + } +}; + +#### +#### +#### handle getting stats for FreeBSD +#### +#### +if ( $^O eq 'freebsd' ) { + my $output_raw = `nfsstat -E`; + my @output_split = split( /\n/, $output_raw ); + my $previous_line = ''; + foreach my $line (@output_split) { + if ( $previous_line =~ /Getattr\ +Setattr\ +Lookup\ +Readlink\ +Read\ +Write/ ) { + $line =~ s/^ +//; + ( + $data->{stats}{client_rpc_Getattr}, $data->{stats}{client_rpc_Setattr}, + $data->{stats}{client_rpc_Lookup}, $data->{stats}{client_rpc_Readlink}, + $data->{stats}{client_rpc_Read}, $data->{stats}{client_rpc_Write} + ) = split( / +/m, $line ); + } elsif ( $previous_line =~ /Create\ +Remove\ +Rename\ +Link\ +Symlink\ +Mkdir/ ) { + $line =~ s/^ +//; + ( + $data->{stats}{client_rpc_Create}, $data->{stats}{client_rpc_Remove}, + $data->{stats}{client_rpc_Rename}, $data->{stats}{client_rpc_Link}, + $data->{stats}{client_rpc_Symlink}, $data->{stats}{client_rpc_Mkdir} + ) = split( / +/m, $line ); + } elsif ( $previous_line =~ /Rmdir\ +Readdir\ +RdirPlus\ +Access\ +Mknod\ +Fsstat/ ) { + $line =~ s/^ +//; + ( + $data->{stats}{client_rpc_Rmdir}, $data->{stats}{client_rpc_Readdir}, + $data->{stats}{client_rpc_RdirPlus}, $data->{stats}{client_rpc_Access}, + $data->{stats}{client_rpc_Mknod}, $data->{stats}{client_rpc_Fsstat} + ) = split( / +/m, $line ); + } elsif ( $previous_line =~ /FSinfo\ +pathConf\ +Commit\ +SetClId\ +SetClIdCf\ +Lock/ ) { + $line =~ s/^ +//; + ( + $data->{stats}{client_rpc_FSinfo}, $data->{stats}{client_rpc_pathConf}, + $data->{stats}{client_rpc_Commit}, $data->{stats}{client_rpc_SetClId}, + $data->{stats}{client_rpc_SetClIdCf}, $data->{stats}{client_rpc_Lock} + ) = split( / +/m, $line 
); + } elsif ( $previous_line =~ /LockT\ +LockU\ +Open\ +OpenCfr/ ) { + $line =~ s/^ +//; + ( + $data->{stats}{client_rpc_LockT}, $data->{stats}{client_rpc_LockU}, + $data->{stats}{client_rpc_Open}, $data->{stats}{client_rpc_OpenCfr} + ) = split( / +/m, $line ); + } elsif ( $previous_line =~ /OpenDownGr\ +Close/ ) { + $line =~ s/^ +//; + ( $data->{stats}{client_rpc_OpenDownGr}, $data->{stats}{client_rpc_Close}, ) = split( / +/m, $line ); + } elsif ( $previous_line =~ /RelLckOwn\ +FreeStateID\ +PutRootFH\ +DelegRet\ +GetAcl\ +SetAcl/ ) { + $line =~ s/^ +//; + ( + $data->{stats}{client_rpc_RelLckOwn}, $data->{stats}{client_rpc_FreeStateID}, + $data->{stats}{client_rpc_PutRootFH}, $data->{stats}{client_rpc_DelegRet}, + $data->{stats}{client_rpc_GetAcl}, $data->{stats}{client_rpc_SetAcl} + ) = split( / +/m, $line ); + } elsif ( $previous_line =~ /ExchangeId\ +CreateSess\ +DestroySess\ +DestroyClId\ +LayoutGet\ +GetDevInfo/ ) { + $line =~ s/^ +//; + ( + $data->{stats}{client_rpc_ExchangeId}, $data->{stats}{client_rpc_CreateSess}, + $data->{stats}{client_rpc_DestroySess}, $data->{stats}{client_rpc_DestroyClId}, + $data->{stats}{client_rpc_LayoutGet}, $data->{stats}{client_rpc_GetDevInfo} + ) = split( / +/m, $line ); + } elsif ( + $previous_line =~ /LayoutCommit\ +LayoutReturn\ +ReclaimCompl\ +ReadDataS\ +WriteDataS\ +CommitDataS/ ) + { + $line =~ s/^ +//; + ( + $data->{stats}{client_rpc_LayoutCommit}, $data->{stats}{client_rpc_LayoutReturn}, + $data->{stats}{client_rpc_ReclaimCompl}, $data->{stats}{client_rpc_ReadDataS}, + $data->{stats}{client_rpc_WriteDataS}, $data->{stats}{client_rpc_CommitDataS} + ) = split( / +/m, $line ); + } elsif ( $previous_line =~ /OpenLayout\ +CreateLayout\ +BindConnSess\ +LookupOpen/ ) { + $line =~ s/^ +//; + ( + $data->{stats}{client_rpc_OpenLayout}, $data->{stats}{client_rpc_CreateLayout}, + $data->{stats}{client_rpc_BindConnSess}, $data->{stats}{client_rpc_LookupOpen} + ) = split( / +/m, $line ); + } elsif ( $previous_line =~ /IOAdvise\ 
+Allocate\ +Copy\ +Seek\ +SeekDataS\ +GetExtattr/ ) { + $line =~ s/^ +//; + ( + $data->{stats}{client_rpc_IOAdvise}, $data->{stats}{client_rpc_Allocate}, + $data->{stats}{client_rpc_Copy}, $data->{stats}{client_rpc_Seek}, + $data->{stats}{client_rpc_SeekDataS}, $data->{stats}{client_rpc_GetExtattr} + ) = split( / +/m, $line ); + } elsif ( $previous_line =~ /SetExtattr\ +RmExtattr\ +ListExtattr\ +Deallocate\ +LayoutError/ ) { + $line =~ s/^ +//; + ( + $data->{stats}{client_rpc_SetExtattr}, $data->{stats}{client_rpc_RmExtattr}, + $data->{stats}{client_rpc_ListExtattr}, $data->{stats}{client_rpc_Deallocate}, + $data->{stats}{client_rpc_LayoutError} + ) = split( / +/m, $line ); + } elsif ( $previous_line =~ /OpenOwner\ +Opens\ +LockOwner\ +Locks\ +Delegs\ +LocalOwn/ ) { + $line =~ s/^ +//; + ( + $data->{stats}{client_OpenOwner}, $data->{stats}{client_Opens}, $data->{stats}{client_LockOwner}, + $data->{stats}{client_Locks}, $data->{stats}{client_Delegs}, $data->{stats}{client_LockOwner} + ) = split( / +/m, $line ); + } elsif ( $previous_line =~ /LocalOpen\ +LocalLown\ +LocalLock\ +Layouts/ ) { + $line =~ s/^ +//; + ( + $data->{stats}{client_LocalOpen}, $data->{stats}{client_LocalLown}, + $data->{stats}{client_LocalLock}, $data->{stats}{client_Layouts} + ) = split( / +/m, $line ); + } elsif ( $previous_line =~ /TimedOut\ +Invalid\ +X\ Replies\ +Retries\ +Requests/ ) { + $line =~ s/^ +//; + ( + $data->{stats}{client_rpc_info_TimedOut}, $data->{stats}{client_rpc_info_Invalid}, + $data->{stats}{client_rpc_info_X_Replies}, $data->{stats}{client_rpc_info_Retries}, + $data->{stats}{client_rpc_info_Requests} + ) = split( / +/m, $line ); + } elsif ( $previous_line =~ /Attr\ Hits\ +Attr\ Misses\ +Lkup\ Hits\ +Lkup\ Misses/ ) { + $line =~ s/^ +//; + ( + $data->{stats}{client_cache_Attr_Hits}, $data->{stats}{client_cache_Attr_Misses}, + $data->{stats}{client_cache_Lkup_Hits}, $data->{stats}{client_cache_Lkup_Misses} + ) = split( / +/m, $line ); + } elsif ( $previous_line =~ /BioR\ 
Hits\ +BioR\ Misses\ +BioW\ Hits\ +BioW\ Misses/ ) { + $line =~ s/^ +//; + ( + $data->{stats}{client_cache_BioR_Hits}, $data->{stats}{client_cache_BioR_Misses}, + $data->{stats}{client_cache_BioW_Hits}, $data->{stats}{client_cache_BioW_Misses} + ) = split( / +/m, $line ); + } elsif ( $previous_line =~ /BioRL\ Hits\ +BioRL\ Misse\ +BioD\ Hits\ +BioD\ Misses/ ) { + $line =~ s/^ +//; + ( + $data->{stats}{client_cache_BioRL_Hits}, $data->{stats}{client_cache_BioRL_Misses}, + $data->{stats}{client_cache_BioD_Hits}, $data->{stats}{client_cache_BioD_Misses} + ) = split( / +/m, $line ); + } elsif ( $previous_line =~ /DirE\ Hits\ +DirE\ Misses/ ) { + $line =~ s/^ +//; + ( $data->{stats}{client_cache_DirE_Hits}, $data->{stats}{client_cache_DirE_Misses}, ) + = split( / +/m, $line ); + } elsif ( $previous_line =~ /Getattr\ +Setattr\ +Lookup\ +Readlink\ +Read\ +Write/ ) { + $line =~ s/^ +//; + ( + $data->{stats}{server_Getattr}, $data->{stats}{server_Setattr}, $data->{stats}{server_Lookup}, + $data->{stats}{server_Readlink}, $data->{stats}{server_Read}, $data->{stats}{server_Write}, + ) = split( / +/m, $line ); + } elsif ( $previous_line =~ /Create\ +Remove\ +Rename\ +Link\ +Symlink\ +Mkdir/ ) { + $line =~ s/^ +//; + ( + $data->{stats}{server_Create}, $data->{stats}{server_Remove}, $data->{stats}{server_Rename}, + $data->{stats}{server_Link}, $data->{stats}{server_Symlink}, $data->{stats}{server_Mkdir}, + ) = split( / +/m, $line ); + } elsif ( $previous_line =~ /Rmdir\ +Readdir\ +RdirPlus\ +Access\ +Mknod\ +Fsstat/ ) { + $line =~ s/^ +//; + ( + $data->{stats}{server_Rmdir}, $data->{stats}{server_Readdir}, $data->{stats}{server_RdirPlus}, + $data->{stats}{server_Access}, $data->{stats}{server_Mknod}, $data->{stats}{server_Fsstat}, + ) = split( / +/m, $line ); + } elsif ( $previous_line =~ /FSinfo\ +pathConf\ +Commit\ +LookupP\ +SetClId\ +SetClIdCf/ ) { + $line =~ s/^ +//; + ( + $data->{stats}{server_FSinfo}, $data->{stats}{server_pathConf}, $data->{stats}{server_Commit}, + 
$data->{stats}{server_LookupP}, $data->{stats}{server_SetClId}, $data->{stats}{server_SetClIdCf}, + ) = split( / +/m, $line ); + } elsif ( $previous_line =~ /Open\ +OpenAttr\ +OpenDwnGr\ +OpenCfrm\ +DelePurge\ +DelRet/ ) { + $line =~ s/^ +//; + ( + $data->{stats}{server_Open}, $data->{stats}{server_OpenAttr}, $data->{stats}{server_OpenDwnGr}, + $data->{stats}{server_OpenCfrm}, $data->{stats}{server_DelePurge}, $data->{stats}{server_DelRet}, + ) = split( / +/m, $line ); + } elsif ( $previous_line =~ /GetFH\ +Lock\ +LockT\ +LockU\ +Close\ +Verify/ ) { + $line =~ s/^ +//; + ( + $data->{stats}{server_GetFH}, $data->{stats}{server_Lock}, $data->{stats}{server_LockT}, + $data->{stats}{server_LockU}, $data->{stats}{server_Close}, $data->{stats}{server_Verify}, + ) = split( / +/m, $line ); + } elsif ( $previous_line =~ /NVerify\ +PutFH\ +PutPubFH\ +PutRootFH\ +Renew\ +RestoreFH/ ) { + $line =~ s/^ +//; + ( + $data->{stats}{server_NVerify}, $data->{stats}{server_PutFH}, $data->{stats}{server_PutPubFH}, + $data->{stats}{server_PutRootFH}, $data->{stats}{server_Renew}, $data->{stats}{server_RestoreFH}, + ) = split( / +/m, $line ); + } elsif ( $previous_line =~ /SaveFH\ +Secinfo\ +RelLockOwn\ +V4Create/ ) { + $line =~ s/^ +//; + ( + $data->{stats}{server_SaveFH}, $data->{stats}{server_Secinfo}, + $data->{stats}{server_RelLockOwn}, $data->{stats}{server_V4Create} + ) = split( / +/m, $line ); + } elsif ( $previous_line + =~ /BackChannelCt\ *BindConnToSes\ +ExchangeID\ +CreateSess\ +DestroySess\ +FreeStateID/ ) + { + $line =~ s/^ +//; + ( + $data->{stats}{server_BackChannelCt}, $data->{stats}{server_BindConnToSes}, + $data->{stats}{server_ExchangeID}, $data->{stats}{server_CreateSess}, + $data->{stats}{server_DestroySess}, $data->{stats}{server_FreeStateID}, + ) = split( / +/m, $line ); + } elsif ( + $previous_line =~ /GetDirDeleg\ +GetDevInfo\ +GetDevList\ +[lL]ayoutCommit\ +LayoutGet\ +LayoutReturn/ ) + { + $line =~ s/^ +//; + ( + $data->{stats}{server_GetDirDeleg}, 
$data->{stats}{server_GetDevInfo}, + $data->{stats}{server_GetDevList}, $data->{stats}{server_layoutCommit}, + $data->{stats}{server_LayoutGet}, $data->{stats}{server_LayoutReturn}, + ) = split( / +/m, $line ); + } elsif ( $previous_line =~ /SecInfNoName\ +Sequence\ +SetSSV\ +TestStateID\ +WantDeleg\ +DestroyClId/ ) { + $line =~ s/^ +//; + ( + $data->{stats}{server_SecInfNoName}, $data->{stats}{server_Sequence}, + $data->{stats}{server_SetSSV}, $data->{stats}{server_TestStateID}, + $data->{stats}{server_WantDeleg}, $data->{stats}{server_DestroyClId}, + ) = split( / +/m, $line ); + } elsif ( $previous_line =~ /ReclaimCompl/ ) { + $line =~ s/^ +//; + ( $data->{stats}{server_ReclaimCompl} ) = split( / +/m, $line ); + } elsif ( $previous_line =~ /Allocate\ +Copy\ +CopyNotify\ +Deallocate\ +IOAdvise\ +LayoutError/ ) { + $line =~ s/^ +//; + ( + $data->{stats}{server_Allocate}, $data->{stats}{server_Copy}, + $data->{stats}{server_CopyNotify}, $data->{stats}{server_Deallocate}, + $data->{stats}{server_IOAdvise}, $data->{stats}{server_LayoutError}, + ) = split( / +/m, $line ); + } elsif ( $previous_line =~ /LayoutStats\ +OffloadCncl\ +OffloadStat\ +ReadPlus\ +Seek\ +WriteSame/ ) { + $line =~ s/^ +//; + ( + $data->{stats}{server_LayoutStats}, $data->{stats}{server_OffloadCncl}, + $data->{stats}{server_OffloadStat}, $data->{stats}{server_ReadPlus}, + $data->{stats}{server_Seek}, $data->{stats}{server_WriteSame}, + ) = split( / +/m, $line ); + } elsif ( $previous_line =~ /Clone\ +GetExtattr\ +SetExtattr\ +ListExtattr\ +RmExtattr/ ) { + $line =~ s/^ +//; + ( + $data->{stats}{server_Clone}, $data->{stats}{server_GetExtattr}, + $data->{stats}{server_SetExtattr}, $data->{stats}{server_ListExtattr}, + $data->{stats}{server_RmExtattr} + ) = split( / +/m, $line ); + } elsif ( $previous_line =~ /Clients\ +OpenOwner\ +Opens\ +LockOwner\ +Locks\ +Delegs/ ) { + $line =~ s/^ +//; + ( + $data->{stats}{server_Clients}, $data->{stats}{server_OpenOwner}, $data->{stats}{server_Opens}, + 
$data->{stats}{server_LockOwner}, $data->{stats}{server_Locks}, $data->{stats}{server_Delegs}, + ) = split( / +/m, $line ); + } elsif ( $previous_line =~ /^ *Layouts *$/ ) { + $line =~ s/^ +//; + $line =~ s/ +$//; + $data->{stats}{server_Clients} = $line; + } elsif ( $previous_line =~ /Inprog\ +Non\-idem\ +Misses\ +CacheSize\ +TCPPeak/ ) { + $line =~ s/^ +//; + ( + $data->{stats}{server_cache_Inprog}, $data->{stats}{'server_cache_Non-idem'}, + $data->{stats}{server_cache_Misses}, $data->{stats}{server_cache_CacheSize}, + $data->{stats}{server_cache_TCPPeak} + ) = split( / +/m, $line ); + } + $previous_line = $line; + } ## end foreach my $line (@output_split) +} ## end if ( $^O eq 'freebsd' ) + +#### +#### +#### handle getting stats for Linux +#### +#### +if ( $^O eq 'linux' ) { + +} + +#### +#### +#### figure out if is a client and/or server +#### +#### +my @stat_keys = keys( %{ $data->{stats} } ); +foreach my $item (@stat_keys) { + if ($item=~/^client/ && $data->{stats}{$item} > 0) { + $data->{is_client}=1 + }elsif ($item=~/^server/ && $data->{stats}{$item} > 0) { + $data->{is_server}=1 + } +} + +#### +#### +#### if server, call showmount +#### +#### +if ($data->{is_server}) { + my $output_raw = `showmount -a`; + my @output_split = split( /\n/, $output_raw ); + foreach my $line (@output_split) { + if ($line=~/\:\//) { + my ($host, $path)=split(/\:\//, $line); + push(@{$data->{mounted_by}}, {host=>$host, path=>'/'.$path}); + } + } +} + +#### +#### +#### if client, call nfsstat -m +#### +#### +if ($data->{is_client}) { + if ($^O eq 'freebsd') { + my $output_raw = `nfsstat -m`; + my @output_split = split( /\n/, $output_raw ); + my $previous_line=''; + my $host; + my $rpath; + my $lpath; + foreach my $line (@output_split) { + if ($line =~ /\:\/.* on \//) { + $host=$line; + $host=~s/\:\/.*$//; + + $rpath=$line; + $rpath=~s/\ on\ \/.*$//; + $rpath=~s/^.*\:\///; + $rpath='/'.$rpath; + + $lpath=$line; + $lpath=~s/^.*\:\/.*\ on \///; + $lpath='/'.$lpath; + }elsif ($line =~ 
/\,/ && defined($host) && defined($rpath) && defined($lpath) ) { + my @flags; + my %opts; + my @line_split=split(/\,/, $line); + foreach my $item (@line_split) { + if ($item =~ /\=/) { + my ($var, $val)=split(/\=/, $item); + $opts{$var}=$val; + }else { + push(@flags, $item); + } + } + push(@{$data->{mounted}}, { host=>$host, rpath=>$rpath, lpath=>$lpath,flags=>\@flags, opts=>\%opts }); + } + } + }elsif ($^O eq 'linux') { + my $output_raw = `nfsstat -m`; + my @output_split = split( /\n/, $output_raw ); + } +} + +#add the data has to the return hash +$to_return->{data} = $data; + +#finally render the JSON +my $j = JSON->new; +if ($pretty) { + $j->pretty(1); +} +print $j->encode($to_return); +if ( !$pretty ) { + print "\n"; +} From 28f51c3ba0154d7f66de3340fe807aa49a81b819 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Sat, 2 Mar 2024 09:43:48 -0600 Subject: [PATCH 409/497] remove Getopt::Std --- snmp/nfs | 1 - 1 file changed, 1 deletion(-) diff --git a/snmp/nfs b/snmp/nfs index 93e88ceca..1b0c4ac77 100755 --- a/snmp/nfs +++ b/snmp/nfs @@ -26,7 +26,6 @@ use strict; use warnings; -use Getopt::Std; use Getopt::Long; use File::Slurp; use MIME::Base64; From d969c9549e8da8a80268a7cfa8c2ade05faec1d8 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Sat, 2 Mar 2024 14:55:59 -0600 Subject: [PATCH 410/497] nfsstat -m works for Linux now as well --- snmp/nfs | 121 ++++++++++++++++++++++++++++++++++++------------------- 1 file changed, 80 insertions(+), 41 deletions(-) diff --git a/snmp/nfs b/snmp/nfs index 1b0c4ac77..3a59ead22 100755 --- a/snmp/nfs +++ b/snmp/nfs @@ -36,6 +36,9 @@ use JSON; #the version of returned data my $VERSION = 1; +# ensure sbin is in the path +$ENV{PATH} = $ENV{PATH} . 
':/sbin:/usr/sbin'; + my $pretty; my $cache_base = '/var/cache/nfs.json'; my $write; @@ -537,10 +540,10 @@ if ( $^O eq 'linux' ) { #### my @stat_keys = keys( %{ $data->{stats} } ); foreach my $item (@stat_keys) { - if ($item=~/^client/ && $data->{stats}{$item} > 0) { - $data->{is_client}=1 - }elsif ($item=~/^server/ && $data->{stats}{$item} > 0) { - $data->{is_server}=1 + if ( $item =~ /^client/ && $data->{stats}{$item} > 0 ) { + $data->{is_client} = 1; + } elsif ( $item =~ /^server/ && $data->{stats}{$item} > 0 ) { + $data->{is_server} = 1; } } @@ -549,63 +552,99 @@ foreach my $item (@stat_keys) { #### if server, call showmount #### #### -if ($data->{is_server}) { - my $output_raw = `showmount -a`; - my @output_split = split( /\n/, $output_raw ); +if ( $data->{is_server} ) { + my $output_raw = `showmount -a`; + my @output_split = split( /\n/, $output_raw ); foreach my $line (@output_split) { - if ($line=~/\:\//) { - my ($host, $path)=split(/\:\//, $line); - push(@{$data->{mounted_by}}, {host=>$host, path=>'/'.$path}); + if ( $line =~ /\:\// ) { + my ( $host, $path ) = split( /\:\//, $line ); + push( @{ $data->{mounted_by} }, { host => $host, path => '/' . $path } ); } } -} +} ## end if ( $data->{is_server} ) #### #### #### if client, call nfsstat -m #### #### -if ($data->{is_client}) { - if ($^O eq 'freebsd') { - my $output_raw = `nfsstat -m`; - my @output_split = split( /\n/, $output_raw ); - my $previous_line=''; +if ( $data->{is_client} ) { + if ( $^O eq 'freebsd' ) { + my $output_raw = `nfsstat -m`; + my @output_split = split( /\n/, $output_raw ); my $host; my $rpath; my $lpath; foreach my $line (@output_split) { - if ($line =~ /\:\/.* on \//) { - $host=$line; - $host=~s/\:\/.*$//; + if ( $line =~ /\:\/.* on \// ) { + $host = $line; + $host =~ s/\:\/.*$//; - $rpath=$line; - $rpath=~s/\ on\ \/.*$//; - $rpath=~s/^.*\:\///; - $rpath='/'.$rpath; + $rpath = $line; + $rpath =~ s/\ on\ \/.*$//; + $rpath =~ s/^.*\:\///; + $rpath = '/' . 
$rpath; - $lpath=$line; - $lpath=~s/^.*\:\/.*\ on \///; - $lpath='/'.$lpath; - }elsif ($line =~ /\,/ && defined($host) && defined($rpath) && defined($lpath) ) { + $lpath = $line; + $lpath =~ s/^.*\:\/.*\ on \///; + $lpath = '/' . $lpath; + } elsif ( $line =~ /\,/ && defined($host) && defined($rpath) && defined($lpath) ) { my @flags; my %opts; - my @line_split=split(/\,/, $line); + my @line_split = split( /\,/, $line ); foreach my $item (@line_split) { - if ($item =~ /\=/) { - my ($var, $val)=split(/\=/, $item); - $opts{$var}=$val; - }else { - push(@flags, $item); + if ( $item =~ /\=/ ) { + my ( $var, $val ) = split( /\=/, $item ); + $opts{$var} = $val; + } else { + push( @flags, $item ); } } - push(@{$data->{mounted}}, { host=>$host, rpath=>$rpath, lpath=>$lpath,flags=>\@flags, opts=>\%opts }); - } - } - }elsif ($^O eq 'linux') { - my $output_raw = `nfsstat -m`; - my @output_split = split( /\n/, $output_raw ); - } -} + push( + @{ $data->{mounted} }, + { host => $host, rpath => $rpath, lpath => $lpath, flags => \@flags, opts => \%opts } + ); + } ## end elsif ( $line =~ /\,/ && defined($host) && defined...) + } ## end foreach my $line (@output_split) + } elsif ( $^O eq 'linux' ) { + my $output_raw = `nfsstat -m`; + my @output_split = split( /\n/, $output_raw ); + my $host; + my $rpath; + my $lpath; + foreach my $line (@output_split) { + if ( $line =~ /^\/.*\ from\ .*\:\/.*/ ) { + $lpath = $line; + $lpath =~ s/\ from\ .*$//; + + $host = $line; + $host =~ s/.*\ from\ //; + $host =~ s/\:\/.*$//; + + $rpath = $line; + $rpath =~ s/^.*\:\///; + $rpath = '/' . 
$rpath; + } elsif ( $line =~ /Flags\:[\ \t]+/ && defined($lpath) && defined($host) && defined($rpath) ) { + $line =~ s/^.*Flags\:[\ \t]+//; + my @flags; + my %opts; + my @line_split = split( /\,/, $line ); + foreach my $item (@line_split) { + if ( $item =~ /\=/ ) { + my ( $var, $val ) = split( /\=/, $item ); + $opts{$var} = $val; + } else { + push( @flags, $item ); + } + } + push( + @{ $data->{mounted} }, + { host => $host, rpath => $rpath, lpath => $lpath, flags => \@flags, opts => \%opts } + ); + } ## end elsif ( $line =~ /Flags\:[\ \t]+/ && defined(...)) + } ## end foreach my $line (@output_split) + } ## end elsif ( $^O eq 'linux' ) +} ## end if ( $data->{is_client} ) #add the data has to the return hash $to_return->{data} = $data; From 9f3e1047c3f903f3eabbdb44f5508b2782e49f03 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Sun, 3 Mar 2024 12:39:43 -0600 Subject: [PATCH 411/497] now works for Linux and FreeBSD --- snmp/nfs | 202 +++++++++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 198 insertions(+), 4 deletions(-) diff --git a/snmp/nfs b/snmp/nfs index 3a59ead22..3adad5bc9 100755 --- a/snmp/nfs +++ b/snmp/nfs @@ -75,9 +75,23 @@ my $to_return = { my $data = { is_client => 0, is_server => 0, + os => $^O, mounts => [], mounted_by => [], stats => { + client_rpc_clone => 0, + client_rpc_layoutstats => 0, + client_rpc_getdevicelist => 0, + client_rpc_test_stateid => 0, + client_rpc_secinfo_no => 0, + client_rpc_get_lease_time => 0, + client_rpc_sequence => 0, + client_rpc_fsid_present => 0, + client_rpc_secinfo => 0, + client_rpc_fs_locations => 0, + client_rpc_server_caps => 0, + client_rpc_renew => 0, + client_rpc_confirm => 0, client_rpc_null => 0, client_rpc_Getattr => 0, client_rpc_Setattr => 0, @@ -271,6 +285,16 @@ my $data = { server_cache_Misses => 0, server_cache_CacheSize => 0, server_cache_TCPPeak => 0, + server_calls => 0, + server_badcalls => 0, + server_badfmt => 0, + server_badauth => 0, + server_badclnt => 0, + 
server_null => 0, + server_compound => 0, + 'server_op0-unused' => 0, + 'server_op1-unused' => 0, + 'server_op2-future' => 0, } }; @@ -530,8 +554,178 @@ if ( $^O eq 'freebsd' ) { #### #### if ( $^O eq 'linux' ) { - -} + my $output_raw = `nfsstat | sed 's/[0-9\.]*\%//g'`; + my @output_split = split( /\n/, $output_raw ); + my $previous_line = ''; + foreach my $line (@output_split) { + if ( $previous_line =~ /calls\ +badcalls\ +badfmt\ +badauth\ +badclnt/ ) { + ( + $data->{stats}{server_calls}, $data->{stats}{'server_badcalls'}, $data->{stats}{server_badfmt}, + $data->{stats}{server_badauth}, $data->{stats}{server_badclnt}, + ) = split( /[\ \t]+/m, $line ); + } elsif ( $previous_line =~ /null\ +compound/ ) { + ( $data->{stats}{server_null}, $data->{stats}{server_compound}, ) = split( /[\ \t]+/m, $line ); + } elsif ( $previous_line =~ /op0\-unused\ +op1\-unused\ +op2\-future\ +access\ +close/ ) { + ( + $data->{stats}{'server_op0-unused'}, $data->{stats}{'server_op1-unused'}, + $data->{stats}{'server_op2-future'}, $data->{stats}{server_Access}, + $data->{stats}{server_Close}, + ) = split( /[\ \t]+/m, $line ); + } elsif ( $previous_line =~ /commit\ +create\ +delegpurge\ +delegreturn\ +getattr/ ) { + ( + $data->{stats}{server_Commit}, $data->{stats}{server_Create}, $data->{stats}{server_DelePurge}, + $data->{stats}{server_Delegs}, $data->{stats}{server_Getattr}, + ) = split( /[\ \t]+/m, $line ); + } elsif ( $previous_line =~ /getfh\ +link\ +lock\ +lockt\ +locku/ ) { + ( + $data->{stats}{server_GetFH}, $data->{stats}{server_Link}, $data->{stats}{server_Lock}, + $data->{stats}{server_LockT}, $data->{stats}{server_LockU}, + ) = split( /[\ \t]+/m, $line ); + } elsif ( $previous_line =~ /lookup\ +lookup_root\ +nverify\ +open\ +openattr/ ) { + ( + $data->{stats}{server_Lookup}, $data->{stats}{server_LookupP}, $data->{stats}{server_NVerify}, + $data->{stats}{server_Open}, $data->{stats}{server_OpenAttr}, + ) = split( /[\ \t]+/m, $line ); + } elsif ( $previous_line =~ /open_conf\ 
+open_dgrd\ +putfh\ putpubfh\ +putrootfh/ ) { + ( + $data->{stats}{server_OpenCfrm}, $data->{stats}{server_OpenDwnGr}, $data->{stats}{server_PutFH}, + $data->{stats}{server_PutPubFH}, $data->{stats}{server_PutRootFH}, + ) = split( /[\ \t]+/m, $line ); + } elsif ( $previous_line =~ /read\ +readdir\ +readlink\ +remove\ +rename/ ) { + ( + $data->{stats}{server_Read}, $data->{stats}{server_Readdir}, $data->{stats}{server_Readlink}, + $data->{stats}{server_Remove}, $data->{stats}{server_Rename}, + ) = split( /[\ \t]+/m, $line ); + } elsif ( $previous_line =~ /renew\ +restorefh\ +savefh\ +secinfo\ +setattr/ ) { + ( + $data->{stats}{server_Renew}, $data->{stats}{server_RestoreFH}, $data->{stats}{server_SaveFH}, + $data->{stats}{server_Secinfo}, $data->{stats}{server_Setattr}, + ) = split( /[\ \t]+/m, $line ); + } elsif ( $previous_line =~ /setcltid\ +setcltidconf\ +verify\ +write\ +rellockowner/ ) { + ( + $data->{stats}{server_SetClId}, $data->{stats}{server_SetClIdCf}, + $data->{stats}{server_Verify}, $data->{stats}{server_Write}, + $data->{stats}{server_RelLockOwn}, + ) = split( /[\ \t]+/m, $line ); + } elsif ( $previous_line =~ /bc_ctl\ +bind_conn\ +exchange_id\ +create_ses\ +destroy_ses/ ) { + ( + $data->{stats}{server_BackChannelCt}, $data->{stats}{server_BindConnToSes}, + $data->{stats}{server_ExchangeID}, $data->{stats}{server_CreateSess}, + $data->{stats}{server_DestroySess}, + ) = split( /[\ \t]+/m, $line ); + } elsif ( $previous_line =~ /free_stateid\ +getdirdeleg\ +getdevinfo\ +getdevlist\ +layoutcommit/ ) { + ( + $data->{stats}{server_FreeStateID}, $data->{stats}{server_GetDirDeleg}, + $data->{stats}{server_GetDevInfo}, $data->{stats}{server_GetDevList}, + $data->{stats}{server_layoutCommit}, + ) = split( /[\ \t]+/m, $line ); + } elsif ( $previous_line =~ /layoutget\ +layoutreturn\ +secinfononam\ +sequence\ +set_ssv/ ) { + ( + $data->{stats}{server_LayoutGet}, $data->{stats}{server_LayoutReturn}, + $data->{stats}{server_SecInfNoName}, 
$data->{stats}{server_Sequence}, + $data->{stats}{server_SetSSV}, + ) = split( /[\ \t]+/m, $line ); + } elsif ( $previous_line =~ /test_stateid\ +want_deleg\ +destroy_clid\ +reclaim_comp\ +allocate/ ) { + ( + $data->{stats}{server_TestStateID}, $data->{stats}{server_WantDeleg}, + $data->{stats}{server_DestroyClId}, $data->{stats}{server_ReclaimCompl}, + $data->{stats}{server_Allocate}, + ) = split( /[\ \t]+/m, $line ); + } elsif ( $previous_line =~ /copy\ +copy_notify\ +deallocate\ +ioadvise\ +layouterror/ ) { + ( + $data->{stats}{server_Copy}, $data->{stats}{server_CopyNotify}, + $data->{stats}{server_Deallocate}, $data->{stats}{server_IOAdvise}, + $data->{stats}{server_LayoutError}, + ) = split( /[\ \t]+/m, $line ); + } elsif ( $previous_line =~ /layoutstats\ +offloadcancel\ +offloadstatus\ +readplus\ +seek/ ) { + ( + $data->{stats}{server_Layouts}, $data->{stats}{server_OffloadCncl}, + $data->{stats}{server_OffloadStat}, $data->{stats}{server_ReadPlus}, + $data->{stats}{server_Seek}, + ) = split( /[\ \t]+/m, $line ); + } elsif ( $previous_line =~ /write_same/ ) { + ( $data->{stats}{server_WriteSame} ) = split( /[\ \t]+/m, $line ); + } elsif ( $previous_line =~ /calls\ +retrans\ +authrefrsh/ ) { + ( + $data->{stats}{client_rpc_info_Requests}, + $data->{stats}{client_rpc_info_Retries}, + $data->{stats}{client_rpc_info_X_Replies} + ) = split( /[\ \t]+/m, $line ); + } elsif ( $previous_line =~ /null\ +read\ +write\ +commit\ +open/ ) { + ( + $data->{stats}{client_rpc_null}, $data->{stats}{client_rpc_Read}, + $data->{stats}{client_rpc_Write}, $data->{stats}{client_rpc_Commit}, + $data->{stats}{client_rpc_Open}, + ) = split( /[\ \t]+/m, $line ); + } elsif ( $previous_line =~ /open_conf\ +open_noat\ +open_dgrd\ +close\ +setattr/ ) { + ( + $data->{stats}{client_rpc_OpenCfr}, $data->{stats}{client_rpc_OpenLayout}, + $data->{stats}{client_rpc_OpenDownGr}, $data->{stats}{client_rpc_Commit}, + $data->{stats}{client_rpc_Open}, + ) = split( /[\ \t]+/m, $line ); + } elsif ( 
$previous_line =~ /fsinfo\ +renew\ +setclntid\ +confirm\ +lock/ ) { + ( + $data->{stats}{client_rpc_FSinfo}, $data->{stats}{client_rpc_renew}, + $data->{stats}{client_rpc_SetClId}, $data->{stats}{client_rpc_confirm}, + $data->{stats}{client_rpc_Lock}, + ) = split( /[\ \t]+/m, $line ); + } elsif ( $previous_line =~ /lockt\ +locku\ +access\ +getattr\ +lookup/ ) { + ( + $data->{stats}{client_rpc_LockT}, $data->{stats}{client_rpc_LockU}, + $data->{stats}{client_rpc_Access}, $data->{stats}{client_rpc_Getattr}, + $data->{stats}{client_rpc_Lookup}, + ) = split( /[\ \t]+/m, $line ); + } elsif ( $previous_line =~ /lookup_root\ +remove\ +rename\ +link\ +symlink/ ) { + ( + $data->{stats}{client_rpc_LookOpen}, $data->{stats}{client_rpc_Remove}, + $data->{stats}{client_rpc_Rename}, $data->{stats}{client_rpc_Link}, + $data->{stats}{client_rpc_Symlink}, + ) = split( /[\ \t]+/m, $line ); + } elsif ( $previous_line =~ /create\ +pathconf\ +statfs\ +readlink\ +readdir/ ) { + ( + $data->{stats}{client_rpc_Create}, $data->{stats}{client_rpc_pathConf}, + $data->{stats}{client_rpc_statfs}, $data->{stats}{client_rpc_Readlink}, + $data->{stats}{client_rpc_Readlink}, + ) = split( /[\ \t]+/m, $line ); + } elsif ( $previous_line =~ /server_caps\ +delegreturn\ +getacl\ +setacl\ +fs_locations/ ) { + ( + $data->{stats}{client_rpc_server_caps}, $data->{stats}{client_rpc_DelegRet}, + $data->{stats}{client_rpc_GetAcl}, $data->{stats}{client_rpc_SetAcl}, + $data->{stats}{client_rpc_fs_locations}, + ) = split( /[\ \t]+/m, $line ); + } elsif ( $previous_line =~ /rel_lkowner\ +secinfo\ +fsid_present\ +exchange_id\ +create_session/ ) { + ( + $data->{stats}{client_rpc_RelLckOwn}, $data->{stats}{client_rpc_secinfo}, + $data->{stats}{client_rpc_fsid_present}, $data->{stats}{client_rpc_ExchangeId}, + $data->{stats}{client_rpc_CreateSess}, + ) = split( /[\ \t]+/m, $line ); + } elsif ( $previous_line =~ /destroy_session\ +sequence\ +get_lease_time\ +reclaim_comp\ +layoutget/ ) { + ( + 
$data->{stats}{client_rpc_DestroySess}, $data->{stats}{client_rpc_sequence}, + $data->{stats}{client_rpc_get_lease_time}, $data->{stats}{client_rpc_ReclaimCompl}, + $data->{stats}{client_rpc_LayoutGet}, + ) = split( /[\ \t]+/m, $line ); + } elsif ( $previous_line =~ /getdevinfo\ +layoutcommit\ +layoutreturn\ +secinfo_no\ +test_stateid/ ) { + ( + $data->{stats}{client_rpc_GetDevInfo}, $data->{stats}{client_rpc_LayoutCommit}, + $data->{stats}{client_rpc_LayoutReturn}, $data->{stats}{client_rpc_secinfo_no}, + $data->{stats}{client_rpc_test_stateid}, + ) = split( /[\ \t]+/m, $line ); + } elsif ( $previous_line =~ /free_stateid\ +getdevicelist\ +bind_conn_to_ses\ +destroy_clientid\ +seek/ ) { + ( + $data->{stats}{client_rpc_FreeStateID}, $data->{stats}{client_rpc_getdevicelist}, + $data->{stats}{client_rpc_BindConnSess}, $data->{stats}{client_rpc_DestroyClId}, + $data->{stats}{client_rpc_Seek}, + ) = split( /[\ \t]+/m, $line ); + } elsif ( $previous_line =~ /allocate\ +deallocate\ +layoutstats\ +clone/ ) { + ( + $data->{stats}{client_rpc_Allocate}, $data->{stats}{client_rpc_Deallocate}, + $data->{stats}{client_rpc_layoutstats}, $data->{stats}{client_rpc_clone}, + ) = split( /[\ \t]+/m, $line ); + } + $previous_line = $line; + } ## end foreach my $line (@output_split) +} ## end if ( $^O eq 'linux' ) #### #### @@ -601,7 +795,7 @@ if ( $data->{is_client} ) { } } push( - @{ $data->{mounted} }, + @{ $data->{mounts} }, { host => $host, rpath => $rpath, lpath => $lpath, flags => \@flags, opts => \%opts } ); } ## end elsif ( $line =~ /\,/ && defined($host) && defined...) @@ -638,7 +832,7 @@ if ( $data->{is_client} ) { } } push( - @{ $data->{mounted} }, + @{ $data->{mounts} }, { host => $host, rpath => $rpath, lpath => $lpath, flags => \@flags, opts => \%opts } ); } ## end elsif ( $line =~ /Flags\:[\ \t]+/ && defined(...)) From 944a806f6b06b152c11b49d499cc08759f99eda7 Mon Sep 17 00:00:00 2001 From: "Zane C. 
Bowers-Hadley" Date: Sun, 3 Mar 2024 13:22:54 -0600 Subject: [PATCH 412/497] add nfs --- snmp/nfs | 110 +++++++++++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 98 insertions(+), 12 deletions(-) diff --git a/snmp/nfs b/snmp/nfs index 3adad5bc9..82d9c449e 100755 --- a/snmp/nfs +++ b/snmp/nfs @@ -1,5 +1,59 @@ #!/usr/bin/env perl +=head1 NAME + +nfs - LibreNMS JSON style SNMP extend for NFS monitoring + +=head1 VERSION + +0.0.1 + +=head1 SYNOPSIS + +nfs [B<-w>] [B<-b>] [B<-o> ] + +nfs --help|-h + +nfs --version|-v + +=head1 SNMPD CONFIG + + extend nfs /etc/snmp/extends/nfs -b + +or if using cron... + + extend nfs cat /var/cache/nfs.json.snmp + +=head1 DESCRIPTION + +Uses showmount and nfsstat to gather information for the OSes below for NFS. + + FreeBSD + Linux + +=head1 FLAGS + +=head2 -w + +Write the results out. + +=head2 -b + +Print out the compressed data if GZip+Base64 is smaller. + +=head2 -o + +Where to write the results to. Defaults to '/var/cache/nfs.json', +meaning it will be written out to the two locations. + + /var/cache/nfs.json + /var/cache/nfs.json.snmp + +The later is for use with returning data for SNMP. Will be compressed +if possible. + +=cut + ## ## ## General Notes @@ -42,14 +96,13 @@ $ENV{PATH} = $ENV{PATH} . 
':/sbin:/usr/sbin'; my $pretty; my $cache_base = '/var/cache/nfs.json'; my $write; -my $compress = 1; +my $compress; my $version; my $help; GetOptions( - p => \$pretty, - 'b=s' => \$compress, 'o=s' => \$cache_base, - 'w' => \$write, + w => \$write, + b => \$compress, v => \$version, version => \$version, h => \$help, @@ -844,11 +897,44 @@ if ( $data->{is_client} ) { $to_return->{data} = $data; #finally render the JSON -my $j = JSON->new; -if ($pretty) { - $j->pretty(1); -} -print $j->encode($to_return); -if ( !$pretty ) { - print "\n"; -} +my $raw_json = encode_json($to_return); +if ($write) { + write_file( $cache_base, $raw_json ); + # compress and write to the cache file for it + my $compressed_string; + gzip \$raw_json => \$compressed_string; + my $compressed = encode_base64($compressed_string); + $compressed =~ s/\n//g; + $compressed = $compressed . "\n"; + my $print_compressed = 0; + if ( length($compressed) > length($raw_json) ) { + write_file( $cache_base . '.snmp', $raw_json ); + } else { + write_file( $cache_base . '.snmp', $compressed ); + $print_compressed = 1; + } + + if ( $compress && $print_compressed ) { + print $compressed; + } else { + print $raw_json; + } +} else { + if ( !$compress ) { + print $raw_json. "\n"; + exit; + } + + # compress and write to the cache file for it + my $compressed_string; + gzip \$raw_json => \$compressed_string; + my $compressed = encode_base64($compressed_string); + $compressed =~ s/\n//g; + $compressed = $compressed . "\n"; + my $print_compressed = 0; + if ( length($compressed) > length($raw_json) ) { + print $raw_json; + } else { + print $compressed; + } +} ## end else [ if ($write) ] From de9fcc3beeaf4b91ac699a5555c08583ecae7b86 Mon Sep 17 00:00:00 2001 From: "Zane C. 
Bowers-Hadley" Date: Thu, 7 Mar 2024 19:20:31 -0600 Subject: [PATCH 413/497] now null for not found and more cleanup --- snmp/nfs | 604 +++++++++++++++++++++++++++++-------------------------- 1 file changed, 316 insertions(+), 288 deletions(-) diff --git a/snmp/nfs b/snmp/nfs index 82d9c449e..a82159a24 100755 --- a/snmp/nfs +++ b/snmp/nfs @@ -132,222 +132,222 @@ my $data = { mounts => [], mounted_by => [], stats => { - client_rpc_clone => 0, - client_rpc_layoutstats => 0, - client_rpc_getdevicelist => 0, - client_rpc_test_stateid => 0, - client_rpc_secinfo_no => 0, - client_rpc_get_lease_time => 0, - client_rpc_sequence => 0, - client_rpc_fsid_present => 0, - client_rpc_secinfo => 0, - client_rpc_fs_locations => 0, - client_rpc_server_caps => 0, - client_rpc_renew => 0, - client_rpc_confirm => 0, - client_rpc_null => 0, - client_rpc_Getattr => 0, - client_rpc_Setattr => 0, - client_rpc_Lookup => 0, - client_rpc_Readlink => 0, - client_rpc_Read => 0, - client_rpc_Write => 0, - client_rpc_Create => 0, - client_rpc_Remove => 0, - client_rpc_Rename => 0, - client_rpc_Link => 0, - client_rpc_Symlink => 0, - client_rpc_Mkdir => 0, - client_rpc_Rmdir => 0, - client_rpc_Readdir => 0, - client_rpc_RdirPlus => 0, - client_rpc_Access => 0, - client_rpc_Mknod => 0, - client_rpc_Fsstat => 0, - client_rpc_FSinfo => 0, - client_rpc_pathConf => 0, - client_rpc_Commit => 0, - client_rpc_SetClId => 0, - client_rpc_SetClIdCf => 0, - client_rpc_Lock => 0, - client_rpc_LockT => 0, - client_rpc_LockU => 0, - client_rpc_Open => 0, - client_rpc_OpenCfr => 0, - client_rpc_OpenDownGr => 0, - client_rpc_Close => 0, - client_rpc_RelLckOwn => 0, - client_rpc_FreeStateID => 0, - client_rpc_PutRootFH => 0, - client_rpc_DelegRet => 0, - client_rpc_GetAcl => 0, - client_rpc_SetAcl => 0, - client_rpc_ExchangeId => 0, - client_rpc_CreateSess => 0, - client_rpc_DestroySess => 0, - client_rpc_DestroyClId => 0, - client_rpc_LayoutGet => 0, - client_rpc_GetDevInfo => 0, - client_rpc_LayoutCommit => 0, - 
client_rpc_LayoutReturn => 0, - client_rpc_ReclaimCompl => 0, - client_rpc_ReadDataS => 0, - client_rpc_WriteDataS => 0, - client_rpc_CommitDataS => 0, - client_rpc_OpenLayout => 0, - client_rpc_CreateLayout => 0, - client_rpc_BindConnSess => 0, - client_rpc_LookupOpen => 0, - client_rpc_IOAdvise => 0, - client_rpc_Allocate => 0, - client_rpc_Copy => 0, - client_rpc_Seek => 0, - client_rpc_SeekDataS => 0, - client_rpc_GetExtattr => 0, - client_rpc_SetExtattr => 0, - client_rpc_RmExtattr => 0, - client_rpc_ListExtattr => 0, - client_rpc_Deallocate => 0, - client_rpc_LayoutError => 0, - client_OpenOwner => 0, - client_Opens => 0, - client_LockOwner => 0, - client_Locks => 0, - client_Delegs => 0, - client_LocalOwn => 0, - client_LocalOpen => 0, - client_LocalLown => 0, - client_LocalLock => 0, - client_Layouts => 0, - client_rpc_info_TimedOut => 0, - client_rpc_info_Invalid => 0, - client_rpc_info_X_Replies => 0, - client_rpc_info_Retries => 0, - client_rpc_info_Requests => 0, - client_cache_Attr_Hits => 0, - client_cache_Attr_Misses => 0, - client_cache_Lkup_Hits => 0, - client_cache_Lkup_Misses => 0, - client_cache_BioR_Hits => 0, - client_cache_BioR_Misses => 0, - client_cache_BioW_Hits => 0, - client_cache_BioW_Misses => 0, - client_cache_BioRL_Hits => 0, - client_cache_BioRL_Misses => 0, - client_cache_BioD_Hits => 0, - client_cache_BioD_Misses => 0, - client_cache_DirE_Hits => 0, - client_cache_DirE_Misses => 0, - server_Getattr => 0, - server_Setattr => 0, - server_Lookup => 0, - server_Readlink => 0, - server_Read => 0, - server_Write => 0, - server_Create => 0, - server_Remove => 0, - server_Rename => 0, - server_Link => 0, - server_Symlink => 0, - server_Mkdir => 0, - server_Rmdir => 0, - server_Readdir => 0, - server_RdirPlus => 0, - server_Access => 0, - server_Mknod => 0, - server_Fsstat => 0, - server_FSinfo => 0, - server_pathConf => 0, - server_Commit => 0, - server_LookupP => 0, - server_SetClId => 0, - server_SetClIdCf => 0, - server_Open => 0, - 
server_OpenAttr => 0, - server_OpenDwnGr => 0, - server_OpenCfrm => 0, - server_DelePurge => 0, - server_DelRet => 0, - server_GetFH => 0, - server_Lock => 0, - server_LockT => 0, - server_LockU => 0, - server_Close => 0, - server_Verify => 0, - server_NVerify => 0, - server_PutFH => 0, - server_PutPubFH => 0, - server_PutRootFH => 0, - server_Renew => 0, - server_RestoreFH => 0, - server_SaveFH => 0, - server_Secinfo => 0, - server_RelLockOwn => 0, - server_V4Create => 0, - server_BackChannelCt => 0, - server_BindConnToSes => 0, - server_ExchangeID => 0, - server_CreateSess => 0, - server_DestroySess => 0, - server_FreeStateID => 0, - server_GetDirDeleg => 0, - server_GetDevInfo => 0, - server_GetDevList => 0, - server_layoutCommit => 0, - server_LayoutGet => 0, - server_LayoutReturn => 0, - server_GetDirDeleg => 0, - server_GetDevInfo => 0, - server_GetDevList => 0, - server_layoutCommit => 0, - server_LayoutGet => 0, - server_LayoutReturn => 0, - server_SecInfNoName => 0, - server_Sequence => 0, - server_SetSSV => 0, - server_TestStateID => 0, - server_WantDeleg => 0, - server_DestroyClId => 0, - server_ReclaimCompl => 0, - server_Allocate => 0, - server_Copy => 0, - server_CopyNotify => 0, - server_Deallocate => 0, - server_IOAdvise => 0, - server_LayoutError => 0, - server_LayoutStats => 0, - server_OffloadCncl => 0, - server_OffloadStat => 0, - server_ReadPlus => 0, - server_Seek => 0, - server_WriteSame => 0, - server_Clone => 0, - server_GetExtattr => 0, - server_SetExtattr => 0, - server_ListExtattr => 0, - server_RmExtattr => 0, - server_Clients => 0, - server_OpenOwner => 0, - server_Opens => 0, - server_LockOwner => 0, - server_Locks => 0, - server_Delegs => 0, - server_Layouts => 0, - server_cache_Inprog => 0, - 'server_cache_Non-idem' => 0, - server_cache_Misses => 0, - server_cache_CacheSize => 0, - server_cache_TCPPeak => 0, - server_calls => 0, - server_badcalls => 0, - server_badfmt => 0, - server_badauth => 0, - server_badclnt => 0, - server_null 
=> 0, - server_compound => 0, - 'server_op0-unused' => 0, - 'server_op1-unused' => 0, - 'server_op2-future' => 0, + client_rpc_clone => undef, + client_rpc_layoutstats => undef, + client_rpc_getdevicelist => undef, + client_rpc_test_stateid => undef, + client_rpc_secinfo_no => undef, + client_rpc_get_lease_time => undef, + client_rpc_sequence => undef, + client_rpc_fsid_present => undef, + client_rpc_secinfo => undef, + client_rpc_fs_locations => undef, + client_rpc_server_caps => undef, + client_rpc_renew => undef, + client_rpc_confirm => undef, + client_rpc_null => undef, + client_rpc_Getattr => undef, + client_rpc_Setattr => undef, + client_rpc_Lookup => undef, + client_rpc_Readlink => undef, + client_rpc_Read => undef, + client_rpc_Write => undef, + client_rpc_Create => undef, + client_rpc_Remove => undef, + client_rpc_Rename => undef, + client_rpc_Link => undef, + client_rpc_Symlink => undef, + client_rpc_Mkdir => undef, + client_rpc_Rmdir => undef, + client_rpc_Readdir => undef, + client_rpc_RdirPlus => undef, + client_rpc_Access => undef, + client_rpc_Mknod => undef, + client_rpc_Fsstat => undef, + client_rpc_FSinfo => undef, + client_rpc_pathConf => undef, + client_rpc_Commit => undef, + client_rpc_SetClId => undef, + client_rpc_SetClIdCf => undef, + client_rpc_Lock => undef, + client_rpc_LockT => undef, + client_rpc_LockU => undef, + client_rpc_Open => undef, + client_rpc_OpenCfr => undef, + client_rpc_OpenDownGr => undef, + client_rpc_Close => undef, + client_rpc_RelLckOwn => undef, + client_rpc_FreeStateID => undef, + client_rpc_PutRootFH => undef, + client_rpc_DelegRet => undef, + client_rpc_GetAcl => undef, + client_rpc_SetAcl => undef, + client_rpc_ExchangeId => undef, + client_rpc_CreateSess => undef, + client_rpc_DestroySess => undef, + client_rpc_DestroyClId => undef, + client_rpc_LayoutGet => undef, + client_rpc_GetDevInfo => undef, + client_rpc_LayoutCommit => undef, + client_rpc_LayoutReturn => undef, + client_rpc_ReclaimCompl => undef, + 
client_rpc_ReadDataS => undef, + client_rpc_WriteDataS => undef, + client_rpc_CommitDataS => undef, + client_rpc_OpenLayout => undef, + client_rpc_CreateLayout => undef, + client_rpc_BindConnSess => undef, + client_rpc_LookupOpen => undef, + client_rpc_IOAdvise => undef, + client_rpc_Allocate => undef, + client_rpc_Copy => undef, + client_rpc_Seek => undef, + client_rpc_SeekDataS => undef, + client_rpc_GetExtattr => undef, + client_rpc_SetExtattr => undef, + client_rpc_RmExtattr => undef, + client_rpc_ListExtattr => undef, + client_rpc_Deallocate => undef, + client_rpc_LayoutError => undef, + client_OpenOwner => undef, + client_Opens => undef, + client_LockOwner => undef, + client_Locks => undef, + client_Delegs => undef, + client_LocalOwn => undef, + client_LocalOpen => undef, + client_LocalLown => undef, + client_LocalLock => undef, + client_Layouts => undef, + client_rpc_info_TimedOut => undef, + client_rpc_info_Invalid => undef, + client_rpc_info_X_Replies => undef, + client_rpc_info_Retries => undef, + client_rpc_info_Requests => undef, + client_cache_Attr_Hits => undef, + client_cache_Attr_Misses => undef, + client_cache_Lkup_Hits => undef, + client_cache_Lkup_Misses => undef, + client_cache_BioR_Hits => undef, + client_cache_BioR_Misses => undef, + client_cache_BioW_Hits => undef, + client_cache_BioW_Misses => undef, + client_cache_BioRL_Hits => undef, + client_cache_BioRL_Misses => undef, + client_cache_BioD_Hits => undef, + client_cache_BioD_Misses => undef, + client_cache_DirE_Hits => undef, + client_cache_DirE_Misses => undef, + server_Getattr => undef, + server_Setattr => undef, + server_Lookup => undef, + server_Readlink => undef, + server_Read => undef, + server_Write => undef, + server_Create => undef, + server_Remove => undef, + server_Rename => undef, + server_Link => undef, + server_Symlink => undef, + server_Mkdir => undef, + server_Rmdir => undef, + server_Readdir => undef, + server_RdirPlus => undef, + server_Access => undef, + server_Mknod => 
undef, + server_Fsstat => undef, + server_FSinfo => undef, + server_pathConf => undef, + server_Commit => undef, + server_LookupP => undef, + server_SetClId => undef, + server_SetClIdCf => undef, + server_Open => undef, + server_OpenAttr => undef, + server_OpenDwnGr => undef, + server_OpenCfrm => undef, + server_DelePurge => undef, + server_DelRet => undef, + server_GetFH => undef, + server_Lock => undef, + server_LockT => undef, + server_LockU => undef, + server_Close => undef, + server_Verify => undef, + server_NVerify => undef, + server_PutFH => undef, + server_PutPubFH => undef, + server_PutRootFH => undef, + server_Renew => undef, + server_RestoreFH => undef, + server_SaveFH => undef, + server_Secinfo => undef, + server_RelLockOwn => undef, + server_V4Create => undef, + server_BackChannelCt => undef, + server_BindConnToSes => undef, + server_ExchangeID => undef, + server_CreateSess => undef, + server_DestroySess => undef, + server_FreeStateID => undef, + server_GetDirDeleg => undef, + server_GetDevInfo => undef, + server_GetDevList => undef, + server_layoutCommit => undef, + server_LayoutGet => undef, + server_LayoutReturn => undef, + server_GetDirDeleg => undef, + server_GetDevInfo => undef, + server_GetDevList => undef, + server_layoutCommit => undef, + server_LayoutGet => undef, + server_LayoutReturn => undef, + server_SecInfNoName => undef, + server_Sequence => undef, + server_SetSSV => undef, + server_TestStateID => undef, + server_WantDeleg => undef, + server_DestroyClId => undef, + server_ReclaimCompl => undef, + server_Allocate => undef, + server_Copy => undef, + server_CopyNotify => undef, + server_Deallocate => undef, + server_IOAdvise => undef, + server_LayoutError => undef, + server_LayoutStats => undef, + server_OffloadCncl => undef, + server_OffloadStat => undef, + server_ReadPlus => undef, + server_Seek => undef, + server_WriteSame => undef, + server_Clone => undef, + server_GetExtattr => undef, + server_SetExtattr => undef, + server_ListExtattr 
=> undef, + server_RmExtattr => undef, + server_Clients => undef, + server_OpenOwner => undef, + server_Opens => undef, + server_LockOwner => undef, + server_Locks => undef, + server_Delegs => undef, + server_Layouts => undef, + server_cache_Inprog => undef, + 'server_cache_Non-idem' => undef, + server_cache_Misses => undef, + server_cache_CacheSize => undef, + server_cache_TCPPeak => undef, + server_calls => undef, + server_badcalls => undef, + server_badfmt => undef, + server_badauth => undef, + server_badclnt => undef, + server_null => undef, + server_compound => undef, + 'server_op0-unused' => undef, + 'server_op1-unused' => undef, + 'server_op2-future' => undef, } }; @@ -360,60 +360,70 @@ if ( $^O eq 'freebsd' ) { my $output_raw = `nfsstat -E`; my @output_split = split( /\n/, $output_raw ); my $previous_line = ''; + my $mode = ''; foreach my $line (@output_split) { - if ( $previous_line =~ /Getattr\ +Setattr\ +Lookup\ +Readlink\ +Read\ +Write/ ) { + if ( $line =~ /^[Cc]lient/ ) { + $mode = 'client'; + } elsif ( $line =~ /^[Ss]erver/ ) { + $mode = 'server'; + } + if ( $previous_line =~ /Getattr\ +Setattr\ +Lookup\ +Readlink\ +Read\ +Write/ && $mode eq 'client' ) { $line =~ s/^ +//; ( $data->{stats}{client_rpc_Getattr}, $data->{stats}{client_rpc_Setattr}, $data->{stats}{client_rpc_Lookup}, $data->{stats}{client_rpc_Readlink}, $data->{stats}{client_rpc_Read}, $data->{stats}{client_rpc_Write} ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /Create\ +Remove\ +Rename\ +Link\ +Symlink\ +Mkdir/ ) { + } elsif ( $previous_line =~ /Create\ +Remove\ +Rename\ +Link\ +Symlink\ +Mkdir/ && $mode eq 'client' ) { $line =~ s/^ +//; ( $data->{stats}{client_rpc_Create}, $data->{stats}{client_rpc_Remove}, $data->{stats}{client_rpc_Rename}, $data->{stats}{client_rpc_Link}, $data->{stats}{client_rpc_Symlink}, $data->{stats}{client_rpc_Mkdir} ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /Rmdir\ +Readdir\ +RdirPlus\ +Access\ +Mknod\ +Fsstat/ ) { + } elsif ( 
$previous_line =~ /Rmdir\ +Readdir\ +RdirPlus\ +Access\ +Mknod\ +Fsstat/ && $mode eq 'client' ) { $line =~ s/^ +//; ( $data->{stats}{client_rpc_Rmdir}, $data->{stats}{client_rpc_Readdir}, $data->{stats}{client_rpc_RdirPlus}, $data->{stats}{client_rpc_Access}, $data->{stats}{client_rpc_Mknod}, $data->{stats}{client_rpc_Fsstat} ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /FSinfo\ +pathConf\ +Commit\ +SetClId\ +SetClIdCf\ +Lock/ ) { + } elsif ( $previous_line =~ /FSinfo\ +pathConf\ +Commit\ +SetClId\ +SetClIdCf\ +Lock/ && $mode eq 'client' ) { $line =~ s/^ +//; ( $data->{stats}{client_rpc_FSinfo}, $data->{stats}{client_rpc_pathConf}, $data->{stats}{client_rpc_Commit}, $data->{stats}{client_rpc_SetClId}, $data->{stats}{client_rpc_SetClIdCf}, $data->{stats}{client_rpc_Lock} ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /LockT\ +LockU\ +Open\ +OpenCfr/ ) { + } elsif ( $previous_line =~ /LockT\ +LockU\ +Open\ +OpenCfr/ && $mode eq 'client' ) { $line =~ s/^ +//; ( $data->{stats}{client_rpc_LockT}, $data->{stats}{client_rpc_LockU}, $data->{stats}{client_rpc_Open}, $data->{stats}{client_rpc_OpenCfr} ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /OpenDownGr\ +Close/ ) { + } elsif ( $previous_line =~ /OpenDownGr\ +Close/ && $mode eq 'client' ) { $line =~ s/^ +//; ( $data->{stats}{client_rpc_OpenDownGr}, $data->{stats}{client_rpc_Close}, ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /RelLckOwn\ +FreeStateID\ +PutRootFH\ +DelegRet\ +GetAcl\ +SetAcl/ ) { + } elsif ( $previous_line =~ /RelLckOwn\ +FreeStateID\ +PutRootFH\ +DelegRet\ +GetAcl\ +SetAcl/ + && $mode eq 'client' ) + { $line =~ s/^ +//; ( $data->{stats}{client_rpc_RelLckOwn}, $data->{stats}{client_rpc_FreeStateID}, $data->{stats}{client_rpc_PutRootFH}, $data->{stats}{client_rpc_DelegRet}, $data->{stats}{client_rpc_GetAcl}, $data->{stats}{client_rpc_SetAcl} ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /ExchangeId\ +CreateSess\ +DestroySess\ +DestroyClId\ 
+LayoutGet\ +GetDevInfo/ ) { + } elsif ( $previous_line =~ /ExchangeId\ +CreateSess\ +DestroySess\ +DestroyClId\ +LayoutGet\ +GetDevInfo/ + && $mode eq 'client' ) + { $line =~ s/^ +//; ( $data->{stats}{client_rpc_ExchangeId}, $data->{stats}{client_rpc_CreateSess}, $data->{stats}{client_rpc_DestroySess}, $data->{stats}{client_rpc_DestroyClId}, $data->{stats}{client_rpc_LayoutGet}, $data->{stats}{client_rpc_GetDevInfo} ) = split( / +/m, $line ); - } elsif ( - $previous_line =~ /LayoutCommit\ +LayoutReturn\ +ReclaimCompl\ +ReadDataS\ +WriteDataS\ +CommitDataS/ ) + } elsif ( $previous_line =~ /LayoutCommit\ +LayoutReturn\ +ReclaimCompl\ +ReadDataS\ +WriteDataS\ +CommitDataS/ + && $mode eq 'client' ) { $line =~ s/^ +//; ( @@ -421,117 +431,124 @@ if ( $^O eq 'freebsd' ) { $data->{stats}{client_rpc_ReclaimCompl}, $data->{stats}{client_rpc_ReadDataS}, $data->{stats}{client_rpc_WriteDataS}, $data->{stats}{client_rpc_CommitDataS} ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /OpenLayout\ +CreateLayout\ +BindConnSess\ +LookupOpen/ ) { + } elsif ( $previous_line =~ /OpenLayout\ +CreateLayout\ +BindConnSess\ +LookupOpen/ && $mode eq 'client' ) { $line =~ s/^ +//; ( $data->{stats}{client_rpc_OpenLayout}, $data->{stats}{client_rpc_CreateLayout}, $data->{stats}{client_rpc_BindConnSess}, $data->{stats}{client_rpc_LookupOpen} ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /IOAdvise\ +Allocate\ +Copy\ +Seek\ +SeekDataS\ +GetExtattr/ ) { + } elsif ( $previous_line =~ /IOAdvise\ +Allocate\ +Copy\ +Seek\ +SeekDataS\ +GetExtattr/ && $mode eq 'client' ) + { $line =~ s/^ +//; ( $data->{stats}{client_rpc_IOAdvise}, $data->{stats}{client_rpc_Allocate}, $data->{stats}{client_rpc_Copy}, $data->{stats}{client_rpc_Seek}, $data->{stats}{client_rpc_SeekDataS}, $data->{stats}{client_rpc_GetExtattr} ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /SetExtattr\ +RmExtattr\ +ListExtattr\ +Deallocate\ +LayoutError/ ) { + } elsif ( $previous_line =~ /SetExtattr\ +RmExtattr\ 
+ListExtattr\ +Deallocate\ +LayoutError/ + && $mode eq 'client' ) + { $line =~ s/^ +//; ( $data->{stats}{client_rpc_SetExtattr}, $data->{stats}{client_rpc_RmExtattr}, $data->{stats}{client_rpc_ListExtattr}, $data->{stats}{client_rpc_Deallocate}, $data->{stats}{client_rpc_LayoutError} ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /OpenOwner\ +Opens\ +LockOwner\ +Locks\ +Delegs\ +LocalOwn/ ) { + } elsif ( $previous_line =~ /OpenOwner\ +Opens\ +LockOwner\ +Locks\ +Delegs\ +LocalOwn/ && $mode eq 'client' ) { $line =~ s/^ +//; ( $data->{stats}{client_OpenOwner}, $data->{stats}{client_Opens}, $data->{stats}{client_LockOwner}, - $data->{stats}{client_Locks}, $data->{stats}{client_Delegs}, $data->{stats}{client_LockOwner} + $data->{stats}{client_Locks}, $data->{stats}{client_Delegs}, $data->{stats}{client_LocalOwn} ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /LocalOpen\ +LocalLown\ +LocalLock\ +Layouts/ ) { + } elsif ( $previous_line =~ /LocalOpen\ +LocalLown\ +LocalLock\ +Layouts/ && $mode eq 'client' ) { $line =~ s/^ +//; ( $data->{stats}{client_LocalOpen}, $data->{stats}{client_LocalLown}, $data->{stats}{client_LocalLock}, $data->{stats}{client_Layouts} ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /TimedOut\ +Invalid\ +X\ Replies\ +Retries\ +Requests/ ) { + } elsif ( $previous_line =~ /TimedOut\ +Invalid\ +X\ Replies\ +Retries\ +Requests/ && $mode eq 'client' ) { $line =~ s/^ +//; ( $data->{stats}{client_rpc_info_TimedOut}, $data->{stats}{client_rpc_info_Invalid}, $data->{stats}{client_rpc_info_X_Replies}, $data->{stats}{client_rpc_info_Retries}, $data->{stats}{client_rpc_info_Requests} ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /Attr\ Hits\ +Attr\ Misses\ +Lkup\ Hits\ +Lkup\ Misses/ ) { + } elsif ( $previous_line =~ /Attr\ Hits\ +Attr\ Misses\ +Lkup\ Hits\ +Lkup\ Misses/ && $mode eq 'client' ) { $line =~ s/^ +//; ( $data->{stats}{client_cache_Attr_Hits}, $data->{stats}{client_cache_Attr_Misses}, 
$data->{stats}{client_cache_Lkup_Hits}, $data->{stats}{client_cache_Lkup_Misses} ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /BioR\ Hits\ +BioR\ Misses\ +BioW\ Hits\ +BioW\ Misses/ ) { + } elsif ( $previous_line =~ /BioR\ Hits\ +BioR\ Misses\ +BioW\ Hits\ +BioW\ Misses/ && $mode eq 'client' ) { $line =~ s/^ +//; ( $data->{stats}{client_cache_BioR_Hits}, $data->{stats}{client_cache_BioR_Misses}, $data->{stats}{client_cache_BioW_Hits}, $data->{stats}{client_cache_BioW_Misses} ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /BioRL\ Hits\ +BioRL\ Misse\ +BioD\ Hits\ +BioD\ Misses/ ) { + } elsif ( $previous_line =~ /BioRL Hits\ +BioRL\ +Misses\ +BioD Hits\ +BioD Misses/ && $mode eq 'client' ) { $line =~ s/^ +//; ( $data->{stats}{client_cache_BioRL_Hits}, $data->{stats}{client_cache_BioRL_Misses}, - $data->{stats}{client_cache_BioD_Hits}, $data->{stats}{client_cache_BioD_Misses} + $data->{stats}{client_cache_BioD_Hits}, $data->{stats}{client_cache_BioD_Misses}, ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /DirE\ Hits\ +DirE\ Misses/ ) { + } elsif ( $previous_line =~ /DirE\ Hits\ +DirE\ Misses/ && $mode eq 'client' ) { $line =~ s/^ +//; ( $data->{stats}{client_cache_DirE_Hits}, $data->{stats}{client_cache_DirE_Misses}, ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /Getattr\ +Setattr\ +Lookup\ +Readlink\ +Read\ +Write/ ) { + } elsif ( $previous_line =~ /Getattr\ +Setattr\ +Lookup\ +Readlink\ +Read\ +Write/ && $mode eq 'server' ) { $line =~ s/^ +//; ( $data->{stats}{server_Getattr}, $data->{stats}{server_Setattr}, $data->{stats}{server_Lookup}, $data->{stats}{server_Readlink}, $data->{stats}{server_Read}, $data->{stats}{server_Write}, ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /Create\ +Remove\ +Rename\ +Link\ +Symlink\ +Mkdir/ ) { + } elsif ( $previous_line =~ /Create\ +Remove\ +Rename\ +Link\ +Symlink\ +Mkdir/ && $mode eq 'server' ) { $line =~ s/^ +//; ( $data->{stats}{server_Create}, 
$data->{stats}{server_Remove}, $data->{stats}{server_Rename}, $data->{stats}{server_Link}, $data->{stats}{server_Symlink}, $data->{stats}{server_Mkdir}, ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /Rmdir\ +Readdir\ +RdirPlus\ +Access\ +Mknod\ +Fsstat/ ) { + } elsif ( $previous_line =~ /Rmdir\ +Readdir\ +RdirPlus\ +Access\ +Mknod\ +Fsstat/ && $mode eq 'server' ) { $line =~ s/^ +//; ( $data->{stats}{server_Rmdir}, $data->{stats}{server_Readdir}, $data->{stats}{server_RdirPlus}, $data->{stats}{server_Access}, $data->{stats}{server_Mknod}, $data->{stats}{server_Fsstat}, ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /FSinfo\ +pathConf\ +Commit\ +LookupP\ +SetClId\ +SetClIdCf/ ) { + } elsif ( $previous_line =~ /FSinfo\ +pathConf\ +Commit\ +LookupP\ +SetClId\ +SetClIdCf/ && $mode eq 'server' ) + { $line =~ s/^ +//; ( $data->{stats}{server_FSinfo}, $data->{stats}{server_pathConf}, $data->{stats}{server_Commit}, $data->{stats}{server_LookupP}, $data->{stats}{server_SetClId}, $data->{stats}{server_SetClIdCf}, ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /Open\ +OpenAttr\ +OpenDwnGr\ +OpenCfrm\ +DelePurge\ +DelRet/ ) { + } elsif ( $previous_line =~ /Open\ +OpenAttr\ +OpenDwnGr\ +OpenCfrm\ +DelePurge\ +DelRet/ && $mode eq 'server' ) + { $line =~ s/^ +//; ( $data->{stats}{server_Open}, $data->{stats}{server_OpenAttr}, $data->{stats}{server_OpenDwnGr}, $data->{stats}{server_OpenCfrm}, $data->{stats}{server_DelePurge}, $data->{stats}{server_DelRet}, ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /GetFH\ +Lock\ +LockT\ +LockU\ +Close\ +Verify/ ) { + } elsif ( $previous_line =~ /GetFH\ +Lock\ +LockT\ +LockU\ +Close\ +Verify/ && $mode eq 'server' ) { $line =~ s/^ +//; ( $data->{stats}{server_GetFH}, $data->{stats}{server_Lock}, $data->{stats}{server_LockT}, $data->{stats}{server_LockU}, $data->{stats}{server_Close}, $data->{stats}{server_Verify}, ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /NVerify\ +PutFH\ +PutPubFH\ 
+PutRootFH\ +Renew\ +RestoreFH/ ) { + } elsif ( $previous_line =~ /NVerify\ +PutFH\ +PutPubFH\ +PutRootFH\ +Renew\ +RestoreFH/ && $mode eq 'server' ) + { $line =~ s/^ +//; ( $data->{stats}{server_NVerify}, $data->{stats}{server_PutFH}, $data->{stats}{server_PutPubFH}, $data->{stats}{server_PutRootFH}, $data->{stats}{server_Renew}, $data->{stats}{server_RestoreFH}, ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /SaveFH\ +Secinfo\ +RelLockOwn\ +V4Create/ ) { + } elsif ( $previous_line =~ /SaveFH\ +Secinfo\ +RelLockOwn\ +V4Create/ && $mode eq 'server' ) { $line =~ s/^ +//; ( $data->{stats}{server_SaveFH}, $data->{stats}{server_Secinfo}, $data->{stats}{server_RelLockOwn}, $data->{stats}{server_V4Create} ) = split( / +/m, $line ); - } elsif ( $previous_line - =~ /BackChannelCt\ *BindConnToSes\ +ExchangeID\ +CreateSess\ +DestroySess\ +FreeStateID/ ) + } elsif ( + $previous_line =~ /BackChannelCt\ *BindConnToSes\ +ExchangeID\ +CreateSess\ +DestroySess\ +FreeStateID/ + && $mode eq 'server' ) { $line =~ s/^ +//; ( @@ -540,7 +557,8 @@ if ( $^O eq 'freebsd' ) { $data->{stats}{server_DestroySess}, $data->{stats}{server_FreeStateID}, ) = split( / +/m, $line ); } elsif ( - $previous_line =~ /GetDirDeleg\ +GetDevInfo\ +GetDevList\ +[lL]ayoutCommit\ +LayoutGet\ +LayoutReturn/ ) + $previous_line =~ /GetDirDeleg\ +GetDevInfo\ +GetDevList\ +[lL]ayoutCommit\ +LayoutGet\ +LayoutReturn/ + && $mode eq 'server' ) { $line =~ s/^ +//; ( @@ -548,17 +566,21 @@ if ( $^O eq 'freebsd' ) { $data->{stats}{server_GetDevList}, $data->{stats}{server_layoutCommit}, $data->{stats}{server_LayoutGet}, $data->{stats}{server_LayoutReturn}, ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /SecInfNoName\ +Sequence\ +SetSSV\ +TestStateID\ +WantDeleg\ +DestroyClId/ ) { + } elsif ( $previous_line =~ /SecInfNoName\ +Sequence\ +SetSSV\ +TestStateID\ +WantDeleg\ +DestroyClId/ + && $mode eq 'server' ) + { $line =~ s/^ +//; ( $data->{stats}{server_SecInfNoName}, $data->{stats}{server_Sequence}, 
$data->{stats}{server_SetSSV}, $data->{stats}{server_TestStateID}, $data->{stats}{server_WantDeleg}, $data->{stats}{server_DestroyClId}, ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /ReclaimCompl/ ) { + } elsif ( $previous_line =~ /ReclaimCompl/ && $mode eq 'server' ) { $line =~ s/^ +//; ( $data->{stats}{server_ReclaimCompl} ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /Allocate\ +Copy\ +CopyNotify\ +Deallocate\ +IOAdvise\ +LayoutError/ ) { + } elsif ( $previous_line =~ /Allocate\ +Copy\ +CopyNotify\ +Deallocate\ +IOAdvise\ +LayoutError/ + && $mode eq 'server' ) + { $line =~ s/^ +//; ( $data->{stats}{server_Allocate}, $data->{stats}{server_Copy}, @@ -572,24 +594,24 @@ if ( $^O eq 'freebsd' ) { $data->{stats}{server_OffloadStat}, $data->{stats}{server_ReadPlus}, $data->{stats}{server_Seek}, $data->{stats}{server_WriteSame}, ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /Clone\ +GetExtattr\ +SetExtattr\ +ListExtattr\ +RmExtattr/ ) { + } elsif ( $previous_line =~ /Clone\ +GetExtattr\ +SetExtattr\ +ListExtattr\ +RmExtattr/ && $mode eq 'server' ) { $line =~ s/^ +//; ( $data->{stats}{server_Clone}, $data->{stats}{server_GetExtattr}, $data->{stats}{server_SetExtattr}, $data->{stats}{server_ListExtattr}, $data->{stats}{server_RmExtattr} ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /Clients\ +OpenOwner\ +Opens\ +LockOwner\ +Locks\ +Delegs/ ) { + } elsif ( $previous_line =~ /Clients\ +OpenOwner\ +Opens\ +LockOwner\ +Locks\ +Delegs/ && $mode eq 'server' ) { $line =~ s/^ +//; ( $data->{stats}{server_Clients}, $data->{stats}{server_OpenOwner}, $data->{stats}{server_Opens}, $data->{stats}{server_LockOwner}, $data->{stats}{server_Locks}, $data->{stats}{server_Delegs}, ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /^ *Layouts *$/ ) { + } elsif ( $previous_line =~ /^ +Layouts$/ && $mode eq 'server' ) { $line =~ s/^ +//; $line =~ s/ +$//; - $data->{stats}{server_Clients} = $line; - } elsif ( $previous_line =~ /Inprog\ 
+Non\-idem\ +Misses\ +CacheSize\ +TCPPeak/ ) { + $data->{stats}{server_Layouts} = $line; + } elsif ( $previous_line =~ /Inprog\ +Non\-idem\ +Misses\ +CacheSize\ +TCPPeak/ && $mode eq 'server' ) { $line =~ s/^ +//; ( $data->{stats}{server_cache_Inprog}, $data->{stats}{'server_cache_Non-idem'}, @@ -610,167 +632,173 @@ if ( $^O eq 'linux' ) { my $output_raw = `nfsstat | sed 's/[0-9\.]*\%//g'`; my @output_split = split( /\n/, $output_raw ); my $previous_line = ''; + my $mode = ''; foreach my $line (@output_split) { - if ( $previous_line =~ /calls\ +badcalls\ +badfmt\ +badauth\ +badclnt/ ) { + if ( $line =~ /^[Cc]lient/ ) { + $mode = 'client'; + } elsif ( $line =~ /^[Ss]erver/ ) { + $mode = 'server'; + } + if ( $previous_line =~ /calls\ +badcalls\ +badfmt\ +badauth\ +badclnt/ && $mode eq 'server' ) { ( $data->{stats}{server_calls}, $data->{stats}{'server_badcalls'}, $data->{stats}{server_badfmt}, $data->{stats}{server_badauth}, $data->{stats}{server_badclnt}, ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /null\ +compound/ ) { + } elsif ( $previous_line =~ /null\ +compound/ && $mode eq 'client' ) { ( $data->{stats}{server_null}, $data->{stats}{server_compound}, ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /op0\-unused\ +op1\-unused\ +op2\-future\ +access\ +close/ ) { + } elsif ( $previous_line =~ /op0\-unused\ +op1\-unused\ +op2\-future\ +access\ +close/ && $mode eq 'server' ) { ( $data->{stats}{'server_op0-unused'}, $data->{stats}{'server_op1-unused'}, $data->{stats}{'server_op2-future'}, $data->{stats}{server_Access}, $data->{stats}{server_Close}, ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /commit\ +create\ +delegpurge\ +delegreturn\ +getattr/ ) { + } elsif ( $previous_line =~ /commit\ +create\ +delegpurge\ +delegreturn\ +getattr/ && $mode eq 'server' ) { ( $data->{stats}{server_Commit}, $data->{stats}{server_Create}, $data->{stats}{server_DelePurge}, $data->{stats}{server_Delegs}, $data->{stats}{server_Getattr}, ) = 
split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /getfh\ +link\ +lock\ +lockt\ +locku/ ) { + } elsif ( $previous_line =~ /getfh\ +link\ +lock\ +lockt\ +locku/ && $mode eq 'server' ) { ( $data->{stats}{server_GetFH}, $data->{stats}{server_Link}, $data->{stats}{server_Lock}, $data->{stats}{server_LockT}, $data->{stats}{server_LockU}, ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /lookup\ +lookup_root\ +nverify\ +open\ +openattr/ ) { + } elsif ( $previous_line =~ /lookup\ +lookup_root\ +nverify\ +open\ +openattr/ && $mode eq 'server' ) { ( $data->{stats}{server_Lookup}, $data->{stats}{server_LookupP}, $data->{stats}{server_NVerify}, $data->{stats}{server_Open}, $data->{stats}{server_OpenAttr}, ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /open_conf\ +open_dgrd\ +putfh\ putpubfh\ +putrootfh/ ) { + } elsif ( $previous_line =~ /open_conf\ +open_dgrd\ +putfh\ putpubfh\ +putrootfh/ && $mode eq 'server' ) { ( $data->{stats}{server_OpenCfrm}, $data->{stats}{server_OpenDwnGr}, $data->{stats}{server_PutFH}, $data->{stats}{server_PutPubFH}, $data->{stats}{server_PutRootFH}, ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /read\ +readdir\ +readlink\ +remove\ +rename/ ) { + } elsif ( $previous_line =~ /read\ +readdir\ +readlink\ +remove\ +rename/ && $mode eq 'server' ) { ( $data->{stats}{server_Read}, $data->{stats}{server_Readdir}, $data->{stats}{server_Readlink}, $data->{stats}{server_Remove}, $data->{stats}{server_Rename}, ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /renew\ +restorefh\ +savefh\ +secinfo\ +setattr/ ) { + } elsif ( $previous_line =~ /renew\ +restorefh\ +savefh\ +secinfo\ +setattr/ && $mode eq 'server' ) { ( $data->{stats}{server_Renew}, $data->{stats}{server_RestoreFH}, $data->{stats}{server_SaveFH}, $data->{stats}{server_Secinfo}, $data->{stats}{server_Setattr}, ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /setcltid\ +setcltidconf\ +verify\ +write\ +rellockowner/ ) { + } 
elsif ( $previous_line =~ /setcltid\ +setcltidconf\ +verify\ +write\ +rellockowner/ && $mode eq 'server' ) { ( $data->{stats}{server_SetClId}, $data->{stats}{server_SetClIdCf}, $data->{stats}{server_Verify}, $data->{stats}{server_Write}, $data->{stats}{server_RelLockOwn}, ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /bc_ctl\ +bind_conn\ +exchange_id\ +create_ses\ +destroy_ses/ ) { + } elsif ( $previous_line =~ /bc_ctl\ +bind_conn\ +exchange_id\ +create_ses\ +destroy_ses/ && $mode eq 'server' ) { ( $data->{stats}{server_BackChannelCt}, $data->{stats}{server_BindConnToSes}, $data->{stats}{server_ExchangeID}, $data->{stats}{server_CreateSess}, $data->{stats}{server_DestroySess}, ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /free_stateid\ +getdirdeleg\ +getdevinfo\ +getdevlist\ +layoutcommit/ ) { + } elsif ( $previous_line =~ /free_stateid\ +getdirdeleg\ +getdevinfo\ +getdevlist\ +layoutcommit/ && $mode eq 'server' ) { ( $data->{stats}{server_FreeStateID}, $data->{stats}{server_GetDirDeleg}, $data->{stats}{server_GetDevInfo}, $data->{stats}{server_GetDevList}, $data->{stats}{server_layoutCommit}, ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /layoutget\ +layoutreturn\ +secinfononam\ +sequence\ +set_ssv/ ) { + } elsif ( $previous_line =~ /layoutget\ +layoutreturn\ +secinfononam\ +sequence\ +set_ssv/ && $mode eq 'server' ) { ( $data->{stats}{server_LayoutGet}, $data->{stats}{server_LayoutReturn}, $data->{stats}{server_SecInfNoName}, $data->{stats}{server_Sequence}, $data->{stats}{server_SetSSV}, ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /test_stateid\ +want_deleg\ +destroy_clid\ +reclaim_comp\ +allocate/ ) { + } elsif ( $previous_line =~ /test_stateid\ +want_deleg\ +destroy_clid\ +reclaim_comp\ +allocate/ && $mode eq 'server' ) { ( $data->{stats}{server_TestStateID}, $data->{stats}{server_WantDeleg}, $data->{stats}{server_DestroyClId}, $data->{stats}{server_ReclaimCompl}, 
$data->{stats}{server_Allocate}, ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /copy\ +copy_notify\ +deallocate\ +ioadvise\ +layouterror/ ) { + } elsif ( $previous_line =~ /copy\ +copy_notify\ +deallocate\ +ioadvise\ +layouterror/ && $mode eq 'server' ) { ( $data->{stats}{server_Copy}, $data->{stats}{server_CopyNotify}, $data->{stats}{server_Deallocate}, $data->{stats}{server_IOAdvise}, $data->{stats}{server_LayoutError}, ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /layoutstats\ +offloadcancel\ +offloadstatus\ +readplus\ +seek/ ) { + } elsif ( $previous_line =~ /layoutstats\ +offloadcancel\ +offloadstatus\ +readplus\ +seek/ && $mode eq 'server' ) { ( $data->{stats}{server_Layouts}, $data->{stats}{server_OffloadCncl}, $data->{stats}{server_OffloadStat}, $data->{stats}{server_ReadPlus}, $data->{stats}{server_Seek}, ) = split( /[\ \t]+/m, $line ); } elsif ( $previous_line =~ /write_same/ ) { - ( $data->{stats}{server_WriteSame} ) = split( /[\ \t]+/m, $line ); + ( $data->{stats}{server_WriteSame} ) = split( /[\ \t]+/m, $line && $mode eq 'client' ); } elsif ( $previous_line =~ /calls\ +retrans\ +authrefrsh/ ) { ( $data->{stats}{client_rpc_info_Requests}, $data->{stats}{client_rpc_info_Retries}, $data->{stats}{client_rpc_info_X_Replies} ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /null\ +read\ +write\ +commit\ +open/ ) { + } elsif ( $previous_line =~ /null\ +read\ +write\ +commit\ +open/ && $mode eq 'client' ) { ( $data->{stats}{client_rpc_null}, $data->{stats}{client_rpc_Read}, $data->{stats}{client_rpc_Write}, $data->{stats}{client_rpc_Commit}, $data->{stats}{client_rpc_Open}, ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /open_conf\ +open_noat\ +open_dgrd\ +close\ +setattr/ ) { + } elsif ( $previous_line =~ /open_conf\ +open_noat\ +open_dgrd\ +close\ +setattr/ && $mode eq 'client' ) { ( $data->{stats}{client_rpc_OpenCfr}, $data->{stats}{client_rpc_OpenLayout}, $data->{stats}{client_rpc_OpenDownGr}, 
$data->{stats}{client_rpc_Commit}, $data->{stats}{client_rpc_Open}, ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /fsinfo\ +renew\ +setclntid\ +confirm\ +lock/ ) { + } elsif ( $previous_line =~ /fsinfo\ +renew\ +setclntid\ +confirm\ +lock/ && $mode eq 'client' ) { ( $data->{stats}{client_rpc_FSinfo}, $data->{stats}{client_rpc_renew}, $data->{stats}{client_rpc_SetClId}, $data->{stats}{client_rpc_confirm}, $data->{stats}{client_rpc_Lock}, ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /lockt\ +locku\ +access\ +getattr\ +lookup/ ) { + } elsif ( $previous_line =~ /lockt\ +locku\ +access\ +getattr\ +lookup/ && $mode eq 'client' ) { ( $data->{stats}{client_rpc_LockT}, $data->{stats}{client_rpc_LockU}, $data->{stats}{client_rpc_Access}, $data->{stats}{client_rpc_Getattr}, $data->{stats}{client_rpc_Lookup}, ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /lookup_root\ +remove\ +rename\ +link\ +symlink/ ) { + } elsif ( $previous_line =~ /lookup_root\ +remove\ +rename\ +link\ +symlink/ && $mode eq 'client' ) { ( $data->{stats}{client_rpc_LookOpen}, $data->{stats}{client_rpc_Remove}, $data->{stats}{client_rpc_Rename}, $data->{stats}{client_rpc_Link}, $data->{stats}{client_rpc_Symlink}, ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /create\ +pathconf\ +statfs\ +readlink\ +readdir/ ) { + } elsif ( $previous_line =~ /create\ +pathconf\ +statfs\ +readlink\ +readdir/ && $mode eq 'client' ) { ( $data->{stats}{client_rpc_Create}, $data->{stats}{client_rpc_pathConf}, $data->{stats}{client_rpc_statfs}, $data->{stats}{client_rpc_Readlink}, $data->{stats}{client_rpc_Readlink}, ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /server_caps\ +delegreturn\ +getacl\ +setacl\ +fs_locations/ ) { + } elsif ( $previous_line =~ /server_caps\ +delegreturn\ +getacl\ +setacl\ +fs_locations/ && $mode eq 'client' ) { ( $data->{stats}{client_rpc_server_caps}, $data->{stats}{client_rpc_DelegRet}, $data->{stats}{client_rpc_GetAcl}, 
$data->{stats}{client_rpc_SetAcl}, $data->{stats}{client_rpc_fs_locations}, ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /rel_lkowner\ +secinfo\ +fsid_present\ +exchange_id\ +create_session/ ) { + } elsif ( $previous_line =~ /rel_lkowner\ +secinfo\ +fsid_present\ +exchange_id\ +create_session/ && $mode eq 'client' ) { ( $data->{stats}{client_rpc_RelLckOwn}, $data->{stats}{client_rpc_secinfo}, $data->{stats}{client_rpc_fsid_present}, $data->{stats}{client_rpc_ExchangeId}, $data->{stats}{client_rpc_CreateSess}, ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /destroy_session\ +sequence\ +get_lease_time\ +reclaim_comp\ +layoutget/ ) { + } elsif ( $previous_line =~ /destroy_session\ +sequence\ +get_lease_time\ +reclaim_comp\ +layoutget/ && $mode eq 'client' ) { ( $data->{stats}{client_rpc_DestroySess}, $data->{stats}{client_rpc_sequence}, $data->{stats}{client_rpc_get_lease_time}, $data->{stats}{client_rpc_ReclaimCompl}, $data->{stats}{client_rpc_LayoutGet}, ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /getdevinfo\ +layoutcommit\ +layoutreturn\ +secinfo_no\ +test_stateid/ ) { + } elsif ( $previous_line =~ /getdevinfo\ +layoutcommit\ +layoutreturn\ +secinfo_no\ +test_stateid/ && $mode eq 'client' ) { ( $data->{stats}{client_rpc_GetDevInfo}, $data->{stats}{client_rpc_LayoutCommit}, $data->{stats}{client_rpc_LayoutReturn}, $data->{stats}{client_rpc_secinfo_no}, $data->{stats}{client_rpc_test_stateid}, ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /free_stateid\ +getdevicelist\ +bind_conn_to_ses\ +destroy_clientid\ +seek/ ) { + } elsif ( $previous_line =~ /free_stateid\ +getdevicelist\ +bind_conn_to_ses\ +destroy_clientid\ +seek/ && $mode eq 'client' ) { ( $data->{stats}{client_rpc_FreeStateID}, $data->{stats}{client_rpc_getdevicelist}, $data->{stats}{client_rpc_BindConnSess}, $data->{stats}{client_rpc_DestroyClId}, $data->{stats}{client_rpc_Seek}, ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ 
/allocate\ +deallocate\ +layoutstats\ +clone/ ) { + } elsif ( $previous_line =~ /allocate\ +deallocate\ +layoutstats\ +clone/ && $mode eq 'client' ) { ( $data->{stats}{client_rpc_Allocate}, $data->{stats}{client_rpc_Deallocate}, $data->{stats}{client_rpc_layoutstats}, $data->{stats}{client_rpc_clone}, @@ -787,9 +815,9 @@ if ( $^O eq 'linux' ) { #### my @stat_keys = keys( %{ $data->{stats} } ); foreach my $item (@stat_keys) { - if ( $item =~ /^client/ && $data->{stats}{$item} > 0 ) { + if ( $item =~ /^client/ && defined($data->{stats}{$item}) && $data->{stats}{$item} > 0 ) { $data->{is_client} = 1; - } elsif ( $item =~ /^server/ && $data->{stats}{$item} > 0 ) { + } elsif ( $item =~ /^server/ && defined($data->{stats}{$item}) && $data->{stats}{$item} > 0 ) { $data->{is_server} = 1; } } From 12e4a2d809845e064f0fcc4e0f21802065114498 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Sat, 9 Mar 2024 11:21:19 -0600 Subject: [PATCH 414/497] begin re-working nfs stats gather... freebsd done now for Linux --- snmp/nfs | 900 ++++++++++++++++++++++--------------------------------- 1 file changed, 358 insertions(+), 542 deletions(-) diff --git a/snmp/nfs b/snmp/nfs index a82159a24..87f518458 100755 --- a/snmp/nfs +++ b/snmp/nfs @@ -132,20 +132,6 @@ my $data = { mounts => [], mounted_by => [], stats => { - client_rpc_clone => undef, - client_rpc_layoutstats => undef, - client_rpc_getdevicelist => undef, - client_rpc_test_stateid => undef, - client_rpc_secinfo_no => undef, - client_rpc_get_lease_time => undef, - client_rpc_sequence => undef, - client_rpc_fsid_present => undef, - client_rpc_secinfo => undef, - client_rpc_fs_locations => undef, - client_rpc_server_caps => undef, - client_rpc_renew => undef, - client_rpc_confirm => undef, - client_rpc_null => undef, client_rpc_Getattr => undef, client_rpc_Setattr => undef, client_rpc_Lookup => undef, @@ -238,94 +224,94 @@ my $data = { client_cache_BioD_Misses => undef, client_cache_DirE_Hits => undef, 
client_cache_DirE_Misses => undef, - server_Getattr => undef, - server_Setattr => undef, - server_Lookup => undef, - server_Readlink => undef, - server_Read => undef, - server_Write => undef, - server_Create => undef, - server_Remove => undef, - server_Rename => undef, - server_Link => undef, - server_Symlink => undef, - server_Mkdir => undef, - server_Rmdir => undef, - server_Readdir => undef, - server_RdirPlus => undef, - server_Access => undef, - server_Mknod => undef, - server_Fsstat => undef, - server_FSinfo => undef, - server_pathConf => undef, - server_Commit => undef, - server_LookupP => undef, - server_SetClId => undef, - server_SetClIdCf => undef, - server_Open => undef, - server_OpenAttr => undef, - server_OpenDwnGr => undef, - server_OpenCfrm => undef, - server_DelePurge => undef, - server_DelRet => undef, - server_GetFH => undef, - server_Lock => undef, - server_LockT => undef, - server_LockU => undef, - server_Close => undef, - server_Verify => undef, - server_NVerify => undef, - server_PutFH => undef, - server_PutPubFH => undef, - server_PutRootFH => undef, - server_Renew => undef, - server_RestoreFH => undef, - server_SaveFH => undef, - server_Secinfo => undef, - server_RelLockOwn => undef, - server_V4Create => undef, - server_BackChannelCt => undef, - server_BindConnToSes => undef, - server_ExchangeID => undef, - server_CreateSess => undef, - server_DestroySess => undef, - server_FreeStateID => undef, - server_GetDirDeleg => undef, - server_GetDevInfo => undef, - server_GetDevList => undef, - server_layoutCommit => undef, - server_LayoutGet => undef, - server_LayoutReturn => undef, - server_GetDirDeleg => undef, - server_GetDevInfo => undef, - server_GetDevList => undef, - server_layoutCommit => undef, - server_LayoutGet => undef, - server_LayoutReturn => undef, - server_SecInfNoName => undef, - server_Sequence => undef, - server_SetSSV => undef, - server_TestStateID => undef, - server_WantDeleg => undef, - server_DestroyClId => undef, - 
server_ReclaimCompl => undef, - server_Allocate => undef, - server_Copy => undef, - server_CopyNotify => undef, - server_Deallocate => undef, - server_IOAdvise => undef, - server_LayoutError => undef, - server_LayoutStats => undef, - server_OffloadCncl => undef, - server_OffloadStat => undef, - server_ReadPlus => undef, - server_Seek => undef, - server_WriteSame => undef, - server_Clone => undef, - server_GetExtattr => undef, - server_SetExtattr => undef, - server_ListExtattr => undef, - server_RmExtattr => undef, + server_rpc_Getattr => undef, + server_rpc_Setattr => undef, + server_rpc_Lookup => undef, + server_rpc_Readlink => undef, + server_rpc_Read => undef, + server_rpc_Write => undef, + server_rpc_Create => undef, + server_rpc_Remove => undef, + server_rpc_Rename => undef, + server_rpc_Link => undef, + server_rpc_Symlink => undef, + server_rpc_Mkdir => undef, + server_rpc_Rmdir => undef, + server_rpc_Readdir => undef, + server_rpc_RdirPlus => undef, + server_rpc_Access => undef, + server_rpc_Mknod => undef, + server_rpc_Fsstat => undef, + server_rpc_FSinfo => undef, + server_rpc_pathConf => undef, + server_rpc_Commit => undef, + server_rpc_LookupP => undef, + server_rpc_SetClId => undef, + server_rpc_SetClIdCf => undef, + server_rpc_Open => undef, + server_rpc_OpenAttr => undef, + server_rpc_OpenDwnGr => undef, + server_rpc_OpenCfrm => undef, + server_rpc_DelePurge => undef, + server_rpc_DelRet => undef, + server_rpc_GetFH => undef, + server_rpc_Lock => undef, + server_rpc_LockT => undef, + server_rpc_LockU => undef, + server_rpc_Close => undef, + server_rpc_Verify => undef, + server_rpc_NVerify => undef, + server_rpc_PutFH => undef, + server_rpc_PutPubFH => undef, + server_rpc_PutRootFH => undef, + server_rpc_Renew => undef, + server_rpc_RestoreFH => undef, + server_rpc_SaveFH => undef, + server_rpc_Secinfo => undef, + server_rpc_RelLockOwn => undef, + server_rpc_V4Create => undef, + server_rpc_BackChannelCt => undef, + server_rpc_BindConnToSes => undef, + 
server_rpc_ExchangeID => undef, + server_rpc_CreateSess => undef, + server_rpc_DestroySess => undef, + server_rpc_FreeStateID => undef, + server_rpc_GetDirDeleg => undef, + server_rpc_GetDevInfo => undef, + server_rpc_GetDevList => undef, + server_rpc_layoutCommit => undef, + server_rpc_LayoutGet => undef, + server_rpc_LayoutReturn => undef, + server_rpc_GetDirDeleg => undef, + server_rpc_GetDevInfo => undef, + server_rpc_GetDevList => undef, + server_rpc_layoutCommit => undef, + server_rpc_LayoutGet => undef, + server_rpc_LayoutReturn => undef, + server_rpc_SecInfNoName => undef, + server_rpc_Sequence => undef, + server_rpc_SetSSV => undef, + server_rpc_TestStateID => undef, + server_rpc_WantDeleg => undef, + server_rpc_DestroyClId => undef, + server_rpc_ReclaimCompl => undef, + server_rpc_Allocate => undef, + server_rpc_Copy => undef, + server_rpc_CopyNotify => undef, + server_rpc_Deallocate => undef, + server_rpc_IOAdvise => undef, + server_rpc_LayoutError => undef, + server_rpc_LayoutStats => undef, + server_rpc_OffloadCncl => undef, + server_rpc_OffloadStat => undef, + server_rpc_ReadPlus => undef, + server_rpc_Seek => undef, + server_rpc_WriteSame => undef, + server_rpc_Clone => undef, + server_rpc_GetExtattr => undef, + server_rpc_SetExtattr => undef, + server_rpc_ListExtattr => undef, + server_rpc_RmExtattr => undef, server_Clients => undef, server_OpenOwner => undef, server_Opens => undef, @@ -334,20 +320,10 @@ my $data = { server_Delegs => undef, server_Layouts => undef, server_cache_Inprog => undef, - 'server_cache_Non-idem' => undef, + server_cache_NonIdem => undef, server_cache_Misses => undef, server_cache_CacheSize => undef, server_cache_TCPPeak => undef, - server_calls => undef, - server_badcalls => undef, - server_badfmt => undef, - server_badauth => undef, - server_badclnt => undef, - server_null => undef, - server_compound => undef, - 'server_op0-unused' => undef, - 'server_op1-unused' => undef, - 'server_op2-future' => undef, } }; @@ -357,270 
+333,272 @@ my $data = { #### #### if ( $^O eq 'freebsd' ) { - my $output_raw = `nfsstat -E`; - my @output_split = split( /\n/, $output_raw ); - my $previous_line = ''; - my $mode = ''; - foreach my $line (@output_split) { - if ( $line =~ /^[Cc]lient/ ) { - $mode = 'client'; - } elsif ( $line =~ /^[Ss]erver/ ) { - $mode = 'server'; - } - if ( $previous_line =~ /Getattr\ +Setattr\ +Lookup\ +Readlink\ +Read\ +Write/ && $mode eq 'client' ) { - $line =~ s/^ +//; - ( - $data->{stats}{client_rpc_Getattr}, $data->{stats}{client_rpc_Setattr}, - $data->{stats}{client_rpc_Lookup}, $data->{stats}{client_rpc_Readlink}, - $data->{stats}{client_rpc_Read}, $data->{stats}{client_rpc_Write} - ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /Create\ +Remove\ +Rename\ +Link\ +Symlink\ +Mkdir/ && $mode eq 'client' ) { - $line =~ s/^ +//; - ( - $data->{stats}{client_rpc_Create}, $data->{stats}{client_rpc_Remove}, - $data->{stats}{client_rpc_Rename}, $data->{stats}{client_rpc_Link}, - $data->{stats}{client_rpc_Symlink}, $data->{stats}{client_rpc_Mkdir} - ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /Rmdir\ +Readdir\ +RdirPlus\ +Access\ +Mknod\ +Fsstat/ && $mode eq 'client' ) { - $line =~ s/^ +//; - ( - $data->{stats}{client_rpc_Rmdir}, $data->{stats}{client_rpc_Readdir}, - $data->{stats}{client_rpc_RdirPlus}, $data->{stats}{client_rpc_Access}, - $data->{stats}{client_rpc_Mknod}, $data->{stats}{client_rpc_Fsstat} - ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /FSinfo\ +pathConf\ +Commit\ +SetClId\ +SetClIdCf\ +Lock/ && $mode eq 'client' ) { - $line =~ s/^ +//; - ( - $data->{stats}{client_rpc_FSinfo}, $data->{stats}{client_rpc_pathConf}, - $data->{stats}{client_rpc_Commit}, $data->{stats}{client_rpc_SetClId}, - $data->{stats}{client_rpc_SetClIdCf}, $data->{stats}{client_rpc_Lock} - ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /LockT\ +LockU\ +Open\ +OpenCfr/ && $mode eq 'client' ) { - $line =~ s/^ +//; - ( - $data->{stats}{client_rpc_LockT}, 
$data->{stats}{client_rpc_LockU}, - $data->{stats}{client_rpc_Open}, $data->{stats}{client_rpc_OpenCfr} - ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /OpenDownGr\ +Close/ && $mode eq 'client' ) { - $line =~ s/^ +//; - ( $data->{stats}{client_rpc_OpenDownGr}, $data->{stats}{client_rpc_Close}, ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /RelLckOwn\ +FreeStateID\ +PutRootFH\ +DelegRet\ +GetAcl\ +SetAcl/ - && $mode eq 'client' ) - { - $line =~ s/^ +//; - ( - $data->{stats}{client_rpc_RelLckOwn}, $data->{stats}{client_rpc_FreeStateID}, - $data->{stats}{client_rpc_PutRootFH}, $data->{stats}{client_rpc_DelegRet}, - $data->{stats}{client_rpc_GetAcl}, $data->{stats}{client_rpc_SetAcl} - ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /ExchangeId\ +CreateSess\ +DestroySess\ +DestroyClId\ +LayoutGet\ +GetDevInfo/ - && $mode eq 'client' ) - { - $line =~ s/^ +//; - ( - $data->{stats}{client_rpc_ExchangeId}, $data->{stats}{client_rpc_CreateSess}, - $data->{stats}{client_rpc_DestroySess}, $data->{stats}{client_rpc_DestroyClId}, - $data->{stats}{client_rpc_LayoutGet}, $data->{stats}{client_rpc_GetDevInfo} - ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /LayoutCommit\ +LayoutReturn\ +ReclaimCompl\ +ReadDataS\ +WriteDataS\ +CommitDataS/ - && $mode eq 'client' ) - { - $line =~ s/^ +//; - ( - $data->{stats}{client_rpc_LayoutCommit}, $data->{stats}{client_rpc_LayoutReturn}, - $data->{stats}{client_rpc_ReclaimCompl}, $data->{stats}{client_rpc_ReadDataS}, - $data->{stats}{client_rpc_WriteDataS}, $data->{stats}{client_rpc_CommitDataS} - ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /OpenLayout\ +CreateLayout\ +BindConnSess\ +LookupOpen/ && $mode eq 'client' ) { - $line =~ s/^ +//; - ( - $data->{stats}{client_rpc_OpenLayout}, $data->{stats}{client_rpc_CreateLayout}, - $data->{stats}{client_rpc_BindConnSess}, $data->{stats}{client_rpc_LookupOpen} - ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /IOAdvise\ +Allocate\ +Copy\ 
+Seek\ +SeekDataS\ +GetExtattr/ && $mode eq 'client' ) - { - $line =~ s/^ +//; - ( - $data->{stats}{client_rpc_IOAdvise}, $data->{stats}{client_rpc_Allocate}, - $data->{stats}{client_rpc_Copy}, $data->{stats}{client_rpc_Seek}, - $data->{stats}{client_rpc_SeekDataS}, $data->{stats}{client_rpc_GetExtattr} - ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /SetExtattr\ +RmExtattr\ +ListExtattr\ +Deallocate\ +LayoutError/ - && $mode eq 'client' ) - { - $line =~ s/^ +//; - ( - $data->{stats}{client_rpc_SetExtattr}, $data->{stats}{client_rpc_RmExtattr}, - $data->{stats}{client_rpc_ListExtattr}, $data->{stats}{client_rpc_Deallocate}, - $data->{stats}{client_rpc_LayoutError} - ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /OpenOwner\ +Opens\ +LockOwner\ +Locks\ +Delegs\ +LocalOwn/ && $mode eq 'client' ) { - $line =~ s/^ +//; - ( - $data->{stats}{client_OpenOwner}, $data->{stats}{client_Opens}, $data->{stats}{client_LockOwner}, - $data->{stats}{client_Locks}, $data->{stats}{client_Delegs}, $data->{stats}{client_LocalOwn} - ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /LocalOpen\ +LocalLown\ +LocalLock\ +Layouts/ && $mode eq 'client' ) { - $line =~ s/^ +//; - ( - $data->{stats}{client_LocalOpen}, $data->{stats}{client_LocalLown}, - $data->{stats}{client_LocalLock}, $data->{stats}{client_Layouts} - ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /TimedOut\ +Invalid\ +X\ Replies\ +Retries\ +Requests/ && $mode eq 'client' ) { - $line =~ s/^ +//; - ( - $data->{stats}{client_rpc_info_TimedOut}, $data->{stats}{client_rpc_info_Invalid}, - $data->{stats}{client_rpc_info_X_Replies}, $data->{stats}{client_rpc_info_Retries}, - $data->{stats}{client_rpc_info_Requests} - ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /Attr\ Hits\ +Attr\ Misses\ +Lkup\ Hits\ +Lkup\ Misses/ && $mode eq 'client' ) { - $line =~ s/^ +//; - ( - $data->{stats}{client_cache_Attr_Hits}, $data->{stats}{client_cache_Attr_Misses}, - 
$data->{stats}{client_cache_Lkup_Hits}, $data->{stats}{client_cache_Lkup_Misses} - ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /BioR\ Hits\ +BioR\ Misses\ +BioW\ Hits\ +BioW\ Misses/ && $mode eq 'client' ) { - $line =~ s/^ +//; - ( - $data->{stats}{client_cache_BioR_Hits}, $data->{stats}{client_cache_BioR_Misses}, - $data->{stats}{client_cache_BioW_Hits}, $data->{stats}{client_cache_BioW_Misses} - ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /BioRL Hits\ +BioRL\ +Misses\ +BioD Hits\ +BioD Misses/ && $mode eq 'client' ) { - $line =~ s/^ +//; - ( - $data->{stats}{client_cache_BioRL_Hits}, $data->{stats}{client_cache_BioRL_Misses}, - $data->{stats}{client_cache_BioD_Hits}, $data->{stats}{client_cache_BioD_Misses}, - ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /DirE\ Hits\ +DirE\ Misses/ && $mode eq 'client' ) { - $line =~ s/^ +//; - ( $data->{stats}{client_cache_DirE_Hits}, $data->{stats}{client_cache_DirE_Misses}, ) - = split( / +/m, $line ); - } elsif ( $previous_line =~ /Getattr\ +Setattr\ +Lookup\ +Readlink\ +Read\ +Write/ && $mode eq 'server' ) { - $line =~ s/^ +//; - ( - $data->{stats}{server_Getattr}, $data->{stats}{server_Setattr}, $data->{stats}{server_Lookup}, - $data->{stats}{server_Readlink}, $data->{stats}{server_Read}, $data->{stats}{server_Write}, - ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /Create\ +Remove\ +Rename\ +Link\ +Symlink\ +Mkdir/ && $mode eq 'server' ) { - $line =~ s/^ +//; - ( - $data->{stats}{server_Create}, $data->{stats}{server_Remove}, $data->{stats}{server_Rename}, - $data->{stats}{server_Link}, $data->{stats}{server_Symlink}, $data->{stats}{server_Mkdir}, - ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /Rmdir\ +Readdir\ +RdirPlus\ +Access\ +Mknod\ +Fsstat/ && $mode eq 'server' ) { - $line =~ s/^ +//; - ( - $data->{stats}{server_Rmdir}, $data->{stats}{server_Readdir}, $data->{stats}{server_RdirPlus}, - $data->{stats}{server_Access}, $data->{stats}{server_Mknod}, 
$data->{stats}{server_Fsstat}, - ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /FSinfo\ +pathConf\ +Commit\ +LookupP\ +SetClId\ +SetClIdCf/ && $mode eq 'server' ) - { - $line =~ s/^ +//; - ( - $data->{stats}{server_FSinfo}, $data->{stats}{server_pathConf}, $data->{stats}{server_Commit}, - $data->{stats}{server_LookupP}, $data->{stats}{server_SetClId}, $data->{stats}{server_SetClIdCf}, - ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /Open\ +OpenAttr\ +OpenDwnGr\ +OpenCfrm\ +DelePurge\ +DelRet/ && $mode eq 'server' ) - { - $line =~ s/^ +//; - ( - $data->{stats}{server_Open}, $data->{stats}{server_OpenAttr}, $data->{stats}{server_OpenDwnGr}, - $data->{stats}{server_OpenCfrm}, $data->{stats}{server_DelePurge}, $data->{stats}{server_DelRet}, - ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /GetFH\ +Lock\ +LockT\ +LockU\ +Close\ +Verify/ && $mode eq 'server' ) { - $line =~ s/^ +//; - ( - $data->{stats}{server_GetFH}, $data->{stats}{server_Lock}, $data->{stats}{server_LockT}, - $data->{stats}{server_LockU}, $data->{stats}{server_Close}, $data->{stats}{server_Verify}, - ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /NVerify\ +PutFH\ +PutPubFH\ +PutRootFH\ +Renew\ +RestoreFH/ && $mode eq 'server' ) - { - $line =~ s/^ +//; - ( - $data->{stats}{server_NVerify}, $data->{stats}{server_PutFH}, $data->{stats}{server_PutPubFH}, - $data->{stats}{server_PutRootFH}, $data->{stats}{server_Renew}, $data->{stats}{server_RestoreFH}, - ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /SaveFH\ +Secinfo\ +RelLockOwn\ +V4Create/ && $mode eq 'server' ) { - $line =~ s/^ +//; - ( - $data->{stats}{server_SaveFH}, $data->{stats}{server_Secinfo}, - $data->{stats}{server_RelLockOwn}, $data->{stats}{server_V4Create} - ) = split( / +/m, $line ); - } elsif ( - $previous_line =~ /BackChannelCt\ *BindConnToSes\ +ExchangeID\ +CreateSess\ +DestroySess\ +FreeStateID/ - && $mode eq 'server' ) - { - $line =~ s/^ +//; - ( - 
$data->{stats}{server_BackChannelCt}, $data->{stats}{server_BindConnToSes}, - $data->{stats}{server_ExchangeID}, $data->{stats}{server_CreateSess}, - $data->{stats}{server_DestroySess}, $data->{stats}{server_FreeStateID}, - ) = split( / +/m, $line ); - } elsif ( - $previous_line =~ /GetDirDeleg\ +GetDevInfo\ +GetDevList\ +[lL]ayoutCommit\ +LayoutGet\ +LayoutReturn/ - && $mode eq 'server' ) - { - $line =~ s/^ +//; - ( - $data->{stats}{server_GetDirDeleg}, $data->{stats}{server_GetDevInfo}, - $data->{stats}{server_GetDevList}, $data->{stats}{server_layoutCommit}, - $data->{stats}{server_LayoutGet}, $data->{stats}{server_LayoutReturn}, - ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /SecInfNoName\ +Sequence\ +SetSSV\ +TestStateID\ +WantDeleg\ +DestroyClId/ - && $mode eq 'server' ) - { - $line =~ s/^ +//; - ( - $data->{stats}{server_SecInfNoName}, $data->{stats}{server_Sequence}, - $data->{stats}{server_SetSSV}, $data->{stats}{server_TestStateID}, - $data->{stats}{server_WantDeleg}, $data->{stats}{server_DestroyClId}, - ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /ReclaimCompl/ && $mode eq 'server' ) { - $line =~ s/^ +//; - ( $data->{stats}{server_ReclaimCompl} ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /Allocate\ +Copy\ +CopyNotify\ +Deallocate\ +IOAdvise\ +LayoutError/ - && $mode eq 'server' ) - { - $line =~ s/^ +//; - ( - $data->{stats}{server_Allocate}, $data->{stats}{server_Copy}, - $data->{stats}{server_CopyNotify}, $data->{stats}{server_Deallocate}, - $data->{stats}{server_IOAdvise}, $data->{stats}{server_LayoutError}, - ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /LayoutStats\ +OffloadCncl\ +OffloadStat\ +ReadPlus\ +Seek\ +WriteSame/ ) { - $line =~ s/^ +//; - ( - $data->{stats}{server_LayoutStats}, $data->{stats}{server_OffloadCncl}, - $data->{stats}{server_OffloadStat}, $data->{stats}{server_ReadPlus}, - $data->{stats}{server_Seek}, $data->{stats}{server_WriteSame}, - ) = split( / +/m, $line ); - } elsif ( 
$previous_line =~ /Clone\ +GetExtattr\ +SetExtattr\ +ListExtattr\ +RmExtattr/ && $mode eq 'server' ) { - $line =~ s/^ +//; - ( - $data->{stats}{server_Clone}, $data->{stats}{server_GetExtattr}, - $data->{stats}{server_SetExtattr}, $data->{stats}{server_ListExtattr}, - $data->{stats}{server_RmExtattr} - ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /Clients\ +OpenOwner\ +Opens\ +LockOwner\ +Locks\ +Delegs/ && $mode eq 'server' ) { - $line =~ s/^ +//; - ( - $data->{stats}{server_Clients}, $data->{stats}{server_OpenOwner}, $data->{stats}{server_Opens}, - $data->{stats}{server_LockOwner}, $data->{stats}{server_Locks}, $data->{stats}{server_Delegs}, - ) = split( / +/m, $line ); - } elsif ( $previous_line =~ /^ +Layouts$/ && $mode eq 'server' ) { - $line =~ s/^ +//; - $line =~ s/ +$//; - $data->{stats}{server_Layouts} = $line; - } elsif ( $previous_line =~ /Inprog\ +Non\-idem\ +Misses\ +CacheSize\ +TCPPeak/ && $mode eq 'server' ) { - $line =~ s/^ +//; - ( - $data->{stats}{server_cache_Inprog}, $data->{stats}{'server_cache_Non-idem'}, - $data->{stats}{server_cache_Misses}, $data->{stats}{server_cache_CacheSize}, - $data->{stats}{server_cache_TCPPeak} - ) = split( / +/m, $line ); - } - $previous_line = $line; - } ## end foreach my $line (@output_split) + eval { + my $output_raw = `nfsstat -E --libxo json`; + my $stats_json = decode_json($output_raw); + $data->{stats}{client_rpc_Getattr} = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{getattr}; + $data->{stats}{client_rpc_Setattr} = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{setattr}; + $data->{stats}{client_rpc_Lookup} = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{lookup}; + $data->{stats}{client_rpc_Readlink} = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{readlink}; + $data->{stats}{client_rpc_Read} = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{read}; + $data->{stats}{client_rpc_Write} = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{write}; + 
$data->{stats}{client_rpc_Create} = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{create};
+ $data->{stats}{client_rpc_Remove} = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{remove};
+ $data->{stats}{client_rpc_Rename} = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{rename};
+ $data->{stats}{client_rpc_Link} = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{link};
+ $data->{stats}{client_rpc_Symlink} = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{symlink};
+ $data->{stats}{client_rpc_Mkdir} = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{mkdir};
+ $data->{stats}{client_rpc_Rmdir} = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{rmdir};
+ $data->{stats}{client_rpc_Readdir} = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{readdir};
+ $data->{stats}{client_rpc_RdirPlus} = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{rdirplus};
+ $data->{stats}{client_rpc_Access} = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{access};
+ $data->{stats}{client_rpc_Mknod} = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{mknod};
+ $data->{stats}{client_rpc_Fsstat} = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{fsstat};
+ $data->{stats}{client_rpc_FSinfo} = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{fsinfo};
+ $data->{stats}{client_rpc_pathConf} = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{pathconf};
+ $data->{stats}{client_rpc_Commit} = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{commit};
+ $data->{stats}{client_rpc_SetClId} = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{setclientid};
+ $data->{stats}{client_rpc_SetClIdCf}
+ = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{setclientidcf};
+ $data->{stats}{client_rpc_Lock} = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{lock};
+ $data->{stats}{client_rpc_LockT} = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{lockt};
+ $data->{stats}{client_rpc_LockU} = 
$stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{locku}; + $data->{stats}{client_rpc_Open} = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{open}; + $data->{stats}{client_rpc_OpenCfr} = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{opencfr}; + $data->{stats}{client_rpc_OpenDownGr} + = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{nfsv41}{opendowngr}; + $data->{stats}{client_rpc_Close} = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{nfsv41}{close}; + $data->{stats}{client_rpc_RelLckOwn} + = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{nfsv41}{rellckown}; + $data->{stats}{client_rpc_FreeStateID} + = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{nfsv41}{freestateid}; + $data->{stats}{client_rpc_PutRootFH} + = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{nfsv41}{putrootfh}; + $data->{stats}{client_rpc_DelegRet} + = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{nfsv41}{delegret}; + $data->{stats}{client_rpc_GetAcl} = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{nfsv41}{getacl}; + $data->{stats}{client_rpc_SetAcl} = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{nfsv41}{setacl}; + $data->{stats}{client_rpc_ExchangeId} + = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{nfsv41}{exchangeid}; + $data->{stats}{client_rpc_CreateSess} + = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{nfsv41}{createsess}; + $data->{stats}{client_rpc_DestroySess} + = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{nfsv41}{destroysess}; + $data->{stats}{client_rpc_DestroyClId} + = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{nfsv41}{destroyclid}; + $data->{stats}{client_rpc_LayoutGet} + = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{nfsv41}{layoutget}; + $data->{stats}{client_rpc_GetDevInfo} + = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{nfsv41}{getdevinfo}; + $data->{stats}{client_rpc_LayoutCommit} + = 
$stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{nfsv41}{layoutcomit}; + $data->{stats}{client_rpc_LayoutReturn} + = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{nfsv41}{layoutreturn}; + $data->{stats}{client_rpc_ReclaimCompl} + = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{nfsv41}{reclaimcompl}; + $data->{stats}{client_rpc_ReadDataS} + = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{nfsv41}{readdatas}; + $data->{stats}{client_rpc_WriteDataS} + = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{nfsv41}{writedatas}; + $data->{stats}{client_rpc_CommitDataS} + = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{nfsv41}{commitdatas}; + $data->{stats}{client_rpc_OpenLayout} + = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{nfsv41}{openlayout}; + $data->{stats}{client_rpc_CreateLayout} + = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{nfsv41}{createlayout}; + $data->{stats}{client_rpc_BindConnSess} + = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{nfsv41}{bindconnsess}; + $data->{stats}{client_rpc_LookupOpen} + = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{nfsv41}{lookupopen}; + $data->{stats}{client_rpc_IOAdvise} + = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{nfsv42}{ioadvise}; + $data->{stats}{client_rpc_Allocate} + = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{nfsv42}{allocate}; + $data->{stats}{client_rpc_Copy} = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{nfsv42}{copy}; + $data->{stats}{client_rpc_Seek} = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{nfsv42}{seek}; + $data->{stats}{client_rpc_SeekDataS} + = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{nfsv42}{seekdatas}; + $data->{stats}{client_rpc_GetExtattr} + = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{nfsv42}{getextattr}; + $data->{stats}{client_rpc_SetExtattr} + = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{nfsv42}{setextattr}; + 
$data->{stats}{client_rpc_RmExtattr} + = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{nfsv42}{rmextattr}; + $data->{stats}{client_rpc_ListExtattr} + = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{nfsv42}{listextattr}; + $data->{stats}{client_rpc_Deallocate} + = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{nfsv42}{deallocate}; + $data->{stats}{client_rpc_LayoutError} + = $stats_json->{nfsstat}{nfsv4}{clientstats}{operations}{nfsv42}{layouterror}; + $data->{stats}{client_OpenOwner} = $stats_json->{nfsstat}{nfsv4}{clientstats}{client}{openowner}; + $data->{stats}{client_Opens} = $stats_json->{nfsstat}{nfsv4}{clientstats}{client}{opens}; + $data->{stats}{client_LockOwner} = $stats_json->{nfsstat}{nfsv4}{clientstats}{client}{lockowner}; + $data->{stats}{client_Locks} = $stats_json->{nfsstat}{nfsv4}{clientstats}{client}{locks}; + $data->{stats}{client_Delegs} = $stats_json->{nfsstat}{nfsv4}{clientstats}{client}{delegs}; + $data->{stats}{client_LocalOwn} = $stats_json->{nfsstat}{nfsv4}{clientstats}{client}{localown}; + $data->{stats}{client_LocalOpen} = $stats_json->{nfsstat}{nfsv4}{clientstats}{client}{localopen}; + $data->{stats}{client_LocalLown} = $stats_json->{nfsstat}{nfsv4}{clientstats}{client}{locallown}; + $data->{stats}{client_LocalLock} = $stats_json->{nfsstat}{nfsv4}{clientstats}{client}{locallock}; + $data->{stats}{client_Layouts} = $stats_json->{nfsstat}{nfsv4}{clientstats}{client}{layouts}; + $data->{stats}{client_rpc_info_TimedOut} = $stats_json->{nfsstat}{nfsv4}{clientstats}{rpc}{timedout}; + $data->{stats}{client_rpc_info_Invalid} = $stats_json->{nfsstat}{nfsv4}{clientstats}{rpc}{invalid}; + $data->{stats}{client_rpc_info_X_Replies} = $stats_json->{nfsstat}{nfsv4}{clientstats}{rpc}{timedout}; + $data->{stats}{client_rpc_info_Retries} = $stats_json->{nfsstat}{nfsv4}{clientstats}{rpc}{retries}; + $data->{stats}{client_rpc_info_Requests} = $stats_json->{nfsstat}{nfsv4}{clientstats}{rpc}{requests}; + 
$data->{stats}{client_cache_Attr_Hits} = $stats_json->{nfsstat}{nfsv4}{clientstats}{cache}{attrhits}; + $data->{stats}{client_cache_Attr_Misses} = $stats_json->{nfsstat}{nfsv4}{clientstats}{cache}{attrmisses}; + $data->{stats}{client_cache_Lkup_Hits} = $stats_json->{nfsstat}{nfsv4}{clientstats}{cache}{lkuphits}; + $data->{stats}{client_cache_Lkup_Misses} = $stats_json->{nfsstat}{nfsv4}{clientstats}{cache}{lkupmisses}; + $data->{stats}{client_cache_BioR_Hits} = $stats_json->{nfsstat}{nfsv4}{clientstats}{cache}{biorhits}; + $data->{stats}{client_cache_BioR_Misses} = $stats_json->{nfsstat}{nfsv4}{clientstats}{cache}{biormisses}; + $data->{stats}{client_cache_BioW_Hits} = $stats_json->{nfsstat}{nfsv4}{clientstats}{cache}{biowhits}; + $data->{stats}{client_cache_BioW_Misses} = $stats_json->{nfsstat}{nfsv4}{clientstats}{cache}{biowmisses}; + $data->{stats}{client_cache_BioRL_Hits} = $stats_json->{nfsstat}{nfsv4}{clientstats}{cache}{biorlhits}; + $data->{stats}{client_cache_BioRL_Misses} = $stats_json->{nfsstat}{nfsv4}{clientstats}{cache}{biorlmisses}; + $data->{stats}{client_cache_BioD_Hits} = $stats_json->{nfsstat}{nfsv4}{clientstats}{cache}{biodhits}; + $data->{stats}{client_cache_BioD_Misses} = $stats_json->{nfsstat}{nfsv4}{clientstats}{cache}{biodmisses}; + $data->{stats}{client_cache_DirE_Hits} = $stats_json->{nfsstat}{nfsv4}{clientstats}{cache}{direhits}; + $data->{stats}{client_cache_DirE_Misses} = $stats_json->{nfsstat}{nfsv4}{clientstats}{cache}{diremisses}; + $data->{stats}{server_rpc_Getattr} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{getattr}; + $data->{stats}{server_rpc_Setattr} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{setattr}; + $data->{stats}{server_rpc_Lookup} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{lookup}; + $data->{stats}{server_rpc_Readlink} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{readlink}; + $data->{stats}{server_rpc_Read} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{read}; 
+ $data->{stats}{server_rpc_Write} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{write}; + $data->{stats}{server_rpc_Create} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{create}; + $data->{stats}{server_rpc_Remove} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{remove}; + $data->{stats}{server_rpc_Rename} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{rename}; + $data->{stats}{server_rpc_Link} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{link}; + $data->{stats}{server_rpc_Symlink} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{symlink}; + $data->{stats}{server_rpc_Mkdir} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{mkdir}; + $data->{stats}{server_rpc_Rmdir} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{rmdir}; + $data->{stats}{server_rpc_Readdir} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{readdir}; + $data->{stats}{server_rpc_RdirPlus} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{rdirplus}; + $data->{stats}{server_rpc_Access} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{access}; + $data->{stats}{server_rpc_Mknod} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{mknod}; + $data->{stats}{server_rpc_Fsstat} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{fsstat}; + $data->{stats}{server_rpc_FSinfo} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{fsinfo}; + $data->{stats}{server_rpc_pathConf} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{pathconf}; + $data->{stats}{server_rpc_Commit} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{commit}; + $data->{stats}{server_rpc_LookupP} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{lookupp}; + $data->{stats}{server_rpc_SetClId} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{setclientid}; + $data->{stats}{server_rpc_SetClIdCf} + = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{setclientidcfrm}; + $data->{stats}{server_rpc_Open} = 
$stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{open};
+ $data->{stats}{server_rpc_OpenAttr} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{openattr};
+ $data->{stats}{server_rpc_OpenDwnGr} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{opendwgr};
+ $data->{stats}{server_rpc_OpenCfrm} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{opencfrm};
+ $data->{stats}{server_rpc_DelePurge} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{delepurge};
+ $data->{stats}{server_rpc_DelRet} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{delegret}; # NOTE(review): was {getattr} (copy-paste); confirm libxo key name
+ $data->{stats}{server_rpc_GetFH} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{getfh};
+ $data->{stats}{server_rpc_Lock} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{lock};
+ $data->{stats}{server_rpc_LockT} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{lockt};
+ $data->{stats}{server_rpc_LockU} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{locku};
+ $data->{stats}{server_rpc_Close} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{close};
+ $data->{stats}{server_rpc_Verify} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{verify};
+ $data->{stats}{server_rpc_NVerify} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nverify};
+ $data->{stats}{server_rpc_PutFH} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{putfh};
+ $data->{stats}{server_rpc_PutPubFH} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{putpubfh};
+ $data->{stats}{server_rpc_PutRootFH} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{putrootfh};
+ $data->{stats}{server_rpc_Renew} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{renew};
+ $data->{stats}{server_rpc_RestoreFH} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{restore};
+ $data->{stats}{server_rpc_SaveFH} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{savefh};
+ $data->{stats}{server_rpc_Secinfo} = 
$stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{secinfo}; + $data->{stats}{server_rpc_RelLockOwn} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{rellockown}; + $data->{stats}{server_rpc_V4Create} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{v4create}; + $data->{stats}{server_rpc_BackChannelCt} + = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv41}{backchannelctrl}; + $data->{stats}{server_rpc_BindConnToSes} + = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv41}{bindconntosess}; + $data->{stats}{server_rpc_ExchangeID} + = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv41}{exchangeid}; + $data->{stats}{server_rpc_CreateSess} + = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv41}{createsess}; + $data->{stats}{server_rpc_DestroySess} + = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv41}{destroysess}; + $data->{stats}{server_rpc_FreeStateID} + = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv41}{freestateid}; + $data->{stats}{server_rpc_GetDirDeleg} + = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv41}{getdirdeleg}; + $data->{stats}{server_rpc_GetDevInfo} + = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv41}{getdevinfo}; + $data->{stats}{server_rpc_GetDevList} + = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv41}{getdevlist}; + $data->{stats}{server_rpc_layoutCommit} + = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv41}{layoutcommit}; + $data->{stats}{server_rpc_LayoutGet} + = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv41}{layoutget}; + $data->{stats}{server_rpc_LayoutReturn} + = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv41}{layoutreturn}; + $data->{stats}{server_rpc_GetDirDeleg} + = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv41}{getdirdeleg}; + $data->{stats}{server_rpc_GetDevInfo} + = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv41}{getdevinfo}; + 
$data->{stats}{server_rpc_GetDevList} + = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv41}{getdevlist}; + $data->{stats}{server_rpc_layoutCommit} + = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv41}{layoutcommit}; + $data->{stats}{server_rpc_LayoutGet} + = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv41}{layoutget}; + $data->{stats}{server_rpc_LayoutReturn} + = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv41}{layoutreturn}; + $data->{stats}{server_rpc_SecInfNoName} + = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv41}{reclaimcompl}; + $data->{stats}{server_rpc_Sequence} + = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv41}{sequence}; + $data->{stats}{server_rpc_SetSSV} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv41}{setssv}; + $data->{stats}{server_rpc_TestStateID} + = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv41}{teststateid}; + $data->{stats}{server_rpc_WantDeleg} + = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv41}{wantdeleg}; + $data->{stats}{server_rpc_DestroyClId} + = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv41}{destroyclid}; + $data->{stats}{server_rpc_ReclaimCompl} + = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv41}{reclaimcompl}; + $data->{stats}{server_rpc_Allocate} + = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv42}{allocate}; + $data->{stats}{server_rpc_Copy} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv42}{copy}; + $data->{stats}{server_rpc_CopyNotify} + = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv42}{copynotify}; + $data->{stats}{server_rpc_Deallocate} + = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv42}{deallocate}; + $data->{stats}{server_rpc_IOAdvise} + = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv42}{ioadvise}; + $data->{stats}{server_rpc_LayoutError} + = 
$stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv42}{layouterror}; + $data->{stats}{server_rpc_LayoutStats} + = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv42}{layoutstats}; + $data->{stats}{server_rpc_OffloadCncl} + = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv42}{offloadcncl}; + $data->{stats}{server_rpc_OffloadStat} + = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv42}{offloadstat}; + $data->{stats}{server_rpc_ReadPlus} + = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv42}{readplus}; + $data->{stats}{server_rpc_Seek} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv42}{seek}; + $data->{stats}{server_rpc_WriteSame} + = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv42}{writesame}; + $data->{stats}{server_rpc_Clone} = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv42}{clone}; + $data->{stats}{server_rpc_GetExtattr} + = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv42}{getextattr}; + $data->{stats}{server_rpc_SetExtattr} + = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv42}{setextattr}; + $data->{stats}{server_rpc_ListExtattr} + = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv42}{listextattr}; + $data->{stats}{server_rpc_RmExtattr} + = $stats_json->{nfsstat}{nfsv4}{serverstats}{operations}{nfsv42}{rmextattr}; + $data->{stats}{server_Clients} = $stats_json->{nfsstat}{nfsv4}{serverstats}{server}{clients}; + $data->{stats}{server_OpenOwner} = $stats_json->{nfsstat}{nfsv4}{serverstats}{server}{openowner}; + $data->{stats}{server_Opens} = $stats_json->{nfsstat}{nfsv4}{serverstats}{server}{opens}; + $data->{stats}{server_LockOwner} = $stats_json->{nfsstat}{nfsv4}{serverstats}{server}{lockowner}; + $data->{stats}{server_Locks} = $stats_json->{nfsstat}{nfsv4}{serverstats}{server}{locks}; + $data->{stats}{server_Delegs} = $stats_json->{nfsstat}{nfsv4}{serverstats}{server}{delegs}; + $data->{stats}{server_Layouts} = 
$stats_json->{nfsstat}{nfsv4}{serverstats}{server}{layouts}; + $data->{stats}{server_cache_Inprog} = $stats_json->{nfsstat}{nfsv4}{serverstats}{cache}{inprog}; + $data->{stats}{server_cache_NonIdem} = $stats_json->{nfsstat}{nfsv4}{serverstats}{cache}{nonidem}; + $data->{stats}{server_cache_Misses} = $stats_json->{nfsstat}{nfsv4}{serverstats}{cache}{misses}; + $data->{stats}{server_cache_CacheSize} = $stats_json->{nfsstat}{nfsv4}{serverstats}{cache}{cachesize}; + $data->{stats}{server_cache_TCPPeak} = $stats_json->{nfsstat}{nfsv4}{serverstats}{cache}{tcppeak}; + }; } ## end if ( $^O eq 'freebsd' ) #### @@ -640,169 +618,7 @@ if ( $^O eq 'linux' ) { $mode = 'server'; } if ( $previous_line =~ /calls\ +badcalls\ +badfmt\ +badauth\ +badclnt/ && $mode eq 'server' ) { - ( - $data->{stats}{server_calls}, $data->{stats}{'server_badcalls'}, $data->{stats}{server_badfmt}, - $data->{stats}{server_badauth}, $data->{stats}{server_badclnt}, - ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /null\ +compound/ && $mode eq 'client' ) { - ( $data->{stats}{server_null}, $data->{stats}{server_compound}, ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /op0\-unused\ +op1\-unused\ +op2\-future\ +access\ +close/ && $mode eq 'server' ) { - ( - $data->{stats}{'server_op0-unused'}, $data->{stats}{'server_op1-unused'}, - $data->{stats}{'server_op2-future'}, $data->{stats}{server_Access}, - $data->{stats}{server_Close}, - ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /commit\ +create\ +delegpurge\ +delegreturn\ +getattr/ && $mode eq 'server' ) { - ( - $data->{stats}{server_Commit}, $data->{stats}{server_Create}, $data->{stats}{server_DelePurge}, - $data->{stats}{server_Delegs}, $data->{stats}{server_Getattr}, - ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /getfh\ +link\ +lock\ +lockt\ +locku/ && $mode eq 'server' ) { - ( - $data->{stats}{server_GetFH}, $data->{stats}{server_Link}, $data->{stats}{server_Lock}, - 
$data->{stats}{server_LockT}, $data->{stats}{server_LockU}, - ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /lookup\ +lookup_root\ +nverify\ +open\ +openattr/ && $mode eq 'server' ) { - ( - $data->{stats}{server_Lookup}, $data->{stats}{server_LookupP}, $data->{stats}{server_NVerify}, - $data->{stats}{server_Open}, $data->{stats}{server_OpenAttr}, - ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /open_conf\ +open_dgrd\ +putfh\ putpubfh\ +putrootfh/ && $mode eq 'server' ) { - ( - $data->{stats}{server_OpenCfrm}, $data->{stats}{server_OpenDwnGr}, $data->{stats}{server_PutFH}, - $data->{stats}{server_PutPubFH}, $data->{stats}{server_PutRootFH}, - ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /read\ +readdir\ +readlink\ +remove\ +rename/ && $mode eq 'server' ) { - ( - $data->{stats}{server_Read}, $data->{stats}{server_Readdir}, $data->{stats}{server_Readlink}, - $data->{stats}{server_Remove}, $data->{stats}{server_Rename}, - ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /renew\ +restorefh\ +savefh\ +secinfo\ +setattr/ && $mode eq 'server' ) { - ( - $data->{stats}{server_Renew}, $data->{stats}{server_RestoreFH}, $data->{stats}{server_SaveFH}, - $data->{stats}{server_Secinfo}, $data->{stats}{server_Setattr}, - ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /setcltid\ +setcltidconf\ +verify\ +write\ +rellockowner/ && $mode eq 'server' ) { - ( - $data->{stats}{server_SetClId}, $data->{stats}{server_SetClIdCf}, - $data->{stats}{server_Verify}, $data->{stats}{server_Write}, - $data->{stats}{server_RelLockOwn}, - ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /bc_ctl\ +bind_conn\ +exchange_id\ +create_ses\ +destroy_ses/ && $mode eq 'server' ) { - ( - $data->{stats}{server_BackChannelCt}, $data->{stats}{server_BindConnToSes}, - $data->{stats}{server_ExchangeID}, $data->{stats}{server_CreateSess}, - $data->{stats}{server_DestroySess}, - ) = split( /[\ \t]+/m, $line ); - } elsif ( 
$previous_line =~ /free_stateid\ +getdirdeleg\ +getdevinfo\ +getdevlist\ +layoutcommit/ && $mode eq 'server' ) { - ( - $data->{stats}{server_FreeStateID}, $data->{stats}{server_GetDirDeleg}, - $data->{stats}{server_GetDevInfo}, $data->{stats}{server_GetDevList}, - $data->{stats}{server_layoutCommit}, - ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /layoutget\ +layoutreturn\ +secinfononam\ +sequence\ +set_ssv/ && $mode eq 'server' ) { - ( - $data->{stats}{server_LayoutGet}, $data->{stats}{server_LayoutReturn}, - $data->{stats}{server_SecInfNoName}, $data->{stats}{server_Sequence}, - $data->{stats}{server_SetSSV}, - ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /test_stateid\ +want_deleg\ +destroy_clid\ +reclaim_comp\ +allocate/ && $mode eq 'server' ) { - ( - $data->{stats}{server_TestStateID}, $data->{stats}{server_WantDeleg}, - $data->{stats}{server_DestroyClId}, $data->{stats}{server_ReclaimCompl}, - $data->{stats}{server_Allocate}, - ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /copy\ +copy_notify\ +deallocate\ +ioadvise\ +layouterror/ && $mode eq 'server' ) { - ( - $data->{stats}{server_Copy}, $data->{stats}{server_CopyNotify}, - $data->{stats}{server_Deallocate}, $data->{stats}{server_IOAdvise}, - $data->{stats}{server_LayoutError}, - ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /layoutstats\ +offloadcancel\ +offloadstatus\ +readplus\ +seek/ && $mode eq 'server' ) { - ( - $data->{stats}{server_Layouts}, $data->{stats}{server_OffloadCncl}, - $data->{stats}{server_OffloadStat}, $data->{stats}{server_ReadPlus}, - $data->{stats}{server_Seek}, - ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /write_same/ ) { - ( $data->{stats}{server_WriteSame} ) = split( /[\ \t]+/m, $line && $mode eq 'client' ); - } elsif ( $previous_line =~ /calls\ +retrans\ +authrefrsh/ ) { - ( - $data->{stats}{client_rpc_info_Requests}, - $data->{stats}{client_rpc_info_Retries}, - 
$data->{stats}{client_rpc_info_X_Replies} - ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /null\ +read\ +write\ +commit\ +open/ && $mode eq 'client' ) { - ( - $data->{stats}{client_rpc_null}, $data->{stats}{client_rpc_Read}, - $data->{stats}{client_rpc_Write}, $data->{stats}{client_rpc_Commit}, - $data->{stats}{client_rpc_Open}, - ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /open_conf\ +open_noat\ +open_dgrd\ +close\ +setattr/ && $mode eq 'client' ) { - ( - $data->{stats}{client_rpc_OpenCfr}, $data->{stats}{client_rpc_OpenLayout}, - $data->{stats}{client_rpc_OpenDownGr}, $data->{stats}{client_rpc_Commit}, - $data->{stats}{client_rpc_Open}, - ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /fsinfo\ +renew\ +setclntid\ +confirm\ +lock/ && $mode eq 'client' ) { - ( - $data->{stats}{client_rpc_FSinfo}, $data->{stats}{client_rpc_renew}, - $data->{stats}{client_rpc_SetClId}, $data->{stats}{client_rpc_confirm}, - $data->{stats}{client_rpc_Lock}, - ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /lockt\ +locku\ +access\ +getattr\ +lookup/ && $mode eq 'client' ) { - ( - $data->{stats}{client_rpc_LockT}, $data->{stats}{client_rpc_LockU}, - $data->{stats}{client_rpc_Access}, $data->{stats}{client_rpc_Getattr}, - $data->{stats}{client_rpc_Lookup}, - ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /lookup_root\ +remove\ +rename\ +link\ +symlink/ && $mode eq 'client' ) { - ( - $data->{stats}{client_rpc_LookOpen}, $data->{stats}{client_rpc_Remove}, - $data->{stats}{client_rpc_Rename}, $data->{stats}{client_rpc_Link}, - $data->{stats}{client_rpc_Symlink}, - ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /create\ +pathconf\ +statfs\ +readlink\ +readdir/ && $mode eq 'client' ) { - ( - $data->{stats}{client_rpc_Create}, $data->{stats}{client_rpc_pathConf}, - $data->{stats}{client_rpc_statfs}, $data->{stats}{client_rpc_Readlink}, - $data->{stats}{client_rpc_Readlink}, - ) = split( /[\ \t]+/m, 
$line ); - } elsif ( $previous_line =~ /server_caps\ +delegreturn\ +getacl\ +setacl\ +fs_locations/ && $mode eq 'client' ) { - ( - $data->{stats}{client_rpc_server_caps}, $data->{stats}{client_rpc_DelegRet}, - $data->{stats}{client_rpc_GetAcl}, $data->{stats}{client_rpc_SetAcl}, - $data->{stats}{client_rpc_fs_locations}, - ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /rel_lkowner\ +secinfo\ +fsid_present\ +exchange_id\ +create_session/ && $mode eq 'client' ) { - ( - $data->{stats}{client_rpc_RelLckOwn}, $data->{stats}{client_rpc_secinfo}, - $data->{stats}{client_rpc_fsid_present}, $data->{stats}{client_rpc_ExchangeId}, - $data->{stats}{client_rpc_CreateSess}, - ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /destroy_session\ +sequence\ +get_lease_time\ +reclaim_comp\ +layoutget/ && $mode eq 'client' ) { - ( - $data->{stats}{client_rpc_DestroySess}, $data->{stats}{client_rpc_sequence}, - $data->{stats}{client_rpc_get_lease_time}, $data->{stats}{client_rpc_ReclaimCompl}, - $data->{stats}{client_rpc_LayoutGet}, - ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /getdevinfo\ +layoutcommit\ +layoutreturn\ +secinfo_no\ +test_stateid/ && $mode eq 'client' ) { - ( - $data->{stats}{client_rpc_GetDevInfo}, $data->{stats}{client_rpc_LayoutCommit}, - $data->{stats}{client_rpc_LayoutReturn}, $data->{stats}{client_rpc_secinfo_no}, - $data->{stats}{client_rpc_test_stateid}, - ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /free_stateid\ +getdevicelist\ +bind_conn_to_ses\ +destroy_clientid\ +seek/ && $mode eq 'client' ) { - ( - $data->{stats}{client_rpc_FreeStateID}, $data->{stats}{client_rpc_getdevicelist}, - $data->{stats}{client_rpc_BindConnSess}, $data->{stats}{client_rpc_DestroyClId}, - $data->{stats}{client_rpc_Seek}, - ) = split( /[\ \t]+/m, $line ); - } elsif ( $previous_line =~ /allocate\ +deallocate\ +layoutstats\ +clone/ && $mode eq 'client' ) { - ( - $data->{stats}{client_rpc_Allocate}, 
$data->{stats}{client_rpc_Deallocate}, - $data->{stats}{client_rpc_layoutstats}, $data->{stats}{client_rpc_clone}, - ) = split( /[\ \t]+/m, $line ); + } $previous_line = $line; } ## end foreach my $line (@output_split) @@ -815,9 +631,9 @@ if ( $^O eq 'linux' ) { #### my @stat_keys = keys( %{ $data->{stats} } ); foreach my $item (@stat_keys) { - if ( $item =~ /^client/ && defined($data->{stats}{$item}) && $data->{stats}{$item} > 0 ) { + if ( $item =~ /^client/ && defined( $data->{stats}{$item} ) && $data->{stats}{$item} > 0 ) { $data->{is_client} = 1; - } elsif ( $item =~ /^server/ && defined($data->{stats}{$item}) && $data->{stats}{$item} > 0 ) { + } elsif ( $item =~ /^server/ && defined( $data->{stats}{$item} ) && $data->{stats}{$item} > 0 ) { $data->{is_server} = 1; } } From 3faf2c2a5aa7a853b48266e524cf163e1cf673dd Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Sat, 9 Mar 2024 16:46:40 -0600 Subject: [PATCH 415/497] massively re-work it and clean it up --- snmp/nfs | 822 ++++++++++++++++++++++++++++++++++++++++++------------- 1 file changed, 627 insertions(+), 195 deletions(-) diff --git a/snmp/nfs b/snmp/nfs index 87f518458..c6391d0a5 100755 --- a/snmp/nfs +++ b/snmp/nfs @@ -6,7 +6,7 @@ nfs - LibreNMS JSON style SNMP extend for NFS monitoring =head1 VERSION -0.0.1 +0.0.2 =head1 SYNOPSIS @@ -132,198 +132,253 @@ my $data = { mounts => [], mounted_by => [], stats => { - client_rpc_Getattr => undef, - client_rpc_Setattr => undef, - client_rpc_Lookup => undef, - client_rpc_Readlink => undef, - client_rpc_Read => undef, - client_rpc_Write => undef, - client_rpc_Create => undef, - client_rpc_Remove => undef, - client_rpc_Rename => undef, - client_rpc_Link => undef, - client_rpc_Symlink => undef, - client_rpc_Mkdir => undef, - client_rpc_Rmdir => undef, - client_rpc_Readdir => undef, - client_rpc_RdirPlus => undef, - client_rpc_Access => undef, - client_rpc_Mknod => undef, - client_rpc_Fsstat => undef, - client_rpc_FSinfo => undef, - client_rpc_pathConf => 
undef, - client_rpc_Commit => undef, - client_rpc_SetClId => undef, - client_rpc_SetClIdCf => undef, - client_rpc_Lock => undef, - client_rpc_LockT => undef, - client_rpc_LockU => undef, - client_rpc_Open => undef, - client_rpc_OpenCfr => undef, - client_rpc_OpenDownGr => undef, - client_rpc_Close => undef, - client_rpc_RelLckOwn => undef, - client_rpc_FreeStateID => undef, - client_rpc_PutRootFH => undef, - client_rpc_DelegRet => undef, - client_rpc_GetAcl => undef, - client_rpc_SetAcl => undef, - client_rpc_ExchangeId => undef, - client_rpc_CreateSess => undef, - client_rpc_DestroySess => undef, - client_rpc_DestroyClId => undef, - client_rpc_LayoutGet => undef, - client_rpc_GetDevInfo => undef, - client_rpc_LayoutCommit => undef, - client_rpc_LayoutReturn => undef, - client_rpc_ReclaimCompl => undef, - client_rpc_ReadDataS => undef, - client_rpc_WriteDataS => undef, - client_rpc_CommitDataS => undef, - client_rpc_OpenLayout => undef, - client_rpc_CreateLayout => undef, - client_rpc_BindConnSess => undef, - client_rpc_LookupOpen => undef, - client_rpc_IOAdvise => undef, - client_rpc_Allocate => undef, - client_rpc_Copy => undef, - client_rpc_Seek => undef, - client_rpc_SeekDataS => undef, - client_rpc_GetExtattr => undef, - client_rpc_SetExtattr => undef, - client_rpc_RmExtattr => undef, - client_rpc_ListExtattr => undef, - client_rpc_Deallocate => undef, - client_rpc_LayoutError => undef, - client_OpenOwner => undef, - client_Opens => undef, - client_LockOwner => undef, - client_Locks => undef, - client_Delegs => undef, - client_LocalOwn => undef, - client_LocalOpen => undef, - client_LocalLown => undef, - client_LocalLock => undef, - client_Layouts => undef, - client_rpc_info_TimedOut => undef, - client_rpc_info_Invalid => undef, - client_rpc_info_X_Replies => undef, - client_rpc_info_Retries => undef, - client_rpc_info_Requests => undef, - client_cache_Attr_Hits => undef, - client_cache_Attr_Misses => undef, - client_cache_Lkup_Hits => undef, - 
client_cache_Lkup_Misses => undef, - client_cache_BioR_Hits => undef, - client_cache_BioR_Misses => undef, - client_cache_BioW_Hits => undef, - client_cache_BioW_Misses => undef, - client_cache_BioRL_Hits => undef, - client_cache_BioRL_Misses => undef, - client_cache_BioD_Hits => undef, - client_cache_BioD_Misses => undef, - client_cache_DirE_Hits => undef, - client_cache_DirE_Misses => undef, - server_rpc_Getattr => undef, - server_rpc_Setattr => undef, - server_rpc_Lookup => undef, - server_rpc_Readlink => undef, - server_rpc_Read => undef, - server_rpc_Write => undef, - server_rpc_Create => undef, - server_rpc_Remove => undef, - server_rpc_Rename => undef, - server_rpc_Link => undef, - server_rpc_Symlink => undef, - server_rpc_Mkdir => undef, - server_rpc_Rmdir => undef, - server_rpc_Readdir => undef, - server_rpc_RdirPlus => undef, - server_rpc_Access => undef, - server_rpc_Mknod => undef, - server_rpc_Fsstat => undef, - server_rpc_FSinfo => undef, - server_rpc_pathConf => undef, - server_rpc_Commit => undef, - server_rpc_LookupP => undef, - server_rpc_SetClId => undef, - server_rpc_SetClIdCf => undef, - server_rpc_Open => undef, - server_rpc_OpenAttr => undef, - server_rpc_OpenDwnGr => undef, - server_rpc_OpenCfrm => undef, - server_rpc_DelePurge => undef, - server_rpc_DelRet => undef, - server_rpc_GetFH => undef, - server_rpc_Lock => undef, - server_rpc_LockT => undef, - server_rpc_LockU => undef, - server_rpc_Close => undef, - server_rpc_Verify => undef, - server_rpc_NVerify => undef, - server_rpc_PutFH => undef, - server_rpc_PutPubFH => undef, - server_rpc_PutRootFH => undef, - server_rpc_Renew => undef, - server_rpc_RestoreFH => undef, - server_rpc_SaveFH => undef, - server_rpc_Secinfo => undef, - server_rpc_RelLockOwn => undef, - server_rpc_V4Create => undef, - server_rpc_BackChannelCt => undef, - server_rpc_BindConnToSes => undef, - server_rpc_ExchangeID => undef, - server_rpc_CreateSess => undef, - server_rpc_DestroySess => undef, - 
server_rpc_FreeStateID => undef, - server_rpc_GetDirDeleg => undef, - server_rpc_GetDevInfo => undef, - server_rpc_GetDevList => undef, - server_rpc_layoutCommit => undef, - server_rpc_LayoutGet => undef, - server_rpc_LayoutReturn => undef, - server_rpc_GetDirDeleg => undef, - server_rpc_GetDevInfo => undef, - server_rpc_GetDevList => undef, - server_rpc_layoutCommit => undef, - server_rpc_LayoutGet => undef, - server_rpc_LayoutReturn => undef, - server_rpc_SecInfNoName => undef, - server_rpc_Sequence => undef, - server_rpc_SetSSV => undef, - server_rpc_TestStateID => undef, - server_rpc_WantDeleg => undef, - server_rpc_DestroyClId => undef, - server_rpc_ReclaimCompl => undef, - server_rpc_Allocate => undef, - server_rpc_Copy => undef, - server_rpc_CopyNotify => undef, - server_rpc_Deallocate => undef, - server_rpc_IOAdvise => undef, - server_rpc_LayoutError => undef, - server_rpc_LayoutStats => undef, - server_rpc_OffloadCncl => undef, - server_rpc_OffloadStat => undef, - server_rpc_ReadPlus => undef, - server_rpc_Seek => undef, - server_rpc_WriteSame => undef, - server_rpc_Clone => undef, - server_rpc_GetExtattr => undef, - server_rpc_SetExtattr => undef, - server_rpc_ListExtattr => undef, - server_rpc_RmExtattr => undef, - server_Clients => undef, - server_OpenOwner => undef, - server_Opens => undef, - server_LockOwner => undef, - server_Locks => undef, - server_Delegs => undef, - server_Layouts => undef, - server_cache_Inprog => undef, - server_cache_NonIdem => undef, - server_cache_Misses => undef, - server_cache_CacheSize => undef, - server_cache_TCPPeak => undef, + client_rpc_null => 0, + client_rpc_root => 0, + client_rpc_confirm => 0, + client_rpc_server_caps => 0, + client_rpc_fs_locations => 0, + client_rpc_secinfo => 0, + client_rpc_fsid_present => 0, + client_rpc_sequence => 0, + client_rpc_get_lease_time => 0, + client_rpc_test_stateid => 0, + client_rpc_secinfo_no => 0, + client_rpc_getdevicelist => 0, + client_rpc_layoutstats => 0, + 
client_rpc_wrcache => 0, + client_rpc_Getattr => 0, + client_rpc_Setattr => 0, + client_rpc_Lookup => 0, + client_rpc_Readlink => 0, + client_rpc_Read => 0, + client_rpc_Write => 0, + client_rpc_Create => 0, + client_rpc_Remove => 0, + client_rpc_Rename => 0, + client_rpc_Link => 0, + client_rpc_Symlink => 0, + client_rpc_Mkdir => 0, + client_rpc_Rmdir => 0, + client_rpc_Readdir => 0, + client_rpc_RdirPlus => 0, + client_rpc_Access => 0, + client_rpc_Mknod => 0, + client_rpc_Fsstat => 0, + client_rpc_FSinfo => 0, + client_rpc_pathConf => 0, + client_rpc_Commit => 0, + client_rpc_SetClId => 0, + client_rpc_SetClIdCf => 0, + client_rpc_Lock => 0, + client_rpc_LockT => 0, + client_rpc_LockU => 0, + client_rpc_Open => 0, + client_rpc_OpenCfr => 0, + client_rpc_OpenDownGr => 0, + client_rpc_Close => 0, + client_rpc_RelLckOwn => 0, + client_rpc_FreeStateID => 0, + client_rpc_PutRootFH => 0, + client_rpc_DelegRet => 0, + client_rpc_GetAcl => 0, + client_rpc_SetAcl => 0, + client_rpc_ExchangeId => 0, + client_rpc_CreateSess => 0, + client_rpc_DestroySess => 0, + client_rpc_DestroyClId => 0, + client_rpc_LayoutGet => 0, + client_rpc_GetDevInfo => 0, + client_rpc_LayoutCommit => 0, + client_rpc_LayoutReturn => 0, + client_rpc_ReclaimCompl => 0, + client_rpc_ReadDataS => 0, + client_rpc_WriteDataS => 0, + client_rpc_CommitDataS => 0, + client_rpc_OpenLayout => 0, + client_rpc_CreateLayout => 0, + client_rpc_BindConnSess => 0, + client_rpc_LookupOpen => 0, + client_rpc_IOAdvise => 0, + client_rpc_Allocate => 0, + client_rpc_Copy => 0, + client_rpc_Seek => 0, + client_rpc_SeekDataS => 0, + client_rpc_GetExtattr => 0, + client_rpc_SetExtattr => 0, + client_rpc_RmExtattr => 0, + client_rpc_ListExtattr => 0, + client_rpc_Deallocate => 0, + client_rpc_LayoutError => 0, + client_OpenOwner => 0, + client_Opens => 0, + client_LockOwner => 0, + client_Locks => 0, + client_Delegs => 0, + client_LocalOwn => 0, + client_LocalOpen => 0, + client_LocalLown => 0, + client_LocalLock => 0, + 
client_Layouts => 0, + client_rpc_info_TimedOut => 0, + client_rpc_info_Invalid => 0, + client_rpc_info_X_Replies => 0, + client_rpc_info_Retries => 0, + client_rpc_info_Requests => 0, + client_rpc_info_authrefrsh => 0, + client_cache_Attr_Hits => 0, + client_cache_Attr_Misses => 0, + client_cache_Lkup_Hits => 0, + client_cache_Lkup_Misses => 0, + client_cache_BioR_Hits => 0, + client_cache_BioR_Misses => 0, + client_cache_BioW_Hits => 0, + client_cache_BioW_Misses => 0, + client_cache_BioRL_Hits => 0, + client_cache_BioRL_Misses => 0, + client_cache_BioD_Hits => 0, + client_cache_BioD_Misses => 0, + client_cache_DirE_Hits => 0, + client_cache_DirE_Misses => 0, + client_network_packets => 0, + client_network_udp => 0, + client_network_tcp => 0, + client_network_tcpconn => 0, + server_rpc_Getattr => 0, + server_rpc_Setattr => 0, + server_rpc_Lookup => 0, + server_rpc_Readlink => 0, + server_rpc_Read => 0, + server_rpc_Write => 0, + server_rpc_Create => 0, + server_rpc_Remove => 0, + server_rpc_Rename => 0, + server_rpc_Link => 0, + server_rpc_Symlink => 0, + server_rpc_Mkdir => 0, + server_rpc_Rmdir => 0, + server_rpc_Readdir => 0, + server_rpc_RdirPlus => 0, + server_rpc_Access => 0, + server_rpc_Mknod => 0, + server_rpc_Fsstat => 0, + server_rpc_FSinfo => 0, + server_rpc_pathConf => 0, + server_rpc_Commit => 0, + server_rpc_LookupP => 0, + server_rpc_SetClId => 0, + server_rpc_SetClIdCf => 0, + server_rpc_Open => 0, + server_rpc_OpenAttr => 0, + server_rpc_OpenDwnGr => 0, + server_rpc_OpenCfrm => 0, + server_rpc_DelePurge => 0, + server_rpc_DelRet => 0, + server_rpc_GetFH => 0, + server_rpc_Lock => 0, + server_rpc_LockT => 0, + server_rpc_LockU => 0, + server_rpc_Close => 0, + server_rpc_Verify => 0, + server_rpc_NVerify => 0, + server_rpc_PutFH => 0, + server_rpc_PutPubFH => 0, + server_rpc_PutRootFH => 0, + server_rpc_Renew => 0, + server_rpc_RestoreFH => 0, + server_rpc_SaveFH => 0, + server_rpc_Secinfo => 0, + server_rpc_RelLockOwn => 0, + server_rpc_V4Create 
=> 0, + server_rpc_BackChannelCt => 0, + server_rpc_BindConnToSes => 0, + server_rpc_ExchangeID => 0, + server_rpc_CreateSess => 0, + server_rpc_DestroySess => 0, + server_rpc_FreeStateID => 0, + server_rpc_GetDirDeleg => 0, + server_rpc_GetDevInfo => 0, + server_rpc_GetDevList => 0, + server_rpc_layoutCommit => 0, + server_rpc_LayoutGet => 0, + server_rpc_LayoutReturn => 0, + server_rpc_GetDirDeleg => 0, + server_rpc_GetDevInfo => 0, + server_rpc_GetDevList => 0, + server_rpc_layoutCommit => 0, + server_rpc_LayoutGet => 0, + server_rpc_LayoutReturn => 0, + server_rpc_SecInfNoName => 0, + server_rpc_Sequence => 0, + server_rpc_SetSSV => 0, + server_rpc_TestStateID => 0, + server_rpc_WantDeleg => 0, + server_rpc_DestroyClId => 0, + server_rpc_ReclaimCompl => 0, + server_rpc_Allocate => 0, + server_rpc_Copy => 0, + server_rpc_CopyNotify => 0, + server_rpc_Deallocate => 0, + server_rpc_IOAdvise => 0, + server_rpc_LayoutError => 0, + server_rpc_LayoutStats => 0, + server_rpc_OffloadCncl => 0, + server_rpc_OffloadStat => 0, + server_rpc_ReadPlus => 0, + server_rpc_Seek => 0, + server_rpc_WriteSame => 0, + server_rpc_Clone => 0, + server_rpc_GetExtattr => 0, + server_rpc_SetExtattr => 0, + server_rpc_ListExtattr => 0, + server_rpc_RmExtattr => 0, + server_Clients => 0, + server_OpenOwner => 0, + server_Opens => 0, + server_LockOwner => 0, + server_Locks => 0, + server_Delegs => 0, + server_Layouts => 0, + server_network_packets => 0, + server_network_udp => 0, + server_network_tcp => 0, + server_network_tcpconn => 0, + server_rpcStats_calls => 0, + server_rpcStats_badcalls => 0, + server_rpcStats_badfmt => 0, + server_rpcStats_badauth => 0, + server_rpcStats_badclnt => 0, + server_cache_Inprog => 0, + server_cache_NonIdem => 0, + server_cache_Misses => 0, + server_cache_CacheSize => 0, + server_cache_TCPPeak => 0, + server_cache_hits => 0, + server_cache_nocache => 0, + server_io_read => 0, + server_io_write => 0, + server_RAcache_0 => 0, + server_RAcache_1 => 0, + 
server_RAcache_2 => 0, + server_RAcache_3 => 0, + server_RAcache_4 => 0, + server_RAcache_5 => 0, + server_RAcache_6 => 0, + server_RAcache_7 => 0, + server_RAcache_8 => 0, + server_RAcache_9 => 0, + server_RAcache_notfound => 0, + server_FHcache_lookup => 0, + server_FHcache_anon => 0, + server_FHcache_ncachedir => 0, + server_FHcache_ncachenondir => 0, + server_FHcache_stale => 0, + server_rpc_null => 0, + server_rpc_root => 0, + server_rpc_wrcache => 0, + server_rpc_compound => 0, + server_rpc_op0_unused => 0, + server_rpc_op1_unused => 0, + server_rpc_op2_future => 0, } }; @@ -611,14 +666,391 @@ if ( $^O eq 'linux' ) { my @output_split = split( /\n/, $output_raw ); my $previous_line = ''; my $mode = ''; + foreach my $line (@output_split) { if ( $line =~ /^[Cc]lient/ ) { $mode = 'client'; } elsif ( $line =~ /^[Ss]erver/ ) { $mode = 'server'; } - if ( $previous_line =~ /calls\ +badcalls\ +badfmt\ +badauth\ +badclnt/ && $mode eq 'server' ) { - + if ( $previous_line =~ /packets\ +udp\ +tcp\ +tcpconn/ && $mode eq 'server' ) { + ( + $data->{stats}{server_network_packets}, $data->{stats}{server_network_udp}, + $data->{stats}{server_network_tcp}, $data->{stats}{server_network_tcpconn}, + ) = split( /[\t\ ]+/, $line ); + } elsif ( $previous_line =~ /calls\ +badcalls\ +badfmt\ +badauth\ +badclnt/ && $mode eq 'server' ) { + ( + $data->{stats}{server_rpcStats_calls}, $data->{stats}{server_rpcStats_badcalls}, + $data->{stats}{server_rpcStats_badfmt}, $data->{stats}{server_rpcStats_badauth}, + $data->{stats}{server_rpcStats_badclnt}, + ) = split( /[\t\ ]+/, $line ); + } elsif ( $previous_line =~ /hits\ +misses\ +nocache/ && $mode eq 'server' ) { + ( + $data->{stats}{server_cache_hits}, + $data->{stats}{server_cache_Misses}, + $data->{stats}{server_cache_nocache}, + ) = split( /[\t\ ]+/, $line ); + } elsif ( $previous_line =~ /read\ +write/ && $mode eq 'server' ) { + ( $data->{stats}{server_io_read}, $data->{stats}{server_io_write}, ) = split( /[\t\ ]+/, $line ); + } elsif ( 
$previous_line + =~ /size\ +0\-10\%\ +10\-20\%\ +20\-3\0%\ +30\-40\%\ +40\-50\%\ +50\-60\%\ +60\-70\%\ +70\-80\%\ +80\-90\%\ +90\-100\%\ +notfound/ + && $mode eq 'server' ) + { + ( + $data->{stats}{server_RAcache_0}, $data->{stats}{server_RAcache_1}, + $data->{stats}{server_RAcache_2}, $data->{stats}{server_RAcache_3}, + $data->{stats}{server_RAcache_4}, $data->{stats}{server_RAcache_5}, + $data->{stats}{server_RAcache_6}, $data->{stats}{server_RAcache_7}, + $data->{stats}{server_RAcache_8}, $data->{stats}{server_RAcache_9}, + $data->{stats}{server_RAcache_notfound} + ) = split( /[\t\ ]+/, $line ); + } elsif ( $previous_line =~ /lookup\ +anon\ +ncachedir\ +ncachenondir\ +stale/ && $mode eq 'server' ) { + ( + $data->{stats}{server_FHcache_lookup}, $data->{stats}{server_FHcache_anon}, + $data->{stats}{server_FHcache_ncachedir}, $data->{stats}{server_FHcache_ncachenondir}, + $data->{stats}{server_FHcache_stale}, + ) = split( /[\t\ ]+/, $line ); + } elsif ( $previous_line =~ /null\ +getattr\ +setattr\ +root\ +lookup/ && $mode eq 'server' ) { + my @split = split( /[\t\ ]+/, $line ); + $data->{stats}{server_rpc_null} += $split[0]; + $data->{stats}{server_rpc_Getattr} += $split[1]; + $data->{stats}{server_rpc_Setattr} += $split[2]; + $data->{stats}{server_rpc_root} += $split[3]; + $data->{stats}{server_rpc_Lookup} += $split[4]; + } elsif ( $previous_line =~ /readlink\ +read\ +wrcache\ +write\ +create/ && $mode eq 'server' ) { + my @split = split( /[\t\ ]+/, $line ); + $data->{stats}{server_rpc_Readlink} += $split[0]; + $data->{stats}{server_rpc_Read} += $split[1]; + $data->{stats}{server_rpc_wrcache} += $split[2]; + $data->{stats}{server_rpc_Write} += $split[3]; + $data->{stats}{server_rpc_Create} += $split[4]; + } elsif ( $previous_line =~ /remove\ +rename\ +link\ +symlink\ +mkdir/ && $mode eq 'server' ) { + my @split = split( /[\t\ ]+/, $line ); + $data->{stats}{server_rpc_Remove} += $split[0]; + $data->{stats}{server_rpc_Rename} += $split[1]; + 
$data->{stats}{server_rpc_Link} += $split[2]; + $data->{stats}{server_rpc_Symlink} += $split[3]; + $data->{stats}{server_rpc_Mkdir} += $split[4]; + } elsif ( $previous_line =~ /rmdir\ +readdir\ +fsstat/ && $mode eq 'server' ) { + my @split = split( /[\t\ ]+/, $line ); + $data->{stats}{server_rpc_Rmdir} += $split[0]; + $data->{stats}{server_rpc_Readdir} += $split[1]; + $data->{stats}{server_rpc_Fsstat} += $split[2]; + } elsif ( $previous_line =~ /null\ +getattr\ +setattr\ +lookup\ +access/ && $mode eq 'server' ) { + my @split = split( /[\t\ ]+/, $line ); + $data->{stats}{server_rpc_null} += $split[0]; + $data->{stats}{server_rpc_Getattr} += $split[1]; + $data->{stats}{server_rpc_Setattr} += $split[2]; + $data->{stats}{server_rpc_Lookup} += $split[3]; + $data->{stats}{server_rpc_Access} += $split[4]; + } elsif ( $previous_line =~ /readlink\ +read\ +write\ +create\ +mkdir/ && $mode eq 'server' ) { + my @split = split( /[\t\ ]+/, $line ); + $data->{stats}{server_rpc_Readlink} += $split[0]; + $data->{stats}{server_rpc_Read} += $split[1]; + $data->{stats}{server_rpc_Write} += $split[2]; + $data->{stats}{server_rpc_Create} += $split[3]; + $data->{stats}{server_rpc_Mkdir} += $split[4]; + } elsif ( $previous_line =~ /symlink\ +mknod\ +remove\ +rmdir\ +rename/ && $mode eq 'server' ) { + my @split = split( /[\t\ ]+/, $line ); + $data->{stats}{server_rpc_Symlink} += $split[0]; + $data->{stats}{server_rpc_Mknod} += $split[1]; + $data->{stats}{server_rpc_Remove} += $split[2]; + $data->{stats}{server_rpc_Rmdir} += $split[3]; + $data->{stats}{server_rpc_Rename} += $split[4]; + } elsif ( $previous_line =~ /link\ +readdir\ +readdirplus\ +fsstat\ +fsinfo/ && $mode eq 'server' ) { + my @split = split( /[\t\ ]+/, $line ); + $data->{stats}{server_rpc_Link} += $split[0]; + $data->{stats}{server_rpc_Readdir} += $split[1]; + $data->{stats}{server_rpc_ReadPlus} += $split[2]; + $data->{stats}{server_rpc_Fsstat} += $split[3]; + $data->{stats}{server_rpc_FSinfo} += $split[4]; + } elsif ( 
$previous_line =~ /pathconf\ +commit/ && $mode eq 'server' ) { + my @split = split( /[\t\ ]+/, $line ); + $data->{stats}{server_rpc_pathConf} += $split[0]; + $data->{stats}{server_rpc_Commit} += $split[1]; + } elsif ( $previous_line =~ /null\ +compound/ && $mode eq 'server' ) { + my @split = split( /[\t\ ]+/, $line ); + $data->{stats}{server_rpc_null} += $split[0]; + $data->{stats}{server_rpc_compound} += $split[1]; + } elsif ( $previous_line =~ /op0\-unused\ +op1\-unused\ +op2\-future\ +access\ +close/ && $mode eq 'server' ) { + my @split = split( /[\t\ ]+/, $line ); + $data->{stats}{server_rpc_op0_unused} += $split[0]; + $data->{stats}{server_rpc_op1_unused} += $split[1]; + $data->{stats}{server_rpc_op2_future} += $split[2]; + $data->{stats}{server_rpc_Access} += $split[3]; + $data->{stats}{server_rpc_Close} += $split[4]; + } elsif ( $previous_line =~ /commit\ +create\ +delegpurge\ +delegreturn\ +getattr/ && $mode eq 'server' ) { + my @split = split( /[\t\ ]+/, $line ); + $data->{stats}{server_rpc_Commit} += $split[0]; + $data->{stats}{server_rpc_Create} += $split[1]; + $data->{stats}{server_rpc_DelePurge} = $split[2]; + $data->{stats}{server_rpc_DelRet} = $split[3]; + $data->{stats}{server_rpc_Getattr} += $split[4]; + } elsif ( $previous_line =~ /getfh\ +link\ +lock\ +lockt\ +locku/ && $mode eq 'server' ) { + my @split = split( /[\t\ ]+/, $line ); + $data->{stats}{server_rpc_GetFH} += $split[0]; + $data->{stats}{server_rpc_Link} += $split[1]; + $data->{stats}{server_rpc_Lock} += $split[2]; + $data->{stats}{server_rpc_LockT} += $split[3]; + $data->{stats}{server_rpc_LockU} += $split[4]; + } elsif ( $previous_line =~ /lookup\ +lookup_root\ +nverify\ +open\ +openattr/ && $mode eq 'server' ) { + my @split = split( /[\t\ ]+/, $line ); + $data->{stats}{server_rpc_Lookup} += $split[0]; + $data->{stats}{server_rpc_LookupP} += $split[1]; + $data->{stats}{server_rpc_NVerify} += $split[2]; + $data->{stats}{server_rpc_Open} += $split[3]; + $data->{stats}{server_rpc_OpeAttr} 
+= $split[4]; + } elsif ( $previous_line =~ /open_conf\ +open_dgrd\ +putfh\ +putpubfh\ +putrootfh/ && $mode eq 'server' ) { + my @split = split( /[\t\ ]+/, $line ); + $data->{stats}{server_rpc_OpenCfrm} += $split[0]; + $data->{stats}{server_rpc_OpenDwnGr} += $split[1]; + $data->{stats}{server_rpc_PutFH} += $split[2]; + $data->{stats}{server_rpc_PutPubFH} += $split[3]; + $data->{stats}{server_rpc_PutRootFH} += $split[4]; + } elsif ( $previous_line =~ /read\ +readdir\ +readlink\ +remove\ +rename/ && $mode eq 'server' ) { + my @split = split( /[\t\ ]+/, $line ); + $data->{stats}{server_rpc_Read} += $split[0]; + $data->{stats}{server_rpc_Readdir} += $split[1]; + $data->{stats}{server_rpc_Readlink} += $split[2]; + $data->{stats}{server_rpc_Remove} += $split[3]; + $data->{stats}{server_rpc_Rename} += $split[4]; + } elsif ( $previous_line =~ /renew\ +restorefh\ +savefh\ +secinfo\ +setattr/ && $mode eq 'server' ) { + my @split = split( /[\t\ ]+/, $line ); + $data->{stats}{server_rpc_Renew} += $split[0]; + $data->{stats}{server_rpc_RestoreFH} += $split[1]; + $data->{stats}{server_rpc_SaveFH} += $split[2]; + $data->{stats}{server_rpc_Secinfo} += $split[3]; + $data->{stats}{server_rpc_Setattr} += $split[4]; + } elsif ( $previous_line =~ /setcltid\ +setcltidconf\ +verify\ +write\ +rellockowner/ && $mode eq 'server' ) { + my @split = split( /[\t\ ]+/, $line ); + $data->{stats}{server_rpc_SetClId} += $split[0]; + $data->{stats}{server_rpc_SetClIdCf} += $split[1]; + $data->{stats}{server_rpc_Verify} += $split[2]; + $data->{stats}{server_rpc_Write} += $split[3]; + $data->{stats}{server_rpc_RelLockOwn} += $split[4]; + } elsif ( $previous_line =~ /bc_ctl\ +bind_conn\ +exchange_id\ +create_ses\ +destroy_ses/ && $mode eq 'server' ) + { + my @split = split( /[\t\ ]+/, $line ); + $data->{stats}{server_rpc_BackChannelCt} += $split[0]; + $data->{stats}{server_rpc_BindConnToSes} += $split[1]; + $data->{stats}{server_rpc_ExchangeID} += $split[2]; + $data->{stats}{server_rpc_CreateSess} += 
$split[3]; + $data->{stats}{server_rpc_DestroySess} += $split[4]; + } elsif ( $previous_line =~ /bc_ctl\ +bind_conn\ +exchange_id\ +create_ses\ +destroy_ses/ && $mode eq 'server' ) + { + my @split = split( /[\t\ ]+/, $line ); + $data->{stats}{server_rpc_BackChannelCt} += $split[0]; + $data->{stats}{server_rpc_BindConnToSes} += $split[1]; + $data->{stats}{server_rpc_ExchangeID} += $split[2]; + $data->{stats}{server_rpc_CreateSess} += $split[3]; + $data->{stats}{server_rpc_DestroySess} += $split[4]; + } elsif ( $previous_line =~ /free_stateid\ +getdirdeleg\ +getdevinfo\ +getdevlist\ +layoutcommit/ + && $mode eq 'server' ) + { + my @split = split( /[\t\ ]+/, $line ); + $data->{stats}{server_rpc_FreeStateID} += $split[0]; + $data->{stats}{server_rpc_GetDirDeleg} += $split[1]; + $data->{stats}{server_rpc_GetDevInfo} += $split[2]; + $data->{stats}{server_rpc_GetDevList} += $split[3]; + $data->{stats}{server_rpc_layoutCommit} += $split[4]; + } elsif ( $previous_line =~ /layoutget\ +layoutreturn\ +secinfononam\ +sequence\ +set_ssv/ + && $mode eq 'server' ) + { + my @split = split( /[\t\ ]+/, $line ); + $data->{stats}{server_rpc_LayoutGet} += $split[0]; + $data->{stats}{server_rpc_LayoutReturn} += $split[1]; + $data->{stats}{server_rpc_SecInfNoName} += $split[2]; + $data->{stats}{server_rpc_Sequence} += $split[3]; + $data->{stats}{server_rpc_SetSSV} += $split[4]; + } elsif ( $previous_line =~ /test_stateid\ +want_deleg\ +destroy_clid\ +reclaim_comp\ +allocate/ + && $mode eq 'server' ) + { + my @split = split( /[\t\ ]+/, $line ); + $data->{stats}{server_rpc_TestStateID} += $split[0]; + $data->{stats}{server_rpc_WantDeleg} += $split[1]; + $data->{stats}{server_rpc_DestroyClId} += $split[2]; + $data->{stats}{server_rpc_ReclaimCompl} += $split[3]; + $data->{stats}{server_rpc_Allocate} += $split[4]; + } elsif ( $previous_line =~ /copy\ +copy_notify\ +deallocate\ +ioadvise\ +layouterror/ && $mode eq 'server' ) { + my @split = split( /[\t\ ]+/, $line ); + 
$data->{stats}{server_rpc_Copy} += $split[0]; + $data->{stats}{server_rpc_CopyNotify} += $split[1]; + $data->{stats}{server_rpc_Deallocate} += $split[2]; + $data->{stats}{server_rpc_IOAdvise} += $split[3]; + $data->{stats}{server_rpc_LayoutError} += $split[4]; + } elsif ( $previous_line =~ /layoutstats\ +offloadcancel\ +offloadstatus\ +readplus\ +seek/ + && $mode eq 'server' ) + { + my @split = split( /[\t\ ]+/, $line ); + $data->{stats}{server_rpc_LayoutStats} += $split[0]; + $data->{stats}{server_rpc_OffloadCncl} += $split[1]; + $data->{stats}{server_rpc_OffloadStat} += $split[2]; + $data->{stats}{server_rpc_ReadPlus} += $split[3]; + $data->{stats}{server_rpc_Seek} += $split[4]; + } elsif ( $previous_line =~ /write_same/ && $mode eq 'server' ) { + my @split = split( /[\t\ ]+/, $line ); + $data->{stats}{server_rpc_WriteSame} += $split[0]; + } elsif ( $previous_line =~ /packets\ +udp\ +tcp\ +tcpconn/ && $mode eq 'client' ) { + my @split = split( /[\t\ ]+/, $line ); + $data->{stats}{client_network_packets} += $split[0]; + $data->{stats}{client_network_udp} += $split[1]; + $data->{stats}{client_network_tcp} += $split[2]; + $data->{stats}{client_network_tcpconn} += $split[3]; + } elsif ( $previous_line =~ /calls\ +retrans\ +authrefrsh/ && $mode eq 'client' ) { + my @split = split( /[\t\ ]+/, $line ); + $data->{stats}{client_rpc_info_calls} += $split[0]; + $data->{stats}{client_rpc_info_Retries} += $split[1]; + $data->{stats}{client_rpc_info_authrefrsh} += $split[2]; + } elsif ( $previous_line =~ /null\ +getattr\ +setattr\ +root\ +lookup/ && $mode eq 'client' ) { + my @split = split( /[\t\ ]+/, $line ); + $data->{stats}{client_rpc_null} += $split[0]; + $data->{stats}{client_rpc_Getattr} += $split[1]; + $data->{stats}{client_rpc_Setattr} += $split[2]; + $data->{stats}{client_rpc_root} += $split[3]; + $data->{stats}{client_rpc_Lookup} += $split[4]; + } elsif ( $previous_line =~ /readlink\ +read\ +wrcache\ +write\ +create/ && $mode eq 'client' ) { + my @split = split( 
/[\t\ ]+/, $line ); + $data->{stats}{client_rpc_Readlink} += $split[0]; + $data->{stats}{client_rpc_Read} += $split[1]; + $data->{stats}{client_rpc_wrcache} += $split[2]; + $data->{stats}{client_rpc_Write} += $split[3]; + $data->{stats}{client_rpc_Create} += $split[4]; + } elsif ( $previous_line =~ /remove\ +rename\ +link\ +symlink\ +mkdir/ && $mode eq 'client' ) { + my @split = split( /[\t\ ]+/, $line ); + $data->{stats}{client_rpc_Remove} += $split[0]; + $data->{stats}{client_rpc_Rename} += $split[1]; + $data->{stats}{client_rpc_Link} += $split[2]; + $data->{stats}{client_rpc_Symlink} += $split[3]; + $data->{stats}{client_rpc_Mkdir} += $split[4]; + } elsif ( $previous_line =~ /rmdir\ +readdir\ +fsstat/ && $mode eq 'client' ) { + my @split = split( /[\t\ ]+/, $line ); + $data->{stats}{client_rpc_Rmdir} += $split[0]; + $data->{stats}{client_rpc_Readdir} += $split[1]; + $data->{stats}{client_rpc_Fsstat} += $split[2]; + } elsif ( $previous_line =~ /null\ +getattr\ +setattr\ +lookup\ +access/ && $mode eq 'client' ) { + my @split = split( /[\t\ ]+/, $line ); + $data->{stats}{client_rpc_null} += $split[0]; + $data->{stats}{client_rpc_Getattr} += $split[1]; + $data->{stats}{client_rpc_Setattr} += $split[2]; + $data->{stats}{client_rpc_Lookup} += $split[3]; + $data->{stats}{client_rpc_Access} += $split[4]; + } elsif ( $previous_line =~ /readlink\ +read\ +write\ +create\ +mkdir/ && $mode eq 'client' ) { + my @split = split( /[\t\ ]+/, $line ); + $data->{stats}{client_rpc_Readlink} += $split[0]; + $data->{stats}{client_rpc_Read} += $split[1]; + $data->{stats}{client_rpc_Write} += $split[2]; + $data->{stats}{client_rpc_Create} += $split[3]; + $data->{stats}{client_rpc_Mkdir} += $split[4]; + } elsif ( $previous_line =~ /symlink\ +mknod\ +remove\ +rmdir\ +rename/ && $mode eq 'client' ) { + my @split = split( /[\t\ ]+/, $line ); + $data->{stats}{client_rpc_Symlink} += $split[0]; + $data->{stats}{client_rpc_Mknod} += $split[1]; + $data->{stats}{client_rpc_Remove} += $split[2]; + 
$data->{stats}{client_rpc_Rmdir} += $split[3]; + $data->{stats}{client_rpc_Rename} += $split[4]; + } elsif ( $previous_line =~ /link\ +readdir\ +readdirplus\ +fsstat\ +fsinfo/ && $mode eq 'client' ) { + my @split = split( /[\t\ ]+/, $line ); + $data->{stats}{client_rpc_Link} += $split[0]; + $data->{stats}{client_rpc_Readdir} += $split[1]; + $data->{stats}{client_rpc_Readdir} += $split[2]; + $data->{stats}{client_rpc_Fsstat} += $split[3]; + $data->{stats}{client_rpc_FSinfo} += $split[4]; + } elsif ( $previous_line =~ /pathconf\ +commit/ && $mode eq 'client' ) { + my @split = split( /[\t\ ]+/, $line ); + $data->{stats}{client_rpc_pathConf} += $split[0]; + $data->{stats}{client_rpc_Commit} += $split[1]; + } elsif ( $previous_line =~ /null\ +read\ +write\ +commit\ +open/ && $mode eq 'client' ) { + my @split = split( /[\t\ ]+/, $line ); + $data->{stats}{client_rpc_null} += $split[0]; + $data->{stats}{client_rpc_Read} += $split[1]; + $data->{stats}{client_rpc_Write} += $split[2]; + $data->{stats}{client_rpc_Commit} += $split[3]; + $data->{stats}{client_rpc_Open} += $split[4]; + } elsif ( $previous_line =~ /open_conf\ +open_noat\ +open_dgrd\ +close\ +setattr/ && $mode eq 'client' ) { + my @split = split( /[\t\ ]+/, $line ); + $data->{stats}{client_rpc_OpenCfr} += $split[0]; + $data->{stats}{client_rpc_OpenLayout} += $split[1]; + $data->{stats}{client_rpc_OpenDownGr} += $split[2]; + $data->{stats}{client_rpc_Close} += $split[3]; + $data->{stats}{client_rpc_Setattr} += $split[4]; + } elsif ( $previous_line =~ /fsinfo\ +renew\ +setclntid\ +confirm\ +lock/ && $mode eq 'client' ) { + my @split = split( /[\t\ ]+/, $line ); + $data->{stats}{client_rpc_FSinfo} += $split[0]; + $data->{stats}{client_rpc_Rename} += $split[1]; + $data->{stats}{client_rpc_SetClId} += $split[2]; + $data->{stats}{client_rpc_confirm} += $split[3]; + $data->{stats}{client_rpc_Lock} += $split[4]; + } elsif ( $previous_line =~ /lockt\ +locku\ +access\ +getattr\ +lookup/ && $mode eq 'client' ) { + my @split 
= split( /[\t\ ]+/, $line ); + $data->{stats}{client_rpc_LockT} += $split[0]; + $data->{stats}{client_rpc_LockU} += $split[1]; + $data->{stats}{client_rpc_Access} += $split[2]; + $data->{stats}{client_rpc_Getattr} += $split[3]; + $data->{stats}{client_rpc_Lookup} += $split[4]; + } elsif ( $previous_line =~ /lookup_root\ +remove\ +rename\ +link\ +symlink/ && $mode eq 'client' ) { + my @split = split( /[\t\ ]+/, $line ); + $data->{stats}{client_rpc_LookupOpen} += $split[0]; + $data->{stats}{client_rpc_Remove} += $split[1]; + $data->{stats}{client_rpc_Rename} += $split[2]; + $data->{stats}{client_rpc_Link} += $split[3]; + $data->{stats}{client_rpc_Symlink} += $split[4]; + } elsif ( $previous_line =~ /create\ +pathconf\ +statfs\ +readlink\ +readdir/ && $mode eq 'client' ) { + my @split = split( /[\t\ ]+/, $line ); + $data->{stats}{client_rpc_Create} += $split[0]; + $data->{stats}{client_rpc_pathConf} += $split[1]; + $data->{stats}{client_rpc_Fsstat} += $split[2]; + $data->{stats}{client_rpc_Readlink} += $split[3]; + $data->{stats}{client_rpc_Readdir} += $split[4]; + } elsif ( $previous_line =~ /server_caps\ +delegreturn\ +getacl\ +setacl\ +fs_locations/ && $mode eq 'client' ) + { + my @split = split( /[\t\ ]+/, $line ); + $data->{stats}{client_rpc_server_caps} += $split[0]; + $data->{stats}{client_rpc_DelegRet} += $split[1]; + $data->{stats}{client_rpc_Getattr} += $split[2]; + $data->{stats}{client_rpc_SetAcl} += $split[3]; + $data->{stats}{client_rpc_fs_locations} += $split[4]; + } elsif ( $previous_line =~ /rel_lkowner\ +secinfo\ +fsid_present\ +exchange_id\ +create_session/ + && $mode eq 'client' ) + { + my @split = split( /[\t\ ]+/, $line ); + $data->{stats}{client_rpc_RelLckOwn} += $split[0]; + $data->{stats}{client_rpc_secinfo} += $split[1]; + $data->{stats}{client_rpc_fsid_present} += $split[2]; + $data->{stats}{client_rpc_ExchangeId} += $split[3]; + $data->{stats}{client_rpc_CreateSess} += $split[4]; + } elsif ( $previous_line =~ /destroy_session\ +sequence\ 
+get_lease_time\ +reclaim_comp\ +layoutget/ + && $mode eq 'client' ) + { + my @split = split( /[\t\ ]+/, $line ); + $data->{stats}{client_rpc_DestroySess} += $split[0]; + $data->{stats}{client_rpc_sequence} += $split[1]; + $data->{stats}{client_rpc_get_lease_time} += $split[2]; + $data->{stats}{client_rpc_ReclaimCompl} += $split[3]; + $data->{stats}{client_rpc_LayoutGet} += $split[4]; + } elsif ( $previous_line =~ /getdevinfo\ +layoutcommit\ +layoutreturn\ +secinfo_no\ +test_stateid/ + && $mode eq 'client' ) + { + my @split = split( /[\t\ ]+/, $line ); + $data->{stats}{client_rpc_GetDevInfo} += $split[0]; + $data->{stats}{client_rpc_LayoutCommit} += $split[1]; + $data->{stats}{client_rpc_LayoutReturn} += $split[2]; + $data->{stats}{client_rpc_secinfo_no} += $split[3]; + $data->{stats}{client_rpc_test_stateid} += $split[4]; + } elsif ( $previous_line =~ /free_stateid\ +getdevicelist\ +bind_conn_to_ses\ +destroy_clientid\ +seek/ + && $mode eq 'client' ) + { + my @split = split( /[\t\ ]+/, $line ); + $data->{stats}{client_rpc_FreeStateID} += $split[0]; + $data->{stats}{client_rpc_getdevicelist} += $split[1]; + $data->{stats}{client_rpc_BindConnSess} += $split[2]; + $data->{stats}{client_rpc_DestroyClId} += $split[3]; + $data->{stats}{client_rpc_Seek} += $split[4]; + } elsif ( $previous_line =~ /allocate\ +deallocate\ +layoutstats\ +clone/ && $mode eq 'client' ) { + my @split = split( /[\t\ ]+/, $line ); + $data->{stats}{client_rpc_Allocate} += $split[0]; + $data->{stats}{client_rpc_Deallocate} += $split[1]; + $data->{stats}{client_rpc_layoutstats} += $split[2]; + $data->{stats}{client_rpc_Close} += $split[3]; } $previous_line = $line; } ## end foreach my $line (@output_split) From 4da4dffe999656cad344065dcfe575f8f5c643d5 Mon Sep 17 00:00:00 2001 From: "Zane C. 
Bowers-Hadley" Date: Tue, 12 Mar 2024 22:34:48 -0500 Subject: [PATCH 416/497] fix linux stuff --- snmp/nfs | 136 +++++++++++++++++++++++-------------------------------- 1 file changed, 57 insertions(+), 79 deletions(-) diff --git a/snmp/nfs b/snmp/nfs index c6391d0a5..bc00b0e46 100755 --- a/snmp/nfs +++ b/snmp/nfs @@ -54,30 +54,6 @@ if possible. =cut -## -## -## General Notes -## -## -# -# FreeBSD used as the design basis given better stats produced and as well -# as actually documented. - -### -### -### Linux Notes -### -### -# -# What the following map to if if there is a FreeBSD equivalent is not clear. -# -# fs_locations -# test_stateid -# fsid_present -# open_conf -# confirm -# null - use strict; use warnings; use Getopt::Long; @@ -662,38 +638,40 @@ if ( $^O eq 'freebsd' ) { #### #### if ( $^O eq 'linux' ) { - my $output_raw = `nfsstat | sed 's/[0-9\.]*\%//g'`; + my $output_raw = `nfsstat -2 -3 -4 -v| sed 's/[0-9\.]*\%//g'`; my @output_split = split( /\n/, $output_raw ); my $previous_line = ''; my $mode = ''; foreach my $line (@output_split) { + $line =~ s/\t/\ /g; + $line =~ s/\ +$//g; if ( $line =~ /^[Cc]lient/ ) { $mode = 'client'; } elsif ( $line =~ /^[Ss]erver/ ) { $mode = 'server'; } - if ( $previous_line =~ /packets\ +udp\ +tcp\ +tcpconn/ && $mode eq 'server' ) { + if ( $previous_line =~ /^packets\ +udp\ +tcp\ +tcpconn/ && $mode eq 'server' ) { ( $data->{stats}{server_network_packets}, $data->{stats}{server_network_udp}, $data->{stats}{server_network_tcp}, $data->{stats}{server_network_tcpconn}, ) = split( /[\t\ ]+/, $line ); - } elsif ( $previous_line =~ /calls\ +badcalls\ +badfmt\ +badauth\ +badclnt/ && $mode eq 'server' ) { + } elsif ( $previous_line =~ /^calls\ +badcalls\ +badfmt\ +badauth\ +badclnt/ && $mode eq 'server' ) { ( $data->{stats}{server_rpcStats_calls}, $data->{stats}{server_rpcStats_badcalls}, $data->{stats}{server_rpcStats_badfmt}, $data->{stats}{server_rpcStats_badauth}, $data->{stats}{server_rpcStats_badclnt}, ) = split( /[\t\ ]+/, 
$line ); - } elsif ( $previous_line =~ /hits\ +misses\ +nocache/ && $mode eq 'server' ) { + } elsif ( $previous_line =~ /^hits\ +misses\ +nocache/ && $mode eq 'server' ) { ( $data->{stats}{server_cache_hits}, $data->{stats}{server_cache_Misses}, $data->{stats}{server_cache_nocache}, ) = split( /[\t\ ]+/, $line ); - } elsif ( $previous_line =~ /read\ +write/ && $mode eq 'server' ) { + } elsif ( $previous_line =~ /^read\ +write/ && $mode eq 'server' ) { ( $data->{stats}{server_io_read}, $data->{stats}{server_io_write}, ) = split( /[\t\ ]+/, $line ); } elsif ( $previous_line - =~ /size\ +0\-10\%\ +10\-20\%\ +20\-3\0%\ +30\-40\%\ +40\-50\%\ +50\-60\%\ +60\-70\%\ +70\-80\%\ +80\-90\%\ +90\-100\%\ +notfound/ + =~ /^size\ +0\-10\%\ +10\-20\%\ +20\-3\0%\ +30\-40\%\ +40\-50\%\ +50\-60\%\ +60\-70\%\ +70\-80\%\ +80\-90\%\ +90\-100\%\ +notfound/ && $mode eq 'server' ) { ( @@ -704,131 +682,131 @@ if ( $^O eq 'linux' ) { $data->{stats}{server_RAcache_8}, $data->{stats}{server_RAcache_9}, $data->{stats}{server_RAcache_notfound} ) = split( /[\t\ ]+/, $line ); - } elsif ( $previous_line =~ /lookup\ +anon\ +ncachedir\ +ncachenondir\ +stale/ && $mode eq 'server' ) { + } elsif ( $previous_line =~ /^lookup\ +anon\ +ncachedir\ +ncachenondir\ +stale/ && $mode eq 'server' ) { ( $data->{stats}{server_FHcache_lookup}, $data->{stats}{server_FHcache_anon}, $data->{stats}{server_FHcache_ncachedir}, $data->{stats}{server_FHcache_ncachenondir}, $data->{stats}{server_FHcache_stale}, ) = split( /[\t\ ]+/, $line ); - } elsif ( $previous_line =~ /null\ +getattr\ +setattr\ +root\ +lookup/ && $mode eq 'server' ) { + } elsif ( $previous_line =~ /^null\ +getattr\ +setattr\ +root\ +lookup/ && $mode eq 'server' ) { my @split = split( /[\t\ ]+/, $line ); $data->{stats}{server_rpc_null} += $split[0]; $data->{stats}{server_rpc_Getattr} += $split[1]; $data->{stats}{server_rpc_Setattr} += $split[2]; $data->{stats}{server_rpc_root} += $split[3]; $data->{stats}{server_rpc_Lookup} += $split[4]; - } elsif ( 
$previous_line =~ /readlink\ +read\ +wrcache\ +write\ +create/ && $mode eq 'server' ) { + } elsif ( $previous_line =~ /^readlink\ +read\ +wrcache\ +write\ +create/ && $mode eq 'server' ) { my @split = split( /[\t\ ]+/, $line ); $data->{stats}{server_rpc_Readlink} += $split[0]; $data->{stats}{server_rpc_Read} += $split[1]; $data->{stats}{server_rpc_wrcache} += $split[2]; $data->{stats}{server_rpc_Write} += $split[3]; $data->{stats}{server_rpc_Create} += $split[4]; - } elsif ( $previous_line =~ /remove\ +rename\ +link\ +symlink\ +mkdir/ && $mode eq 'server' ) { + } elsif ( $previous_line =~ /^remove\ +rename\ +link\ +symlink\ +mkdir/ && $mode eq 'server' ) { my @split = split( /[\t\ ]+/, $line ); $data->{stats}{server_rpc_Remove} += $split[0]; $data->{stats}{server_rpc_Rename} += $split[1]; $data->{stats}{server_rpc_Link} += $split[2]; $data->{stats}{server_rpc_Symlink} += $split[3]; $data->{stats}{server_rpc_Mkdir} += $split[4]; - } elsif ( $previous_line =~ /rmdir\ +readdir\ +fsstat/ && $mode eq 'server' ) { + } elsif ( $previous_line =~ /^rmdir\ +readdir\ +fsstat/ && $mode eq 'server' ) { my @split = split( /[\t\ ]+/, $line ); $data->{stats}{server_rpc_Rmdir} += $split[0]; $data->{stats}{server_rpc_Readdir} += $split[1]; $data->{stats}{server_rpc_Fsstat} += $split[2]; - } elsif ( $previous_line =~ /null\ +getattr\ +setattr\ +lookup\ +access/ && $mode eq 'server' ) { + } elsif ( $previous_line =~ /^null\ +getattr\ +setattr\ +lookup\ +access/ && $mode eq 'server' ) { my @split = split( /[\t\ ]+/, $line ); $data->{stats}{server_rpc_null} += $split[0]; $data->{stats}{server_rpc_Getattr} += $split[1]; $data->{stats}{server_rpc_Setattr} += $split[2]; $data->{stats}{server_rpc_Lookup} += $split[3]; $data->{stats}{server_rpc_Access} += $split[4]; - } elsif ( $previous_line =~ /readlink\ +read\ +write\ +create\ +mkdir/ && $mode eq 'server' ) { + } elsif ( $previous_line =~ /^readlink\ +read\ +write\ +create\ +mkdir/ && $mode eq 'server' ) { my @split = split( /[\t\ ]+/, 
$line ); $data->{stats}{server_rpc_Readlink} += $split[0]; $data->{stats}{server_rpc_Read} += $split[1]; $data->{stats}{server_rpc_Write} += $split[2]; $data->{stats}{server_rpc_Create} += $split[3]; $data->{stats}{server_rpc_Mkdir} += $split[4]; - } elsif ( $previous_line =~ /symlink\ +mknod\ +remove\ +rmdir\ +rename/ && $mode eq 'server' ) { + } elsif ( $previous_line =~ /^symlink\ +mknod\ +remove\ +rmdir\ +rename/ && $mode eq 'server' ) { my @split = split( /[\t\ ]+/, $line ); $data->{stats}{server_rpc_Symlink} += $split[0]; $data->{stats}{server_rpc_Mknod} += $split[1]; $data->{stats}{server_rpc_Remove} += $split[2]; $data->{stats}{server_rpc_Rmdir} += $split[3]; $data->{stats}{server_rpc_Rename} += $split[4]; - } elsif ( $previous_line =~ /link\ +readdir\ +readdirplus\ +fsstat\ +fsinfo/ && $mode eq 'server' ) { + } elsif ( $previous_line =~ /^link\ +readdir\ +readdirplus\ +fsstat\ +fsinfo/ && $mode eq 'server' ) { my @split = split( /[\t\ ]+/, $line ); $data->{stats}{server_rpc_Link} += $split[0]; $data->{stats}{server_rpc_Readdir} += $split[1]; $data->{stats}{server_rpc_ReadPlus} += $split[2]; $data->{stats}{server_rpc_Fsstat} += $split[3]; $data->{stats}{server_rpc_FSinfo} += $split[4]; - } elsif ( $previous_line =~ /pathconf\ +commit/ && $mode eq 'server' ) { + } elsif ( $previous_line =~ /^pathconf\ +commit/ && $mode eq 'server' ) { my @split = split( /[\t\ ]+/, $line ); $data->{stats}{server_rpc_pathConf} += $split[0]; $data->{stats}{server_rpc_Commit} += $split[1]; - } elsif ( $previous_line =~ /null\ +compound/ && $mode eq 'server' ) { + } elsif ( $previous_line =~ /^null\ +compound/ && $mode eq 'server' ) { my @split = split( /[\t\ ]+/, $line ); $data->{stats}{server_rpc_null} += $split[0]; $data->{stats}{server_rpc_compound} += $split[1]; - } elsif ( $previous_line =~ /op0\-unused\ +op1\-unused\ +op2\-future\ +access\ +close/ && $mode eq 'server' ) { + } elsif ( $previous_line =~ /^op0\-unused\ +op1\-unused\ +op2\-future\ +access\ +close/ && $mode eq 
'server' ) { my @split = split( /[\t\ ]+/, $line ); $data->{stats}{server_rpc_op0_unused} += $split[0]; $data->{stats}{server_rpc_op1_unused} += $split[1]; $data->{stats}{server_rpc_op2_future} += $split[2]; $data->{stats}{server_rpc_Access} += $split[3]; $data->{stats}{server_rpc_Close} += $split[4]; - } elsif ( $previous_line =~ /commit\ +create\ +delegpurge\ +delegreturn\ +getattr/ && $mode eq 'server' ) { + } elsif ( $previous_line =~ /^commit\ +create\ +delegpurge\ +delegreturn\ +getattr/ && $mode eq 'server' ) { my @split = split( /[\t\ ]+/, $line ); $data->{stats}{server_rpc_Commit} += $split[0]; $data->{stats}{server_rpc_Create} += $split[1]; $data->{stats}{server_rpc_DelePurge} = $split[2]; $data->{stats}{server_rpc_DelRet} = $split[3]; $data->{stats}{server_rpc_Getattr} += $split[4]; - } elsif ( $previous_line =~ /getfh\ +link\ +lock\ +lockt\ +locku/ && $mode eq 'server' ) { + } elsif ( $previous_line =~ /^getfh\ +link\ +lock\ +lockt\ +locku/ && $mode eq 'server' ) { my @split = split( /[\t\ ]+/, $line ); $data->{stats}{server_rpc_GetFH} += $split[0]; $data->{stats}{server_rpc_Link} += $split[1]; $data->{stats}{server_rpc_Lock} += $split[2]; $data->{stats}{server_rpc_LockT} += $split[3]; $data->{stats}{server_rpc_LockU} += $split[4]; - } elsif ( $previous_line =~ /lookup\ +lookup_root\ +nverify\ +open\ +openattr/ && $mode eq 'server' ) { + } elsif ( $previous_line =~ /^lookup\ +lookup_root\ +nverify\ +open\ +openattr/ && $mode eq 'server' ) { my @split = split( /[\t\ ]+/, $line ); $data->{stats}{server_rpc_Lookup} += $split[0]; $data->{stats}{server_rpc_LookupP} += $split[1]; $data->{stats}{server_rpc_NVerify} += $split[2]; $data->{stats}{server_rpc_Open} += $split[3]; $data->{stats}{server_rpc_OpeAttr} += $split[4]; - } elsif ( $previous_line =~ /open_conf\ +open_dgrd\ +putfh\ +putpubfh\ +putrootfh/ && $mode eq 'server' ) { + } elsif ( $previous_line =~ /^open_conf\ +open_dgrd\ +putfh\ +putpubfh\ +putrootfh/ && $mode eq 'server' ) { my @split = split( 
/[\t\ ]+/, $line ); $data->{stats}{server_rpc_OpenCfrm} += $split[0]; $data->{stats}{server_rpc_OpenDwnGr} += $split[1]; $data->{stats}{server_rpc_PutFH} += $split[2]; $data->{stats}{server_rpc_PutPubFH} += $split[3]; $data->{stats}{server_rpc_PutRootFH} += $split[4]; - } elsif ( $previous_line =~ /read\ +readdir\ +readlink\ +remove\ +rename/ && $mode eq 'server' ) { + } elsif ( $previous_line =~ /^read\ +readdir\ +readlink\ +remove\ +rename/ && $mode eq 'server' ) { my @split = split( /[\t\ ]+/, $line ); $data->{stats}{server_rpc_Read} += $split[0]; $data->{stats}{server_rpc_Readdir} += $split[1]; $data->{stats}{server_rpc_Readlink} += $split[2]; $data->{stats}{server_rpc_Remove} += $split[3]; $data->{stats}{server_rpc_Rename} += $split[4]; - } elsif ( $previous_line =~ /renew\ +restorefh\ +savefh\ +secinfo\ +setattr/ && $mode eq 'server' ) { + } elsif ( $previous_line =~ /^renew\ +restorefh\ +savefh\ +secinfo\ +setattr/ && $mode eq 'server' ) { my @split = split( /[\t\ ]+/, $line ); $data->{stats}{server_rpc_Renew} += $split[0]; $data->{stats}{server_rpc_RestoreFH} += $split[1]; $data->{stats}{server_rpc_SaveFH} += $split[2]; $data->{stats}{server_rpc_Secinfo} += $split[3]; $data->{stats}{server_rpc_Setattr} += $split[4]; - } elsif ( $previous_line =~ /setcltid\ +setcltidconf\ +verify\ +write\ +rellockowner/ && $mode eq 'server' ) { + } elsif ( $previous_line =~ /^setcltid\ +setcltidconf\ +verify\ +write\ +rellockowner/ && $mode eq 'server' ) { my @split = split( /[\t\ ]+/, $line ); $data->{stats}{server_rpc_SetClId} += $split[0]; $data->{stats}{server_rpc_SetClIdCf} += $split[1]; $data->{stats}{server_rpc_Verify} += $split[2]; $data->{stats}{server_rpc_Write} += $split[3]; $data->{stats}{server_rpc_RelLockOwn} += $split[4]; - } elsif ( $previous_line =~ /bc_ctl\ +bind_conn\ +exchange_id\ +create_ses\ +destroy_ses/ && $mode eq 'server' ) + } elsif ( $previous_line =~ /^bc_ctl\ +bind_conn\ +exchange_id\ +create_ses\ +destroy_ses/ && $mode eq 'server' ) { my @split 
= split( /[\t\ ]+/, $line ); $data->{stats}{server_rpc_BackChannelCt} += $split[0]; @@ -836,7 +814,7 @@ if ( $^O eq 'linux' ) { $data->{stats}{server_rpc_ExchangeID} += $split[2]; $data->{stats}{server_rpc_CreateSess} += $split[3]; $data->{stats}{server_rpc_DestroySess} += $split[4]; - } elsif ( $previous_line =~ /bc_ctl\ +bind_conn\ +exchange_id\ +create_ses\ +destroy_ses/ && $mode eq 'server' ) + } elsif ( $previous_line =~ /^bc_ctl\ +bind_conn\ +exchange_id\ +create_ses\ +destroy_ses/ && $mode eq 'server' ) { my @split = split( /[\t\ ]+/, $line ); $data->{stats}{server_rpc_BackChannelCt} += $split[0]; @@ -844,7 +822,7 @@ if ( $^O eq 'linux' ) { $data->{stats}{server_rpc_ExchangeID} += $split[2]; $data->{stats}{server_rpc_CreateSess} += $split[3]; $data->{stats}{server_rpc_DestroySess} += $split[4]; - } elsif ( $previous_line =~ /free_stateid\ +getdirdeleg\ +getdevinfo\ +getdevlist\ +layoutcommit/ + } elsif ( $previous_line =~ /^free_stateid\ +getdirdeleg\ +getdevinfo\ +getdevlist\ +layoutcommit/ && $mode eq 'server' ) { my @split = split( /[\t\ ]+/, $line ); @@ -853,7 +831,7 @@ if ( $^O eq 'linux' ) { $data->{stats}{server_rpc_GetDevInfo} += $split[2]; $data->{stats}{server_rpc_GetDevList} += $split[3]; $data->{stats}{server_rpc_layoutCommit} += $split[4]; - } elsif ( $previous_line =~ /layoutget\ +layoutreturn\ +secinfononam\ +sequence\ +set_ssv/ + } elsif ( $previous_line =~ /^layoutget\ +layoutreturn\ +secinfononam\ +sequence\ +set_ssv/ && $mode eq 'server' ) { my @split = split( /[\t\ ]+/, $line ); @@ -862,7 +840,7 @@ if ( $^O eq 'linux' ) { $data->{stats}{server_rpc_SecInfNoName} += $split[2]; $data->{stats}{server_rpc_Sequence} += $split[3]; $data->{stats}{server_rpc_SetSSV} += $split[4]; - } elsif ( $previous_line =~ /test_stateid\ +want_deleg\ +destroy_clid\ +reclaim_comp\ +allocate/ + } elsif ( $previous_line =~ /^test_stateid\ +want_deleg\ +destroy_clid\ +reclaim_comp\ +allocate/ && $mode eq 'server' ) { my @split = split( /[\t\ ]+/, $line ); @@ 
-871,14 +849,14 @@ if ( $^O eq 'linux' ) { $data->{stats}{server_rpc_DestroyClId} += $split[2]; $data->{stats}{server_rpc_ReclaimCompl} += $split[3]; $data->{stats}{server_rpc_Allocate} += $split[4]; - } elsif ( $previous_line =~ /copy\ +copy_notify\ +deallocate\ +ioadvise\ +layouterror/ && $mode eq 'server' ) { + } elsif ( $previous_line =~ /^copy\ +copy_notify\ +deallocate\ +ioadvise\ +layouterror/ && $mode eq 'server' ) { my @split = split( /[\t\ ]+/, $line ); $data->{stats}{server_rpc_Copy} += $split[0]; $data->{stats}{server_rpc_CopyNotify} += $split[1]; $data->{stats}{server_rpc_Deallocate} += $split[2]; $data->{stats}{server_rpc_IOAdvise} += $split[3]; $data->{stats}{server_rpc_LayoutError} += $split[4]; - } elsif ( $previous_line =~ /layoutstats\ +offloadcancel\ +offloadstatus\ +readplus\ +seek/ + } elsif ( $previous_line =~ /^layoutstats\ +offloadcancel\ +offloadstatus\ +readplus\ +seek/ && $mode eq 'server' ) { my @split = split( /[\t\ ]+/, $line ); @@ -887,10 +865,10 @@ if ( $^O eq 'linux' ) { $data->{stats}{server_rpc_OffloadStat} += $split[2]; $data->{stats}{server_rpc_ReadPlus} += $split[3]; $data->{stats}{server_rpc_Seek} += $split[4]; - } elsif ( $previous_line =~ /write_same/ && $mode eq 'server' ) { + } elsif ( $previous_line =~ /^write_same/ && $mode eq 'server' ) { my @split = split( /[\t\ ]+/, $line ); $data->{stats}{server_rpc_WriteSame} += $split[0]; - } elsif ( $previous_line =~ /packets\ +udp\ +tcp\ +tcpconn/ && $mode eq 'client' ) { + } elsif ( $previous_line =~ /^packets\ +udp\ +tcp\ +tcpconn/ && $mode eq 'client' ) { my @split = split( /[\t\ ]+/, $line ); $data->{stats}{client_network_packets} += $split[0]; $data->{stats}{client_network_udp} += $split[1]; @@ -901,107 +879,107 @@ if ( $^O eq 'linux' ) { $data->{stats}{client_rpc_info_calls} += $split[0]; $data->{stats}{client_rpc_info_Retries} += $split[1]; $data->{stats}{client_rpc_info_authrefrsh} += $split[2]; - } elsif ( $previous_line =~ /null\ +getattr\ +setattr\ +root\ +lookup/ && 
$mode eq 'client' ) { + } elsif ( $previous_line =~ /^null\ +getattr\ +setattr\ +root\ +lookup/ && $mode eq 'client' ) { my @split = split( /[\t\ ]+/, $line ); $data->{stats}{client_rpc_null} += $split[0]; $data->{stats}{client_rpc_Getattr} += $split[1]; $data->{stats}{client_rpc_Setattr} += $split[2]; $data->{stats}{client_rpc_root} += $split[3]; $data->{stats}{client_rpc_Lookup} += $split[4]; - } elsif ( $previous_line =~ /readlink\ +read\ +wrcache\ +write\ +create/ && $mode eq 'client' ) { + } elsif ( $previous_line =~ /^readlink\ +read\ +wrcache\ +write\ +create/ && $mode eq 'client' ) { my @split = split( /[\t\ ]+/, $line ); $data->{stats}{client_rpc_Readlink} += $split[0]; $data->{stats}{client_rpc_Read} += $split[1]; $data->{stats}{client_rpc_wrcache} += $split[2]; $data->{stats}{client_rpc_Write} += $split[3]; $data->{stats}{client_rpc_Create} += $split[4]; - } elsif ( $previous_line =~ /remove\ +rename\ +link\ +symlink\ +mkdir/ && $mode eq 'client' ) { + } elsif ( $previous_line =~ /^remove\ +rename\ +link\ +symlink\ +mkdir/ && $mode eq 'client' ) { my @split = split( /[\t\ ]+/, $line ); $data->{stats}{client_rpc_Remove} += $split[0]; $data->{stats}{client_rpc_Rename} += $split[1]; $data->{stats}{client_rpc_Link} += $split[2]; $data->{stats}{client_rpc_Symlink} += $split[3]; $data->{stats}{client_rpc_Mkdir} += $split[4]; - } elsif ( $previous_line =~ /rmdir\ +readdir\ +fsstat/ && $mode eq 'client' ) { + } elsif ( $previous_line =~ /^rmdir\ +readdir\ +fsstat/ && $mode eq 'client' ) { my @split = split( /[\t\ ]+/, $line ); $data->{stats}{client_rpc_Rmdir} += $split[0]; $data->{stats}{client_rpc_Readdir} += $split[1]; $data->{stats}{client_rpc_Fsstat} += $split[2]; - } elsif ( $previous_line =~ /null\ +getattr\ +setattr\ +lookup\ +access/ && $mode eq 'client' ) { + } elsif ( $previous_line =~ /^null\ +getattr\ +setattr\ +lookup\ +access/ && $mode eq 'client' ) { my @split = split( /[\t\ ]+/, $line ); $data->{stats}{client_rpc_null} += $split[0]; 
$data->{stats}{client_rpc_Getattr} += $split[1]; $data->{stats}{client_rpc_Setattr} += $split[2]; $data->{stats}{client_rpc_Lookup} += $split[3]; $data->{stats}{client_rpc_Access} += $split[4]; - } elsif ( $previous_line =~ /readlink\ +read\ +write\ +create\ +mkdir/ && $mode eq 'client' ) { + } elsif ( $previous_line =~ /^readlink\ +read\ +write\ +create\ +mkdir/ && $mode eq 'client' ) { my @split = split( /[\t\ ]+/, $line ); $data->{stats}{client_rpc_Readlink} += $split[0]; $data->{stats}{client_rpc_Read} += $split[1]; $data->{stats}{client_rpc_Write} += $split[2]; $data->{stats}{client_rpc_Create} += $split[3]; $data->{stats}{client_rpc_Mkdir} += $split[4]; - } elsif ( $previous_line =~ /symlink\ +mknod\ +remove\ +rmdir\ +rename/ && $mode eq 'client' ) { + } elsif ( $previous_line =~ /^symlink\ +mknod\ +remove\ +rmdir\ +rename/ && $mode eq 'client' ) { my @split = split( /[\t\ ]+/, $line ); $data->{stats}{client_rpc_Symlink} += $split[0]; $data->{stats}{client_rpc_Mknod} += $split[1]; $data->{stats}{client_rpc_Remove} += $split[2]; $data->{stats}{client_rpc_Rmdir} += $split[3]; $data->{stats}{client_rpc_Rename} += $split[4]; - } elsif ( $previous_line =~ /link\ +readdir\ +readdirplus\ +fsstat\ +fsinfo/ && $mode eq 'client' ) { + } elsif ( $previous_line =~ /^link\ +readdir\ +readdirplus\ +fsstat\ +fsinfo/ && $mode eq 'client' ) { my @split = split( /[\t\ ]+/, $line ); $data->{stats}{client_rpc_Link} += $split[0]; $data->{stats}{client_rpc_Readdir} += $split[1]; $data->{stats}{client_rpc_Readdir} += $split[2]; $data->{stats}{client_rpc_Fsstat} += $split[3]; $data->{stats}{client_rpc_FSinfo} += $split[4]; - } elsif ( $previous_line =~ /pathconf\ +commit/ && $mode eq 'client' ) { + } elsif ( $previous_line =~ /^pathconf\ +commit/ && $mode eq 'client' ) { my @split = split( /[\t\ ]+/, $line ); $data->{stats}{client_rpc_pathConf} += $split[0]; $data->{stats}{client_rpc_Commit} += $split[1]; - } elsif ( $previous_line =~ /null\ +read\ +write\ +commit\ +open/ && $mode 
eq 'client' ) { + } elsif ( $previous_line =~ /^null\ +read\ +write\ +commit\ +open/ && $mode eq 'client' ) { my @split = split( /[\t\ ]+/, $line ); $data->{stats}{client_rpc_null} += $split[0]; $data->{stats}{client_rpc_Read} += $split[1]; $data->{stats}{client_rpc_Write} += $split[2]; $data->{stats}{client_rpc_Commit} += $split[3]; $data->{stats}{client_rpc_Open} += $split[4]; - } elsif ( $previous_line =~ /open_conf\ +open_noat\ +open_dgrd\ +close\ +setattr/ && $mode eq 'client' ) { + } elsif ( $previous_line =~ /^open_conf\ +open_noat\ +open_dgrd\ +close\ +setattr/ && $mode eq 'client' ) { my @split = split( /[\t\ ]+/, $line ); $data->{stats}{client_rpc_OpenCfr} += $split[0]; $data->{stats}{client_rpc_OpenLayout} += $split[1]; $data->{stats}{client_rpc_OpenDownGr} += $split[2]; $data->{stats}{client_rpc_Close} += $split[3]; $data->{stats}{client_rpc_Setattr} += $split[4]; - } elsif ( $previous_line =~ /fsinfo\ +renew\ +setclntid\ +confirm\ +lock/ && $mode eq 'client' ) { + } elsif ( $previous_line =~ /^fsinfo\ +renew\ +setclntid\ +confirm\ +lock/ && $mode eq 'client' ) { my @split = split( /[\t\ ]+/, $line ); $data->{stats}{client_rpc_FSinfo} += $split[0]; $data->{stats}{client_rpc_Rename} += $split[1]; $data->{stats}{client_rpc_SetClId} += $split[2]; $data->{stats}{client_rpc_confirm} += $split[3]; $data->{stats}{client_rpc_Lock} += $split[4]; - } elsif ( $previous_line =~ /lockt\ +locku\ +access\ +getattr\ +lookup/ && $mode eq 'client' ) { + } elsif ( $previous_line =~ /^lockt\ +locku\ +access\ +getattr\ +lookup/ && $mode eq 'client' ) { my @split = split( /[\t\ ]+/, $line ); $data->{stats}{client_rpc_LockT} += $split[0]; $data->{stats}{client_rpc_LockU} += $split[1]; $data->{stats}{client_rpc_Access} += $split[2]; $data->{stats}{client_rpc_Getattr} += $split[3]; $data->{stats}{client_rpc_Lookup} += $split[4]; - } elsif ( $previous_line =~ /lookup_root\ +remove\ +rename\ +link\ +symlink/ && $mode eq 'client' ) { + } elsif ( $previous_line =~ /^lookup_root\ 
+remove\ +rename\ +link\ +symlink/ && $mode eq 'client' ) { my @split = split( /[\t\ ]+/, $line ); $data->{stats}{client_rpc_LookupOpen} += $split[0]; $data->{stats}{client_rpc_Remove} += $split[1]; $data->{stats}{client_rpc_Rename} += $split[2]; $data->{stats}{client_rpc_Link} += $split[3]; $data->{stats}{client_rpc_Symlink} += $split[4]; - } elsif ( $previous_line =~ /create\ +pathconf\ +statfs\ +readlink\ +readdir/ && $mode eq 'client' ) { + } elsif ( $previous_line =~ /^create\ +pathconf\ +statfs\ +readlink\ +readdir/ && $mode eq 'client' ) { my @split = split( /[\t\ ]+/, $line ); $data->{stats}{client_rpc_Create} += $split[0]; $data->{stats}{client_rpc_pathConf} += $split[1]; $data->{stats}{client_rpc_Fsstat} += $split[2]; $data->{stats}{client_rpc_Readlink} += $split[3]; $data->{stats}{client_rpc_Readdir} += $split[4]; - } elsif ( $previous_line =~ /server_caps\ +delegreturn\ +getacl\ +setacl\ +fs_locations/ && $mode eq 'client' ) + } elsif ( $previous_line =~ /^server_caps\ +delegreturn\ +getacl\ +setacl\ +fs_locations/ && $mode eq 'client' ) { my @split = split( /[\t\ ]+/, $line ); $data->{stats}{client_rpc_server_caps} += $split[0]; @@ -1009,7 +987,7 @@ if ( $^O eq 'linux' ) { $data->{stats}{client_rpc_Getattr} += $split[2]; $data->{stats}{client_rpc_SetAcl} += $split[3]; $data->{stats}{client_rpc_fs_locations} += $split[4]; - } elsif ( $previous_line =~ /rel_lkowner\ +secinfo\ +fsid_present\ +exchange_id\ +create_session/ + } elsif ( $previous_line =~ /^rel_lkowner\ +secinfo\ +fsid_present\ +exchange_id\ +create_session/ && $mode eq 'client' ) { my @split = split( /[\t\ ]+/, $line ); @@ -1018,7 +996,7 @@ if ( $^O eq 'linux' ) { $data->{stats}{client_rpc_fsid_present} += $split[2]; $data->{stats}{client_rpc_ExchangeId} += $split[3]; $data->{stats}{client_rpc_CreateSess} += $split[4]; - } elsif ( $previous_line =~ /destroy_session\ +sequence\ +get_lease_time\ +reclaim_comp\ +layoutget/ + } elsif ( $previous_line =~ /^destroy_session\ +sequence\ 
+get_lease_time\ +reclaim_comp\ +layoutget/ && $mode eq 'client' ) { my @split = split( /[\t\ ]+/, $line ); @@ -1027,7 +1005,7 @@ if ( $^O eq 'linux' ) { $data->{stats}{client_rpc_get_lease_time} += $split[2]; $data->{stats}{client_rpc_ReclaimCompl} += $split[3]; $data->{stats}{client_rpc_LayoutGet} += $split[4]; - } elsif ( $previous_line =~ /getdevinfo\ +layoutcommit\ +layoutreturn\ +secinfo_no\ +test_stateid/ + } elsif ( $previous_line =~ /^getdevinfo\ +layoutcommit\ +layoutreturn\ +secinfo_no\ +test_stateid/ && $mode eq 'client' ) { my @split = split( /[\t\ ]+/, $line ); @@ -1036,7 +1014,7 @@ if ( $^O eq 'linux' ) { $data->{stats}{client_rpc_LayoutReturn} += $split[2]; $data->{stats}{client_rpc_secinfo_no} += $split[3]; $data->{stats}{client_rpc_test_stateid} += $split[4]; - } elsif ( $previous_line =~ /free_stateid\ +getdevicelist\ +bind_conn_to_ses\ +destroy_clientid\ +seek/ + } elsif ( $previous_line =~ /^free_stateid\ +getdevicelist\ +bind_conn_to_ses\ +destroy_clientid\ +seek/ && $mode eq 'client' ) { my @split = split( /[\t\ ]+/, $line ); @@ -1045,7 +1023,7 @@ if ( $^O eq 'linux' ) { $data->{stats}{client_rpc_BindConnSess} += $split[2]; $data->{stats}{client_rpc_DestroyClId} += $split[3]; $data->{stats}{client_rpc_Seek} += $split[4]; - } elsif ( $previous_line =~ /allocate\ +deallocate\ +layoutstats\ +clone/ && $mode eq 'client' ) { + } elsif ( $previous_line =~ /^allocate\ +deallocate\ +layoutstats\ +clone/ && $mode eq 'client' ) { my @split = split( /[\t\ ]+/, $line ); $data->{stats}{client_rpc_Allocate} += $split[0]; $data->{stats}{client_rpc_Deallocate} += $split[1]; From a5e907725cc507ebf334e2ac8ba462bdeb79b589 Mon Sep 17 00:00:00 2001 From: "Zane C. 
Bowers-Hadley" Date: Sun, 24 Mar 2024 20:20:57 -0500 Subject: [PATCH 417/497] initial add for poudriere --- snmp/poudriere | 356 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 356 insertions(+) create mode 100755 snmp/poudriere diff --git a/snmp/poudriere b/snmp/poudriere new file mode 100755 index 000000000..9469bb385 --- /dev/null +++ b/snmp/poudriere @@ -0,0 +1,356 @@ +#!/usr/bin/env perl + +=head1 NAME + +poudriere - LibreNMS JSON style SNMP extend for monitoring Poudriere + +=head1 VERSION + +0.0.1 + +=head1 SYNOPSIS + +poudriere [B<-w>] [B<-b>] [B<-o> ] + +poudriere --help|-h + +poudriere --version|-v + +=head1 SNMPD CONFIG + + extend poudriere /etc/snmp/extends/poudriere -b + +or if using cron... + + extend poudriere cat /var/cache/poudriere.json.snmp + +=head1 DESCRIPTION + +Uses showmount and nfsstat to gather information for the OSes below for NFS. + + FreeBSD + Linux + +=head1 FLAGS + +=head2 -w + +Write the results out. + +=head2 -b + +Print out the compressed data if GZip+Base64 is smaller. + +=head2 -o + +Where to write the results to. Defaults to '/var/cache/poudriere.json', +meaning it will be written out to the two locations. + + /var/cache/poudriere.json + /var/cache/poudriere.json.snmp + +The later is for use with returning data for SNMP. Will be compressed +if possible. 
+ +=cut + +use strict; +use warnings; +use Getopt::Long; +use File::Slurp; +use MIME::Base64; +use IO::Compress::Gzip qw(gzip $GzipError); +use Pod::Usage; +use JSON; + +sub time_to_seconds{ + my $time=$_[0]; + + if (!defined($time)) { + return 0; + } + + if ($time=~/^0\:[0-9]+\.[0-9]+$/) { + $time=~s/^0\://; + return $time; + }elsif ($time=~/^[0-9]+\:[0-9]+\.[0-9]+$/) { + my $minutes=$time; + $minutes=~s/\:.*//; + $time=~s/.*\://; + $time = ($minutes * 60) + $time; + return $time; + }elsif ($time=~/^[0-9]+D\:[0-9]+\:[0-9]+\.[0-9]+$/) { + my $days=$time; + $days=~s/D\:.*$//; + my $minutes=$time; + $minutes=~s/^.*D\://; + $minutes=~s/\:.*//; + $time = ($days * 86400) + ($minutes * 60) + $time; + return $time; + } + + # return 0 for anything unknown + return 0; +} + +#the version of returned data +my $VERSION = 1; + +# ensure sbin is in the path +$ENV{PATH} = $ENV{PATH} . ':/sbin:/usr/sbin:/usr/local/sbin:/bin:/usr/bin:/usr/local/bin'; + +my $pretty; +my $cache_base = '/var/cache/poudriere.json'; +my $write; +my $compress; +my $version; +my $help; +GetOptions( + 'o=s' => \$cache_base, + w => \$write, + b => \$compress, + v => \$version, + version => \$version, + h => \$help, + help => \$help, +); + +if ($version) { + pod2usage( -exitval => 255, -verbose => 99, -sections => qw(VERSION), -output => \*STDOUT, ); + exit 255; +} + +if ($help) { + pod2usage( -exitval => 255, -verbose => 2, -output => \*STDOUT, ); + exit 255; +} + +#the data to return +my $to_return = { + 'version' => $VERSION, + 'error' => '0', + 'errorString' => '', +}; +my $data = { + status => '', + build_info => '', + not_done => 0, + stats => { + 'copy-on-write-faults' => 0, + 'cpu-time' => 0, + 'data-size' => 0, + 'elapsed-times' => 0, + 'involuntary-context-switches' => 0, + 'job-control-count' => 0, + 'major-faults' => 0, + 'minor-faults' => 0, + 'percent-cpu' => 0, + 'percent-memory' => 0, + 'read-blocks' => 0, + 'received-messages' => 0, + 'rss' => 0, + 'sent-messages' => 0, + 'stack-size' => 0, 
+ 'swaps' => 0, + 'system-time' => 0, + 'text-size' => 0, + 'threads' => 0, + 'user-time' => 0, + 'voluntary-context-switches' => 0, + 'written-blocks' => 0, + 'QUEUE' => 0, + 'BUILT' => 0, + 'FAIL' => 0, + 'SKIP' => 0, + 'IGNORE' => 0, + 'FETCH' => 0, + 'REMAIN' => 0, + 'TIME' => 0, + }, + jailANDportsANDset => {} +}; + +my @ps_stats = ( + 'copy-on-write-faults', 'cpu-time', + 'data-size', 'elapsed-times', + 'involuntary-context-switches', 'job-control-count', + 'major-faults', 'minor-faults', + 'percent-cpu', 'percent-memory', + 'read-blocks', 'received-messages', + 'rss', 'sent-messages', 'stack-size', + 'swaps', 'system-time', + 'text-size', 'threads', + 'user-time', 'voluntary-context-switches', +); + +my @poudriere_stats = ( 'QUEUE', 'BUILT', 'FAIL', 'SKIP', 'IGNORE', 'FETCH', 'REMAIN', 'TIME' ); + +### +### +### get basic info via calling poudriere status +### +### + +my $status_raw = `poudriere -N status -f 2> /dev/null`; +if ( $? == 0 ) { + $data->{status} = $status_raw; + $data->{build_info} = `poudriere -N status -f -b 2>&1`; + + my @status_split = split( /\n/, $status_raw ); + my $status_split_int = 1; + while ( defined( $status_split[$status_split_int] ) ) { + + my $jls; + eval { $jls = decode_json(`jls --libxo json`); }; + if ($@) { + $jls = { 'jail-information' => { jail => [] } }; + } + + my $found = { + 'copy-on-write-faults' => 0, + 'cpu-time' => 0, + 'data-size' => 0, + 'elapsed-times' => 0, + 'involuntary-context-switches' => 0, + 'job-control-count' => 0, + 'major-faults' => 0, + 'minor-faults' => 0, + 'percent-cpu' => 0, + 'percent-memory' => 0, + 'read-blocks' => 0, + 'received-messages' => 0, + 'rss' => 0, + 'sent-messages' => 0, + 'stack-size' => 0, + 'swaps' => 0, + 'system-time' => 0, + 'text-size' => 0, + 'threads' => 0, + 'user-time' => 0, + 'voluntary-context-switches' => 0, + }; + ( + $found->{SET}, $found->{PORTS}, $found->{JAIL}, $found->{BUILD}, $found->{STATUS}, + $found->{QUEUE}, $found->{BUILT}, $found->{FAIL}, $found->{SKIP}, 
$found->{IGNORE}, + $found->{FETCH}, $found->{REMAIN}, $found->{TIME}, $found->{LOGS} + ) = split( / +/, $status_split[$status_split_int], 14 ); + + if ($found->{STATUS} ne 'done') { + $data->{not_done}=1; + } + + my $jailANDportsANDset; + if ( $found->{SET} eq '-' ) { + $jailANDportsANDset = $found->{JAIL} . '-' . $found->{PORTS}; + } else { + $jailANDportsANDset = $found->{JAIL} . '-' . $found->{PORTS} . '-' . $found->{SET}; + } + + foreach my $item (@poudriere_stats) { + if ($item eq 'TIME') { + $found->{$item} = time_to_seconds($found->{$item}); + } + $data->{stats}{$item} += $found->{$item}; + } + + ## + ## find the jails + ## + my @jails; + my $jail_regex='^'.$jailANDportsANDset.'-job-[0-9]+'; + my $jls_int=0; + while (defined( $jls->{'jail-information'}{jail}[$jls_int] )) { + if ( + $jls->{'jail-information'}{jail}[$jls_int]{hostname} eq $jailANDportsANDset || + $jls->{'jail-information'}{jail}[$jls_int]{hostname} =~ /$jail_regex/ + ) { + push(@jails, $jls->{'jail-information'}{jail}[$jls_int]{jid}); + } + $jls_int++; + } + + ## + ## if we have found jails, grab the information via ps + ## + if (defined($jails[0])) { + my $jails_string=join(',', @jails); + + my $ps; + eval { + $ps = decode_json(`ps -o 'jid %cpu %mem rss cow dsiz etimes inblk jobc majflt minflt msgrcv msgsnd nivcsw nlwp nsigs nswap nvcsw oublk ssiz systime time tsiz usertime' --libxo json -J $jails_string`); + }; + if ($@) { + $ps = { 'process-information' => { process => [] } }; + } + my $ps_int=0; + while (defined( $ps->{'process-information'}{process}[$ps_int] )) { + foreach my $item (@ps_stats) { + if ($item eq 'user-time' || $item eq 'cpu-time' || $item eq 'system-time') { + $ps->{'process-information'}{process}[$ps_int]{$item} = time_to_seconds($ps->{'process-information'}{process}[$ps_int]{$item}); + } + $data->{stats}{$item} += $ps->{'process-information'}{process}[$ps_int]{$item}; + $found->{$item} += $ps->{'process-information'}{process}[$ps_int]{$item}; + } + $ps_int++; + } + } + + 
$data->{jailANDportsANDset}{$jailANDportsANDset} = $found; + $status_split_int++; + } ## end while ( defined( $status_split[$status_split_int...])) +} else { + $to_return->{error} = 1; + $to_return->{errorString} = 'non-zero exit for "poudriere status -f"'; +} + +### +### +### finalize it +### +### + +#add the data has to the return hash +$to_return->{data} = $data; + +#finally render the JSON +my $raw_json = encode_json($to_return); +if ($write) { + write_file( $cache_base, $raw_json ); + # compress and write to the cache file for it + my $compressed_string; + gzip \$raw_json => \$compressed_string; + my $compressed = encode_base64($compressed_string); + $compressed =~ s/\n//g; + $compressed = $compressed . "\n"; + my $print_compressed = 0; + if ( length($compressed) > length($raw_json) ) { + write_file( $cache_base . '.snmp', $raw_json ); + } else { + write_file( $cache_base . '.snmp', $compressed ); + $print_compressed = 1; + } + + if ( $compress && $print_compressed ) { + print $compressed; + } else { + print $raw_json; + } +} else { + if ( !$compress ) { + print $raw_json. "\n"; + exit; + } + + # compress and write to the cache file for it + my $compressed_string; + gzip \$raw_json => \$compressed_string; + my $compressed = encode_base64($compressed_string); + $compressed =~ s/\n//g; + $compressed = $compressed . "\n"; + my $print_compressed = 0; + if ( length($compressed) > length($raw_json) ) { + print $raw_json; + } else { + print $compressed; + } +} ## end else [ if ($write) ] From c5946d0f3d41764dad47d4ef1e401773ae0cc19d Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Mon, 25 Mar 2024 20:41:30 -0500 Subject: [PATCH 418/497] gatcher info on build stage --- snmp/poudriere | 193 ++++++++++++++++++++++++++++++++++--------------- 1 file changed, 136 insertions(+), 57 deletions(-) diff --git a/snmp/poudriere b/snmp/poudriere index 9469bb385..900fd9b64 100755 --- a/snmp/poudriere +++ b/snmp/poudriere @@ -26,11 +26,6 @@ or if using cron... 
=head1 DESCRIPTION -Uses showmount and nfsstat to gather information for the OSes below for NFS. - - FreeBSD - Linux - =head1 FLAGS =head2 -w @@ -63,35 +58,35 @@ use IO::Compress::Gzip qw(gzip $GzipError); use Pod::Usage; use JSON; -sub time_to_seconds{ - my $time=$_[0]; +sub time_to_seconds { + my $time = $_[0]; - if (!defined($time)) { + if ( !defined($time) ) { return 0; } - if ($time=~/^0\:[0-9]+\.[0-9]+$/) { - $time=~s/^0\://; + if ( $time =~ /^0\:[0-9]+\.[0-9]+$/ ) { + $time =~ s/^0\://; return $time; - }elsif ($time=~/^[0-9]+\:[0-9]+\.[0-9]+$/) { - my $minutes=$time; - $minutes=~s/\:.*//; - $time=~s/.*\://; - $time = ($minutes * 60) + $time; + } elsif ( $time =~ /^[0-9]+\:[0-9]+\.[0-9]+$/ ) { + my $minutes = $time; + $minutes =~ s/\:.*//; + $time =~ s/.*\://; + $time = ( $minutes * 60 ) + $time; return $time; - }elsif ($time=~/^[0-9]+D\:[0-9]+\:[0-9]+\.[0-9]+$/) { - my $days=$time; - $days=~s/D\:.*$//; - my $minutes=$time; - $minutes=~s/^.*D\://; - $minutes=~s/\:.*//; - $time = ($days * 86400) + ($minutes * 60) + $time; + } elsif ( $time =~ /^[0-9]+D\:[0-9]+\:[0-9]+\.[0-9]+$/ ) { + my $days = $time; + $days =~ s/D\:.*$//; + my $minutes = $time; + $minutes =~ s/^.*D\://; + $minutes =~ s/\:.*//; + $time = ( $days * 86400 ) + ( $minutes * 60 ) + $time; return $time; } # return 0 for anything unknown return 0; -} +} ## end sub time_to_seconds #the version of returned data my $VERSION = 1; @@ -133,8 +128,8 @@ my $to_return = { }; my $data = { status => '', - build_info => '', - not_done => 0, + build_info => '', + not_done => 0, stats => { 'copy-on-write-faults' => 0, 'cpu-time' => 0, @@ -166,21 +161,32 @@ my $data = { 'FETCH' => 0, 'REMAIN' => 0, 'TIME' => 0, + 'check-sanity' => 0, + 'pkg-depends' => 0, + 'fetch-depends' => 0, + 'fetch checksum' => 0, + 'extract-depends' => 0, + 'extract' => 0, + 'patch-depends' => 0, + 'patch' => 0, + 'build-depends' => 0, + 'lib-depends' => 0, + 'configure' => 0, + 'build' => 0, + 'run-depends' => 0, + 'stage' => 0, + 
'package' => 0, }, jailANDportsANDset => {} }; my @ps_stats = ( - 'copy-on-write-faults', 'cpu-time', - 'data-size', 'elapsed-times', - 'involuntary-context-switches', 'job-control-count', - 'major-faults', 'minor-faults', - 'percent-cpu', 'percent-memory', - 'read-blocks', 'received-messages', - 'rss', 'sent-messages', 'stack-size', - 'swaps', 'system-time', - 'text-size', 'threads', - 'user-time', 'voluntary-context-switches', + 'copy-on-write-faults', 'cpu-time', 'data-size', 'elapsed-times', + 'involuntary-context-switches', 'job-control-count', 'major-faults', 'minor-faults', + 'percent-cpu', 'percent-memory', 'read-blocks', 'received-messages', + 'rss', 'sent-messages', 'stack-size', 'swaps', + 'system-time', 'text-size', 'threads', 'user-time', + 'voluntary-context-switches', ); my @poudriere_stats = ( 'QUEUE', 'BUILT', 'FAIL', 'SKIP', 'IGNORE', 'FETCH', 'REMAIN', 'TIME' ); @@ -228,6 +234,21 @@ if ( $? == 0 ) { 'threads' => 0, 'user-time' => 0, 'voluntary-context-switches' => 0, + 'check-sanity' => 0, + 'pkg-depends' => 0, + 'fetch-depends' => 0, + 'fetch checksum' => 0, + 'extract-depends' => 0, + 'extract' => 0, + 'patch-depends' => 0, + 'patch' => 0, + 'build-depends' => 0, + 'lib-depends' => 0, + 'configure' => 0, + 'build' => 0, + 'run-depends' => 0, + 'stage' => 0, + 'package' => 0, }; ( $found->{SET}, $found->{PORTS}, $found->{JAIL}, $found->{BUILD}, $found->{STATUS}, @@ -235,8 +256,8 @@ if ( $? == 0 ) { $found->{FETCH}, $found->{REMAIN}, $found->{TIME}, $found->{LOGS} ) = split( / +/, $status_split[$status_split_int], 14 ); - if ($found->{STATUS} ne 'done') { - $data->{not_done}=1; + if ( $found->{STATUS} ne 'done' ) { + $data->{not_done} = 1; } my $jailANDportsANDset; @@ -247,8 +268,8 @@ if ( $? 
== 0 ) { } foreach my $item (@poudriere_stats) { - if ($item eq 'TIME') { - $found->{$item} = time_to_seconds($found->{$item}); + if ( $item eq 'TIME' ) { + $found->{$item} = time_to_seconds( $found->{$item} ); } $data->{stats}{$item} += $found->{$item}; } @@ -257,14 +278,13 @@ if ( $? == 0 ) { ## find the jails ## my @jails; - my $jail_regex='^'.$jailANDportsANDset.'-job-[0-9]+'; - my $jls_int=0; - while (defined( $jls->{'jail-information'}{jail}[$jls_int] )) { - if ( - $jls->{'jail-information'}{jail}[$jls_int]{hostname} eq $jailANDportsANDset || - $jls->{'jail-information'}{jail}[$jls_int]{hostname} =~ /$jail_regex/ - ) { - push(@jails, $jls->{'jail-information'}{jail}[$jls_int]{jid}); + my $jail_regex = '^' . $jailANDportsANDset . '-job-[0-9]+'; + my $jls_int = 0; + while ( defined( $jls->{'jail-information'}{jail}[$jls_int] ) ) { + if ( $jls->{'jail-information'}{jail}[$jls_int]{hostname} eq $jailANDportsANDset + || $jls->{'jail-information'}{jail}[$jls_int]{hostname} =~ /$jail_regex/ ) + { + push( @jails, $jls->{'jail-information'}{jail}[$jls_int]{jid} ); } $jls_int++; } @@ -272,32 +292,91 @@ if ( $? 
== 0 ) { ## ## if we have found jails, grab the information via ps ## - if (defined($jails[0])) { - my $jails_string=join(',', @jails); + if ( defined( $jails[0] ) ) { + my $jails_string = join( ',', @jails ); my $ps; eval { - $ps = decode_json(`ps -o 'jid %cpu %mem rss cow dsiz etimes inblk jobc majflt minflt msgrcv msgsnd nivcsw nlwp nsigs nswap nvcsw oublk ssiz systime time tsiz usertime' --libxo json -J $jails_string`); - }; + $ps + = decode_json( + `ps -o 'jid %cpu %mem rss cow dsiz etimes inblk jobc majflt minflt msgrcv msgsnd nivcsw nlwp nsigs nswap nvcsw oublk ssiz systime time tsiz usertime' --libxo json -J $jails_string` + ); + }; if ($@) { $ps = { 'process-information' => { process => [] } }; } - my $ps_int=0; - while (defined( $ps->{'process-information'}{process}[$ps_int] )) { + my $ps_int = 0; + while ( defined( $ps->{'process-information'}{process}[$ps_int] ) ) { foreach my $item (@ps_stats) { - if ($item eq 'user-time' || $item eq 'cpu-time' || $item eq 'system-time') { - $ps->{'process-information'}{process}[$ps_int]{$item} = time_to_seconds($ps->{'process-information'}{process}[$ps_int]{$item}); + if ( $item eq 'user-time' || $item eq 'cpu-time' || $item eq 'system-time' ) { + $ps->{'process-information'}{process}[$ps_int]{$item} + = time_to_seconds( $ps->{'process-information'}{process}[$ps_int]{$item} ); } $data->{stats}{$item} += $ps->{'process-information'}{process}[$ps_int]{$item}; $found->{$item} += $ps->{'process-information'}{process}[$ps_int]{$item}; } $ps_int++; - } - } + } ## end while ( defined( $ps->{'process-information'}...)) + } ## end if ( defined( $jails[0] ) ) $data->{jailANDportsANDset}{$jailANDportsANDset} = $found; $status_split_int++; } ## end while ( defined( $status_split[$status_split_int...])) + + my @build_info_split = split( /\n/, $data->{build_info} ); + my $current_section; + foreach my $line (@build_info_split) { + if ( $line =~ /^\[.*\]\ \[.*\] .*Queued.*Built/ ) { + $current_section = $line; + $current_section =~ 
s/^\[//; + $current_section =~ s/\].*$//; + } elsif ( $line =~ /^\[.*\].*\:.*\|.*\:/ ) { + my $type; + if ( $line =~ /[\ \t]check\-sanity[\ \t]/ ) { + $type = 'check-sanity'; + } elsif ( $line =~ /[\ \t]pkg-depends[\ \t]/ ) { + $type = 'pkg-depends'; + } elsif ( $line =~ /[\ \t]fetch-depends[\ \t]/ ) { + $type = 'fetch-depends'; + } elsif ( $line =~ /[\ \t]fetch[\ \t]/ ) { + $type = 'fetch'; + } elsif ( $line =~ /[\ \t]checksum[\ \t]/ ) { + $type = 'checksum'; + } elsif ( $line =~ /[\ \t]extract\-depends[\ \t]/ ) { + $type = 'extract-depends'; + } elsif ( $line =~ /[\ \t]extract[\ \t]/ ) { + $type = 'extract'; + } elsif ( $line =~ /[\ \t]patch-depends[\ \t]/ ) { + $type = 'patch-depends'; + } elsif ( $line =~ /[\ \t]lib\-depends[\ \t]/ ) { + $type = 'lib-depends'; + } elsif ( $line =~ /[\ \t]configure[\ \t]/ ) { + $type = 'configure'; + } elsif ( $line =~ /[\ \t]build[\ \t]/ ) { + $type = 'build'; + } elsif ( $line =~ /[\ \t]build\-depends[\ \t]/ ) { + $type = 'build-depends'; + } elsif ( $line =~ /[\ \t]lib\-depends[\ \t]/ ) { + $type = 'lib-depends'; + } elsif ( $line =~ /[\ \t]configure[\ \t]/ ) { + $type = 'configure'; + } elsif ( $line =~ /[\ \t]build[\ \t]/ ) { + $type = 'build'; + } elsif ( $line =~ /[\ \t]run\-depends[\ \t]/ ) { + $type = 'run-depends'; + } elsif ( $line =~ /[\ \t]stage[\ \t]/ ) { + $type = 'stage'; + } elsif ( $line =~ /[\ \t]package[\ \t]/ ) { + $type = 'package'; + } + if (defined($type)) { + $data->{stats}{$type}++; + if (defined($data->{jailANDportsANDset}{$current_section})) { + $data->{jailANDportsANDset}{$current_section}{$type}++; + } + } + } ## end elsif ( $line =~ /^\[[0-9]+\].*\/.*\|.*-.*\:/) + } ## end foreach my $line (@build_info_split) } else { $to_return->{error} = 1; $to_return->{errorString} = 'non-zero exit for "poudriere status -f"'; From f6c33460e9590e8b89c051720745d6937b48bed6 Mon Sep 17 00:00:00 2001 From: "Zane C. 
Bowers-Hadley" Date: Tue, 26 Mar 2024 17:55:27 -0500 Subject: [PATCH 419/497] a few minor tweaks --- snmp/poudriere | 2 -- 1 file changed, 2 deletions(-) diff --git a/snmp/poudriere b/snmp/poudriere index 900fd9b64..b413fc895 100755 --- a/snmp/poudriere +++ b/snmp/poudriere @@ -24,8 +24,6 @@ or if using cron... extend poudriere cat /var/cache/poudriere.json.snmp -=head1 DESCRIPTION - =head1 FLAGS =head2 -w From 4178a227c1765b118a5ee0f893aa5c71dbd55599 Mon Sep 17 00:00:00 2001 From: Edwin Hoksberg Date: Tue, 23 Apr 2024 14:45:41 +0200 Subject: [PATCH 420/497] [add-opensearch-options] Add secure(-S) and disable hostname validation(-i) options for opensearch script --- snmp/opensearch | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/snmp/opensearch b/snmp/opensearch index 5b731b2eb..91f8752d4 100755 --- a/snmp/opensearch +++ b/snmp/opensearch @@ -35,6 +35,8 @@ Supported command line options are as below. Default: 127.0.0.1 -p The port to use. Default: 9200 + -S Use https instead of http. + -I Do not verify hostname (when used with -S). -P Pretty print. The last is only really relevant to the usage with SNMP. @@ -59,21 +61,27 @@ sub main::HELP_MESSAGE { . " Default: 127.0.0.1\n" . "-p The port to use.\n" . " Default: 9200\n" + . "-S Use https instead of http.\n" + . "-I Do not verify hostname (when used with -S).\n" . "-P Pretty print.\n"; } +my $protocol = 'http'; my $host = '127.0.0.1'; my $port = 9200; #gets the options my %opts; -getopts( 'h:p:P', \%opts ); +getopts( 'h:p:SIP', \%opts ); if ( defined( $opts{h} ) ) { $host = $opts{h}; } if ( defined( $opts{p} ) ) { $port = $opts{p}; } +if ( $opts{S} ) { + $protocol = 'https'; +} # my $to_return = { @@ -83,8 +91,8 @@ my $to_return = { date => {}, }; -my $stats_url = 'http://' . $host . ':' . $port . '/_stats'; -my $health_url = 'http://' . $host . ':' . $port . '/_cluster/health'; +my $stats_url = $protocol . '://' . $host . ':' . $port . '/_stats'; +my $health_url = $protocol . 
'://' . $host . ':' . $port . '/_cluster/health'; my $json = JSON->new->allow_nonref->canonical(1); if ( $opts{P} ) { @@ -93,6 +101,10 @@ if ( $opts{P} ) { my $ua = LWP::UserAgent->new( timeout => 10 ); +if ( $opts{I} ) { + $ua->ssl_opts( verify_hostname => 0, SSL_verify_mode => 0x00 ); +} + my $stats_response = $ua->get($stats_url); my $stats_json; if ( $stats_response->is_success ) { From 7d2707faa25c312f94ed8feb405b4aefe25672c8 Mon Sep 17 00:00:00 2001 From: Marco Valle Date: Sun, 28 Apr 2024 18:50:24 +0200 Subject: [PATCH 421/497] HTTPS and CA file validation implemented. --- snmp/opensearch | 22 ++++++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) diff --git a/snmp/opensearch b/snmp/opensearch index 5b731b2eb..300115a32 100755 --- a/snmp/opensearch +++ b/snmp/opensearch @@ -31,11 +31,14 @@ Add this to snmpd.conf as below and restart snmpd. Supported command line options are as below. + -c CA file path. + Default: empty -h The host to connect to. Default: 127.0.0.1 -p The port to use. Default: 9200 -P Pretty print. + -S Use HTTPS. The last is only really relevant to the usage with SNMP. @@ -55,25 +58,31 @@ sub main::VERSION_MESSAGE { sub main::HELP_MESSAGE { print "\n" + . "-c CA file path.\n" . "-h The host to connect to.\n" . " Default: 127.0.0.1\n" . "-p The port to use.\n" . " Default: 9200\n" - . "-P Pretty print.\n"; + . "-P Pretty print.\n" + . "-S Use HTTPS.\n"; } my $host = '127.0.0.1'; my $port = 9200; +my $schema = 'http'; #gets the options my %opts; -getopts( 'h:p:P', \%opts ); +getopts( 'c:h:p:P:S', \%opts ); if ( defined( $opts{h} ) ) { $host = $opts{h}; } if ( defined( $opts{p} ) ) { $port = $opts{p}; } +if ( $opts{S} ) { + $schema = 'https'; +} # my $to_return = { @@ -83,8 +92,8 @@ my $to_return = { date => {}, }; -my $stats_url = 'http://' . $host . ':' . $port . '/_stats'; -my $health_url = 'http://' . $host . ':' . $port . '/_cluster/health'; +my $stats_url = $schema . '://' . $host . ':' . $port . 
'/_stats'; +my $health_url = $schema . '://' . $host . ':' . $port . '/_cluster/health'; my $json = JSON->new->allow_nonref->canonical(1); if ( $opts{P} ) { @@ -93,6 +102,11 @@ if ( $opts{P} ) { my $ua = LWP::UserAgent->new( timeout => 10 ); +if ( defined( $opts{c} ) ) { + # set ca file + $ua->ssl_opts( SSL_ca_file => $opts{c}); +} + my $stats_response = $ua->get($stats_url); my $stats_json; if ( $stats_response->is_success ) { From f417394def47cfb69d0cea20de770e8f4e378d7b Mon Sep 17 00:00:00 2001 From: Marco Valle Date: Sun, 28 Apr 2024 18:58:30 +0200 Subject: [PATCH 422/497] Authorization header from file implemented. --- snmp/opensearch | 28 +++++++++++++++++++++++++--- 1 file changed, 25 insertions(+), 3 deletions(-) diff --git a/snmp/opensearch b/snmp/opensearch index 300115a32..cac3d5093 100755 --- a/snmp/opensearch +++ b/snmp/opensearch @@ -31,6 +31,7 @@ Add this to snmpd.conf as below and restart snmpd. Supported command line options are as below. + -a Auth token path. -c CA file path. Default: empty -h The host to connect to. @@ -58,6 +59,7 @@ sub main::VERSION_MESSAGE { sub main::HELP_MESSAGE { print "\n" + . "-a Auth token path.\n" . "-c CA file path.\n" . "-h The host to connect to.\n" . 
" Default: 127.0.0.1\n" @@ -73,7 +75,7 @@ my $schema = 'http'; #gets the options my %opts; -getopts( 'c:h:p:P:S', \%opts ); +getopts( 'a:c:h:p:P:S', \%opts ); if ( defined( $opts{h} ) ) { $host = $opts{h}; } @@ -84,6 +86,14 @@ if ( $opts{S} ) { $schema = 'https'; } +my $auth_token; +if ( defined( $opts{a} ) ) { + open my $auth_file, '<', $opts{a}; + $auth_token = <$auth_file>; + close $auth_file; + chop $auth_token; +} + # my $to_return = { error => 0, @@ -107,7 +117,13 @@ if ( defined( $opts{c} ) ) { $ua->ssl_opts( SSL_ca_file => $opts{c}); } -my $stats_response = $ua->get($stats_url); +my $stats_response; +if ( defined( $opts{a} ) ) { + $stats_response = $ua->get($stats_url, "Authorization" => $auth_token,); +} else { + $stats_response = $ua->get($stats_url); +} + my $stats_json; if ( $stats_response->is_success ) { eval { $stats_json = decode_json( $stats_response->decoded_content ); }; @@ -131,7 +147,13 @@ else { exit; } -my $health_response = $ua->get($health_url); +my $health_response; +if ( defined( $opts{a} ) ) { + $health_response = $ua->get($health_url, "Authorization" => $auth_token,); +} else { + $health_response = $ua->get($health_url); +} + my $health_json; if ( $health_response->is_success ) { eval { $health_json = decode_json( $health_response->decoded_content ); }; From dcaebf6976cbbff19e03c54ff0371e052e913c11 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Sun, 12 May 2024 10:21:20 -0500 Subject: [PATCH 423/497] begin work on redis.pl --- snmp/redis.pl | 104 ++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 104 insertions(+) create mode 100644 snmp/redis.pl diff --git a/snmp/redis.pl b/snmp/redis.pl new file mode 100644 index 000000000..490ef6d81 --- /dev/null +++ b/snmp/redis.pl @@ -0,0 +1,104 @@ +#!/usr/bin/env perl + +=head1 NAME + +logsize - LinbreNMS JSON extend for redis. + +=head1 SYNOPSIS + +logsize [B<-B>] + +=head1 SWITCHES + +=head2 -B + +Do not the return output via GZip+Base64. 
+ +=head1 SETUP + +Install the depends. + + # FreeBSD + pkg install p5-JSON p5-TOML p5-MIME-Base64 + # Debian + apt-get install libjson-perl libmime-base64-perl + +Create the cache dir, by default "/var/cache/". + +Then set it up in SNMPD. + + # if running it via cron + extend redis /usr/local/etc/snmp/redis.pl + +=cut + +use warnings; +use strict; +use JSON; +use Getopt::Std; +use MIME::Base64; +use IO::Compress::Gzip qw(gzip $GzipError); +use File::Slurp; + +$Getopt::Std::STANDARD_HELP_VERSION = 1; + +sub main::VERSION_MESSAGE { + print "LibreNMS redis extend 0.0.1\n"; +} + +sub main::HELP_MESSAGE { + print ' + +-B Do not use Gzip+Base64 for the output. +'; +} + +my $return_json = { + error => 0, + errorString => '', + version => 1, + data => { + }, +}; + +#gets the options +my %opts = (); +getopts( 'B', \%opts ); + +# ensure that $ENV{PATH} has has it +$ENV{PATH}=$ENV{PATH}.':/usr/bin:/usr/sbin:/usr/local/sbin:/usr/local/bin'; + +my $output_raw=`redis-cli info 2> /dev/null`; +if ($? != 0) { + $return_json->{error}=1; + $return_json->{error}='redis-cli info exited non-zero'; + print encode_json($return_json)."\n"; +} + +$output_raw=~s/\r//g; +my $section; +foreach my $line (split(/\n/, $output_raw)) { + if ($line ne '' && $line =~ /^# /) { + $line =~ s/^# //; + $section= $line; + $return_json->{data}{$section}={}; + }elsif ($line ne '' && defined($section)) { + my ($key, $value)=split(/\:/, $line); + if (defined($key) && defined($value)) { + $return_json->{data}{$section}{$key}=$value; + } + } +} + +my $return_json_raw=encode_json($return_json); +if ($opts{B}) { + print $return_json_raw."\n"; + exit 0; +} + +my $toReturnCompressed; +gzip \$return_json_raw => \$toReturnCompressed; +my $compressed = encode_base64($toReturnCompressed); +$compressed =~ s/\n//g; +$compressed = $compressed . "\n"; +print $compressed; From 205df64e4d84ad77f747bda7146359a0bd7572e0 Mon Sep 17 00:00:00 2001 From: "Zane C. 
Bowers-Hadley" Date: Sun, 12 May 2024 10:41:14 -0500 Subject: [PATCH 424/497] add help/version info --- snmp/redis.pl | 70 +++++++++++++++++++++++++++++++-------------------- 1 file changed, 43 insertions(+), 27 deletions(-) mode change 100644 => 100755 snmp/redis.pl diff --git a/snmp/redis.pl b/snmp/redis.pl old mode 100644 new mode 100755 index 490ef6d81..3d4a80830 --- a/snmp/redis.pl +++ b/snmp/redis.pl @@ -2,7 +2,7 @@ =head1 NAME -logsize - LinbreNMS JSON extend for redis. +redis.pl - LinbreNMS JSON extend for redis. =head1 SYNOPSIS @@ -14,6 +14,14 @@ =head2 -B Do not the return output via GZip+Base64. +=head2 -h|--help + +Print help info. + +=head2 -v|--version + +Print version info. + =head1 SETUP Install the depends. @@ -39,6 +47,7 @@ =head1 SETUP use MIME::Base64; use IO::Compress::Gzip qw(gzip $GzipError); use File::Slurp; +use Pod::Usage; $Getopt::Std::STANDARD_HELP_VERSION = 1; @@ -47,52 +56,59 @@ sub main::VERSION_MESSAGE { } sub main::HELP_MESSAGE { - print ' - --B Do not use Gzip+Base64 for the output. -'; + pod2usage( -exitval => 255, -verbose => 2, -output => \*STDOUT, ); } my $return_json = { error => 0, errorString => '', version => 1, - data => { - }, + data => {}, }; #gets the options my %opts = (); -getopts( 'B', \%opts ); +getopts( 'Bhv', \%opts ); + +if ( $opts{v} ) { + main::VERSION_MESSAGE; + exit 256; +} + +if ( $opts{h} ) { + main::VERSION_MESSAGE; + pod2usage( -exitval => 255, -verbose => 2, -output => \*STDOUT, ); + exit 256; +} # ensure that $ENV{PATH} has has it -$ENV{PATH}=$ENV{PATH}.':/usr/bin:/usr/sbin:/usr/local/sbin:/usr/local/bin'; +$ENV{PATH} = $ENV{PATH} . ':/usr/bin:/usr/sbin:/usr/local/sbin:/usr/local/bin'; -my $output_raw=`redis-cli info 2> /dev/null`; -if ($? != 0) { - $return_json->{error}=1; - $return_json->{error}='redis-cli info exited non-zero'; - print encode_json($return_json)."\n"; +my $output_raw = `redis-cli info 2> /dev/null`; +if ( $? 
!= 0 ) { + $return_json->{error} = 1; + $return_json->{error} = 'redis-cli info exited non-zero'; + print encode_json($return_json) . "\n"; } -$output_raw=~s/\r//g; +$output_raw =~ s/\r//g; my $section; -foreach my $line (split(/\n/, $output_raw)) { - if ($line ne '' && $line =~ /^# /) { +foreach my $line ( split( /\n/, $output_raw ) ) { + if ( $line ne '' && $line =~ /^# / ) { $line =~ s/^# //; - $section= $line; - $return_json->{data}{$section}={}; - }elsif ($line ne '' && defined($section)) { - my ($key, $value)=split(/\:/, $line); - if (defined($key) && defined($value)) { - $return_json->{data}{$section}{$key}=$value; + $section = $line; + $return_json->{data}{$section} = {}; + } elsif ( $line ne '' && defined($section) ) { + my ( $key, $value ) = split( /\:/, $line ); + if ( defined($key) && defined($value) ) { + $return_json->{data}{$section}{$key} = $value; } } -} +} ## end foreach my $line ( split( /\n/, $output_raw ) ) -my $return_json_raw=encode_json($return_json); -if ($opts{B}) { - print $return_json_raw."\n"; +my $return_json_raw = encode_json($return_json); +if ( $opts{B} ) { + print $return_json_raw. "\n"; exit 0; } From 2d2e3bcda805509b7d20e06fbd13ff4e02891e4c Mon Sep 17 00:00:00 2001 From: "Zane C. 
Bowers-Hadley" Date: Tue, 4 Jun 2024 00:51:47 -0500 Subject: [PATCH 425/497] ignore inactive arrays for now --- snmp/mdadm | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/snmp/mdadm b/snmp/mdadm index b0c9b3c5f..023af68a5 100755 --- a/snmp/mdadm +++ b/snmp/mdadm @@ -1,7 +1,7 @@ #!/usr/bin/env bash # MDADM SNMP extension for LibreNMS # Version -extendVer='2.0.0' +extendVer='2' # Initial portion of json mdadmSNMPOutput='{ "data": [' @@ -40,6 +40,10 @@ main() { [[ "${mdadmArray}" =~ '/dev/md'[[:digit:]]+'p' ]] && continue mdadmName="$(basename "$(realpath "${mdadmArray}")")" + + # Ignore inactive arrays + [[ $(grep "^${mdadmName}" /proc/mdstat) =~ 'inactive' ]] && continue + mdadmSysDev="/sys/block/${mdadmName}" degraded=$(maybe_get "${mdadmSysDev}/md/degraded") From 6150a33af81af6ad8474c053c7b436e54d5a5fce Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Sun, 9 Jun 2024 22:56:30 -0500 Subject: [PATCH 426/497] add new php-fpm extend that supports multiple instances (#525) --- snmp/php-fpm | 235 +++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 235 insertions(+) create mode 100755 snmp/php-fpm diff --git a/snmp/php-fpm b/snmp/php-fpm new file mode 100755 index 000000000..7413a5aa3 --- /dev/null +++ b/snmp/php-fpm @@ -0,0 +1,235 @@ +#!/usr/bin/env perl + +=head1 NAME + +php-fpm - LibreNMS JSON SNMP extend for gathering information for php-fpm + +=head1 VERSION + +0.0.1 + +=head1 DESCRIPTION + +For more information, see L. + +=head1 SWITCHES + +=head1 -c + +The config file to use. + +Default: /usr/local/etc/php-fpm_extend.json + +=head2 -C + +Do not compress the information return using GZip+Base64. + +=head1 -h|--help + +Print help info. + +=head1 -v|--version + +Print version info. + +=head1 CONFIG FILE + +The config file is a JSON file. + + - .instances :: An hash of instances to fetch. The key represents the + instance name and value is the URL to fetch, minus the '?json' bit. 
+ Default :: undef + + - .use_exec :: A boolean for instances values should be treated as a command + instead of a URL. All instances must be a command and can not be a lone URL. + The returned data is expected to be parsable JSON data. + Default :: 0 + +Example... + + { + "instances": { + "thefrog": "https://thefrog/fpm-status", + "foobar": "https://foo.bar/fpm-status" + } + } + +A use_exec example... + + { + "instances": { + "thefrog": "curl 'https://thefrog/fpm-status?json' 2> /dev/null", + "foobar": "curl 'https://foo.bar/fpm-status?json' 2> /dev/null", + }, + "use_exec": 1 + } + +=cut + +use strict; +use warnings; +use JSON; +use Getopt::Long; +use File::Slurp; +use IO::Compress::Gzip qw(gzip $GzipError); +use MIME::Base64; +use Pod::Usage; +use String::ShellQuote; + +sub return_the_data { + my $to_return = $_[0]; + my $do_not_compress = $_[1]; + + my $to_return_string = encode_json($to_return); + + if ($do_not_compress) { + print $to_return_string . "\n"; + return; + } + + my $toReturnCompressed; + gzip \$to_return_string => \$toReturnCompressed; + my $compressed = encode_base64($toReturnCompressed); + $compressed =~ s/\n//g; + $compressed = $compressed . 
"\n"; + print $compressed; +} ## end sub return_the_data + +#gets the options +my %opts; +my $do_not_compress; +my $version; +my $help; +my $config_file = '/usr/local/etc/php-fpm_extend.json'; +GetOptions( + C => \$do_not_compress, + v => \$version, + version => \$version, + h => \$help, + help => \$help, +); + +if ($version) { + pod2usage( -exitval => 255, -verbose => 99, -sections => qw(VERSION), -output => \*STDOUT, ); +} + +if ($help) { + pod2usage( -exitval => 255, -verbose => 2, -output => \*STDOUT, ); +} + +my @to_total = ( + "accepted conn", + "active processes", + "idle processes", + "listen queue", + "listen queue len", + "max active processes", + "max children reached", + "max listen queue", + "slow requests", + "total processes", +); + +my $to_return = { + data => { + instances => {}, + instance_errors => {}, + errored => 0, + totals => { + "accepted conn" => 0, + "active processes" => 0, + "idle processes" => 0, + "listen queue" => 0, + "listen queue len" => 0, + "max active processes" => 0, + "max children reached" => 0, + "max listen queue" => 0, + "slow requests" => 0, + "total processes" => 0, + }, + }, + version => 1, + error => 0, + errorString => '', +}; + +# error if the config does not exist +if ( !-f $config_file ) { + $to_return->{errorString} = 'Config file, "' . $config_file . '", does not exist'; + $to_return->{error} = 1; + return_the_data( $to_return, $do_not_compress ); + exit 1; +} + +# read the config and decode it +my $config; +eval { + my $raw_config = read_file($config_file); + $config = decode_json($raw_config); +}; +if ($@) { + $to_return->{errorString} = 'Reading config errored... ' . 
$@; + $to_return->{error} = 2; + return_the_data( $to_return, $do_not_compress ); + exit 1; +} + +# ensure the config is basically sane +if ( !defined( $config->{instances} ) ) { + $to_return->{errorString} = '.instances does not exist in the config'; + $to_return->{error} = 3; + return_the_data( $to_return, $do_not_compress ); + exit 1; +} +if ( ref( $config->{instances} ) ne 'HASH' ) { + $to_return->{errorString} = '.instances is not a hash'; + $to_return->{error} = 3; + return_the_data( $to_return, $do_not_compress ); + exit 1; +} +if ( defined( $config->{use_exec} ) && ref( $config->{use_exec} ) ne '' ) { + $to_return->{errorString} = '.use_exec is defined and is a hash or array'; + $to_return->{error} = 3; + return_the_data( $to_return, $do_not_compress ); + exit 1; +} + +# get a list of instances and process each instance +my @instances = keys( %{ $config->{instances} } ); +foreach my $item (@instances) { + if ( ref( $config->{instances}{$item} ) eq '' ) { + my $command; + if ( !$config->{use_exec} ) { + $command = 'curl ' . shell_quote( $config->{instances}{$item} . '?json' ) . ' 2> /dev/null'; + } else { + $command = $config->{instances}{$item}; + } + eval { + my $instance_data_raw = `$command`; + if ( $? ne 0 ) { + $command =~ s/\"/\\\"/g; + die( 'command "' . $command . '" exited non-zero returnining... ' . 
$instance_data_raw ); + } + my $instance_data; + $to_return->{data}{instances}{$item} = decode_json($instance_data_raw); + }; + # if + if ($@) { + $to_return->{data}{instances}{$item} = {}; + $to_return->{data}{instance_errors}{$item} = $@; + $to_return->{data}{errored} = 1; + }else { + # add the the instance to the totals + foreach my $total_item (@to_total) { + if (defined($to_return->{data}{instances}{$item}{$total_item}) + && $to_return->{data}{instances}{$item}{$total_item} =~ /^\d+$/ + ) { + $to_return->{data}{totals}{$total_item} += $to_return->{data}{instances}{$item}{$total_item}; + } + } + } + } ## end if ( ref( $config->{instances}{$item} ) eq...) +} ## end foreach my $item (@instances) + +return_the_data( $to_return, $do_not_compress ); +exit 0; From 5ef5732ccd1bb88f22886a6d008d22175262d26a Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Sat, 15 Jun 2024 04:10:10 -0500 Subject: [PATCH 427/497] php-fpm: change instances to pools and add start since min total (#527) * php-fpm cleanup * add start since min * remove use line for Statistics::Lite --- snmp/php-fpm | 93 ++++++++++++++++++++++++++++++++-------------------- 1 file changed, 57 insertions(+), 36 deletions(-) diff --git a/snmp/php-fpm b/snmp/php-fpm index 7413a5aa3..0510b8b6d 100755 --- a/snmp/php-fpm +++ b/snmp/php-fpm @@ -36,19 +36,19 @@ Print version info. The config file is a JSON file. - - .instances :: An hash of instances to fetch. The key represents the - instance name and value is the URL to fetch, minus the '?json' bit. + - .pools :: An hash of pools to fetch. The key represents the + pool name and value is the URL to fetch, minus the '?json' bit. Default :: undef - - .use_exec :: A boolean for instances values should be treated as a command - instead of a URL. All instances must be a command and can not be a lone URL. + - .use_exec :: A boolean for pools values should be treated as a command + instead of a URL. All poolss must be a command and can not be a lone URL. 
The returned data is expected to be parsable JSON data. Default :: 0 Example... { - "instances": { + "pools": { "thefrog": "https://thefrog/fpm-status", "foobar": "https://foo.bar/fpm-status" } @@ -57,7 +57,7 @@ Example... A use_exec example... { - "instances": { + "pools": { "thefrog": "curl 'https://thefrog/fpm-status?json' 2> /dev/null", "foobar": "curl 'https://foo.bar/fpm-status?json' 2> /dev/null", }, @@ -130,12 +130,15 @@ my @to_total = ( "total processes", ); +my @to_migrate = @to_total; +push( @to_migrate, 'start since', 'start time', 'pool', 'process manager' ); + my $to_return = { data => { - instances => {}, - instance_errors => {}, - errored => 0, - totals => { + pools => {}, + pool_errors => {}, + errored => 0, + totals => { "accepted conn" => 0, "active processes" => 0, "idle processes" => 0, @@ -146,6 +149,7 @@ my $to_return = { "max listen queue" => 0, "slow requests" => 0, "total processes" => 0, + 'start since min' => undef, }, }, version => 1, @@ -175,14 +179,14 @@ if ($@) { } # ensure the config is basically sane -if ( !defined( $config->{instances} ) ) { - $to_return->{errorString} = '.instances does not exist in the config'; +if ( !defined( $config->{pools} ) ) { + $to_return->{errorString} = '.pools does not exist in the config'; $to_return->{error} = 3; return_the_data( $to_return, $do_not_compress ); exit 1; } -if ( ref( $config->{instances} ) ne 'HASH' ) { - $to_return->{errorString} = '.instances is not a hash'; +if ( ref( $config->{pools} ) ne 'HASH' ) { + $to_return->{errorString} = '.pools is not a hash'; $to_return->{error} = 3; return_the_data( $to_return, $do_not_compress ); exit 1; @@ -194,42 +198,59 @@ if ( defined( $config->{use_exec} ) && ref( $config->{use_exec} ) ne '' ) { exit 1; } -# get a list of instances and process each instance -my @instances = keys( %{ $config->{instances} } ); -foreach my $item (@instances) { - if ( ref( $config->{instances}{$item} ) eq '' ) { +# get a list of pools and process each pool +my @pools = 
keys( %{ $config->{pools} } ); +foreach my $item (@pools) { + if ( ref( $config->{pools}{$item} ) eq '' ) { my $command; if ( !$config->{use_exec} ) { - $command = 'curl ' . shell_quote( $config->{instances}{$item} . '?json' ) . ' 2> /dev/null'; + $command = 'curl ' . shell_quote( $config->{pools}{$item} . '?json' ) . ' 2> /dev/null'; } else { - $command = $config->{instances}{$item}; + $command = $config->{pools}{$item}; } eval { - my $instance_data_raw = `$command`; + my $pool_data_raw = `$command`; if ( $? ne 0 ) { $command =~ s/\"/\\\"/g; - die( 'command "' . $command . '" exited non-zero returnining... ' . $instance_data_raw ); + die( 'command "' . $command . '" exited non-zero returnining... ' . $pool_data_raw ); + } + my $pool_data = decode_json($pool_data_raw); + $to_return->{data}{pools}{$item} = {}; + # ensure the hash only includes what we want and nothing unexpected + foreach my $migrate_item (@to_migrate) { + if ( defined( $pool_data->{$migrate_item} ) && ref( $pool_data->{$migrate_item} ) eq '' ) { + $to_return->{data}{pools}{$item}{$migrate_item} = $pool_data->{$migrate_item}; + } } - my $instance_data; - $to_return->{data}{instances}{$item} = decode_json($instance_data_raw); }; # if if ($@) { - $to_return->{data}{instances}{$item} = {}; - $to_return->{data}{instance_errors}{$item} = $@; - $to_return->{data}{errored} = 1; - }else { - # add the the instance to the totals + $to_return->{data}{pools}{$item} = {}; + $to_return->{data}{pool_errors}{$item} = $@; + $to_return->{data}{errored} = 1; + } else { + # add the the pool to the totals foreach my $total_item (@to_total) { - if (defined($to_return->{data}{instances}{$item}{$total_item}) - && $to_return->{data}{instances}{$item}{$total_item} =~ /^\d+$/ - ) { - $to_return->{data}{totals}{$total_item} += $to_return->{data}{instances}{$item}{$total_item}; + if ( defined( $to_return->{data}{pools}{$item}{$total_item} ) + && $to_return->{data}{pools}{$item}{$total_item} =~ /^\d+$/ ) + { + 
$to_return->{data}{totals}{$total_item} += $to_return->{data}{pools}{$item}{$total_item}; } } - } - } ## end if ( ref( $config->{instances}{$item} ) eq...) -} ## end foreach my $item (@instances) + + # handle start since min + if ( defined( $to_return->{data}{pools}{$item}{'start since'} ) + && $to_return->{data}{pools}{$item}{'start since'} =~ /^\d+$/ ) + { + if ( !defined( $to_return->{data}{totals}{'start since min'} ) + || $to_return->{data}{pools}{$item}{'start since'} < $to_return->{data}{totals}{'start since min'} ) + { + $to_return->{data}{totals}{'start since min'} = $to_return->{data}{pools}{$item}{'start since'}; + } + } + } ## end else [ if ($@) ] + } ## end if ( ref( $config->{pools}{$item} ) eq '' ) +} ## end foreach my $item (@pools) return_the_data( $to_return, $do_not_compress ); exit 0; From d2e14731fc4a41738a50a5ebe1af7b32ade0d512 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Sat, 15 Jun 2024 15:13:05 -0500 Subject: [PATCH 428/497] php-fpm: add last request cpu (#529) --- snmp/php-fpm | 70 +++++++++++++++++++++++++++++++--------------------- 1 file changed, 42 insertions(+), 28 deletions(-) diff --git a/snmp/php-fpm b/snmp/php-fpm index 0510b8b6d..05a71dc2f 100755 --- a/snmp/php-fpm +++ b/snmp/php-fpm @@ -58,8 +58,8 @@ A use_exec example... 
{ "pools": { - "thefrog": "curl 'https://thefrog/fpm-status?json' 2> /dev/null", - "foobar": "curl 'https://foo.bar/fpm-status?json' 2> /dev/null", + "thefrog": "curl 'https://thefrog/fpm-status?json&full' 2> /dev/null", + "foobar": "curl 'https://foo.bar/fpm-status?json&full' 2> /dev/null", }, "use_exec": 1 } @@ -118,16 +118,17 @@ if ($help) { } my @to_total = ( - "accepted conn", - "active processes", - "idle processes", - "listen queue", - "listen queue len", - "max active processes", - "max children reached", - "max listen queue", - "slow requests", - "total processes", + 'accepted conn', + 'active processes', + 'idle processes', + 'listen queue', + 'listen queue len', + 'max active processes', + 'max children reached', + 'max listen queue', + 'slow requests', + 'total processes', + 'last request cpu', ); my @to_migrate = @to_total; @@ -139,17 +140,18 @@ my $to_return = { pool_errors => {}, errored => 0, totals => { - "accepted conn" => 0, - "active processes" => 0, - "idle processes" => 0, - "listen queue" => 0, - "listen queue len" => 0, - "max active processes" => 0, - "max children reached" => 0, - "max listen queue" => 0, - "slow requests" => 0, - "total processes" => 0, - 'start since min' => undef, + 'accepted conn' => 0, + 'active processes' => 0, + 'idle processes' => 0, + 'listen queue' => 0, + 'listen queue len' => 0, + 'max active processes' => 0, + 'max children reached' => 0, + 'max listen queue' => 0, + 'slow requests' => 0, + 'total processes' => 0, + 'start since' => undef, + 'last request cpu' => 0, }, }, version => 1, @@ -204,7 +206,7 @@ foreach my $item (@pools) { if ( ref( $config->{pools}{$item} ) eq '' ) { my $command; if ( !$config->{use_exec} ) { - $command = 'curl ' . shell_quote( $config->{pools}{$item} . '?json' ) . ' 2> /dev/null'; + $command = 'curl ' . shell_quote( $config->{pools}{$item} . '?json&full' ) . 
' 2> /dev/null'; } else { $command = $config->{pools}{$item}; } @@ -222,6 +224,18 @@ foreach my $item (@pools) { $to_return->{data}{pools}{$item}{$migrate_item} = $pool_data->{$migrate_item}; } } + + if (defined($pool_data->{'processes'}) && ref($pool_data->{'processes'}) eq 'ARRAY') { + $to_return->{data}{pools}{$item}{'last request cpu'} = 0; + foreach my $proc_item (@{ $pool_data->{'processes'} }) { + if (defined( $proc_item->{'last request cpu'}) && + ref($proc_item->{'last request cpu'}) eq '' && + $proc_item->{'last request cpu'} =~ /\d+\.\d+/ + ) { + $to_return->{data}{pools}{$item}{'last request cpu'} += $proc_item->{'last request cpu'}; + } + } + } }; # if if ($@) { @@ -232,7 +246,7 @@ foreach my $item (@pools) { # add the the pool to the totals foreach my $total_item (@to_total) { if ( defined( $to_return->{data}{pools}{$item}{$total_item} ) - && $to_return->{data}{pools}{$item}{$total_item} =~ /^\d+$/ ) + && $to_return->{data}{pools}{$item}{$total_item} =~ /^(\d+|\d+\.\d+)$/ ) { $to_return->{data}{totals}{$total_item} += $to_return->{data}{pools}{$item}{$total_item}; } @@ -242,10 +256,10 @@ foreach my $item (@pools) { if ( defined( $to_return->{data}{pools}{$item}{'start since'} ) && $to_return->{data}{pools}{$item}{'start since'} =~ /^\d+$/ ) { - if ( !defined( $to_return->{data}{totals}{'start since min'} ) - || $to_return->{data}{pools}{$item}{'start since'} < $to_return->{data}{totals}{'start since min'} ) + if ( !defined( $to_return->{data}{totals}{'start since'} ) + || $to_return->{data}{pools}{$item}{'start since'} < $to_return->{data}{totals}{'start since'} ) { - $to_return->{data}{totals}{'start since min'} = $to_return->{data}{pools}{$item}{'start since'}; + $to_return->{data}{totals}{'start since'} = $to_return->{data}{pools}{$item}{'start since'}; } } } ## end else [ if ($@) ] From 2b0b98837db50e04643d838e79c72a24b44d0089 Mon Sep 17 00:00:00 2001 From: "Zane C. 
Bowers-Hadley" Date: Fri, 5 Jul 2024 15:22:23 -0500 Subject: [PATCH 429/497] snmp/redis.pl now handles multiple instances and has a config file (#530) --- snmp/redis.pl | 197 ++++++++++++++++++++++++++++++++++++++++---------- 1 file changed, 159 insertions(+), 38 deletions(-) diff --git a/snmp/redis.pl b/snmp/redis.pl index 3d4a80830..812c98f8b 100755 --- a/snmp/redis.pl +++ b/snmp/redis.pl @@ -2,14 +2,24 @@ =head1 NAME -redis.pl - LinbreNMS JSON extend for redis. +redis - LinbreNMS JSON extend for redis. =head1 SYNOPSIS -logsize [B<-B>] +redis [B<-B>] [B<-c> ] + +redis [B<-v>|B<--version>] + +redis [B<-h>|B<--help>] =head1 SWITCHES +=head2 -c + +Config file to use. + +Default: /usr/local/etc/redis_extend.json + =head2 -B Do not the return output via GZip+Base64. @@ -38,6 +48,36 @@ =head1 SETUP # if running it via cron extend redis /usr/local/etc/snmp/redis.pl +If for multiple instances or the default of 'redis-cli info' +won't work, a config file will be needed. The config format +is JSON. + +The config entries are as below. + + - command :: If single instance, the command to use. + Type :: String + Default :: redis-cli + + - instances :: A hash where the keys are the instances names + and the values for each key are the command to use. + +The default config would be like below, which will be what is used +if no config file is specified/found. + + { + "command": "redis-cli info" + } + +For something with two instances, "foo" on port 6379 and "bar" on port 6380 +it would be like below. + + { + "instances": { + "foo": "redis-cli -p 6379", + "bar": "redis-cli -p 6380" + } + } + =cut use warnings; @@ -59,16 +99,39 @@ sub main::HELP_MESSAGE { pod2usage( -exitval => 255, -verbose => 2, -output => \*STDOUT, ); } +sub return_the_data { + my $to_return = $_[0]; + my $do_not_compress = $_[1]; + + my $to_return_string = encode_json($to_return); + + if ($do_not_compress) { + print $to_return_string . 
"\n"; + return; + } + + my $toReturnCompressed; + gzip \$to_return_string => \$toReturnCompressed; + my $compressed = encode_base64($toReturnCompressed); + $compressed =~ s/\n//g; + $compressed = $compressed . "\n"; + print $compressed; +} ## end sub return_the_data + my $return_json = { error => 0, errorString => '', - version => 1, - data => {}, + version => 2, + data => { 'extend_errors' => [] }, }; #gets the options my %opts = (); -getopts( 'Bhv', \%opts ); +getopts( 'Bhvc:', \%opts ); + +if ( !defined( $opts{c} ) ) { + $opts{c} = '/usr/local/etc/redis_extend.json'; +} if ( $opts{v} ) { main::VERSION_MESSAGE; @@ -81,40 +144,98 @@ sub main::HELP_MESSAGE { exit 256; } -# ensure that $ENV{PATH} has has it -$ENV{PATH} = $ENV{PATH} . ':/usr/bin:/usr/sbin:/usr/local/sbin:/usr/local/bin'; - -my $output_raw = `redis-cli info 2> /dev/null`; -if ( $? != 0 ) { - $return_json->{error} = 1; - $return_json->{error} = 'redis-cli info exited non-zero'; - print encode_json($return_json) . "\n"; -} - -$output_raw =~ s/\r//g; -my $section; -foreach my $line ( split( /\n/, $output_raw ) ) { - if ( $line ne '' && $line =~ /^# / ) { - $line =~ s/^# //; - $section = $line; - $return_json->{data}{$section} = {}; - } elsif ( $line ne '' && defined($section) ) { - my ( $key, $value ) = split( /\:/, $line ); - if ( defined($key) && defined($value) ) { - $return_json->{data}{$section}{$key} = $value; +my $single = 1; +my $config = { command => 'redis-cli info' }; +if ( -f $opts{c} ) { + eval { + my $raw_config = read_file( $opts{c} ); + $config = decode_json($raw_config); + if ( !defined( $config->{instances} ) ) { + if ( !defined( $config->{command} ) ) { + $config->{command} = 'redis-cli info'; + } + } elsif ( ref( $config->{instances} ) ne 'HASH' ) { + die( '.instances is defined and is not a hash but ref type ' . 
ref( $config->{instances} ) ); + } else { + $single = 0; } + }; + if ($@) { + push( @{ $return_json->{data}{extend_errors} }, $@ ); + return_the_data( $return_json, $opts{B} ); + exit 0; } -} ## end foreach my $line ( split( /\n/, $output_raw ) ) +} ## end if ( -f $opts{c} ) -my $return_json_raw = encode_json($return_json); -if ( $opts{B} ) { - print $return_json_raw. "\n"; - exit 0; -} +# ensure that $ENV{PATH} has has it +$ENV{PATH} = $ENV{PATH} . ':/usr/bin:/usr/sbin:/usr/local/sbin:/usr/local/bin'; -my $toReturnCompressed; -gzip \$return_json_raw => \$toReturnCompressed; -my $compressed = encode_base64($toReturnCompressed); -$compressed =~ s/\n//g; -$compressed = $compressed . "\n"; -print $compressed; +if ($single) { + my $command = $config->{command}; + my $output_raw = `$command 2> /dev/null`; + if ( $? != 0 ) { + push( + @{ $return_json->{data}{extend_errors} }, + '"' . $command . '" exited non-zero for with... ' . $output_raw + ); + } else { + $output_raw =~ s/\r//g; + my $section; + foreach my $line ( split( /\n/, $output_raw ) ) { + if ( $line ne '' && $line =~ /^# / ) { + $line =~ s/^# //; + $section = $line; + $return_json->{data}{$section} = {}; + } elsif ( $line ne '' && defined($section) ) { + my ( $key, $value ) = split( /\:/, $line ); + if ( defined($key) && defined($value) ) { + $return_json->{data}{$section}{$key} = $value; + } + } + } ## end foreach my $line ( split( /\n/, $output_raw ) ) + } ## end else [ if ( $? != 0 ) ] +} else { + my @instances = keys( %{ $config->{instances} } ); + $return_json->{data}{instances} = {}; + foreach my $instance (@instances) { + if ( ref( $config->{instances}{$instance} ) ne '' ) { + push( + @{ $return_json->{data}{extend_errors} }, + 'instance "' . $instance . '" is ref type ' . ref( $config->{instances}{$instance} ) + ); + } elsif ( $instance =~ /^[\-\_]/ ) { + push( @{ $return_json->{data}{extend_errors} }, 'instance "' . $instance . 
'" matches /^[\-\_]/' ); + } elsif ( $instance =~ /[\-\_\n\s\"\']$/ ) { + push( @{ $return_json->{data}{extend_errors} }, + 'instance "' . $instance . '" matches /[\-\_\n\s\'\\\"]$/' ); + } else { + my $command = $config->{instances}{$instance}; + my $output_raw = `$command 2> /dev/null`; + if ( $? != 0 ) { + push( + @{ $return_json->{data}{extend_errors} }, + '"' . $command . '" exited non-zero for instance "' . $instance . '" with... ' . $output_raw + ); + } else { + $output_raw =~ s/\r//g; + my $section; + $return_json->{data}{instances}{$instance} = {}; + foreach my $line ( split( /\n/, $output_raw ) ) { + if ( $line ne '' && $line =~ /^# / ) { + $line =~ s/^# //; + $section = $line; + $return_json->{data}{instances}{$instance}{$section} = {}; + } elsif ( $line ne '' && defined($section) ) { + my ( $key, $value ) = split( /\:/, $line ); + if ( defined($key) && defined($value) ) { + $return_json->{data}{instances}{$instance}{$section}{$key} = $value; + } + } + } ## end foreach my $line ( split( /\n/, $output_raw ) ) + } ## end else [ if ( $? != 0 ) ] + } ## end else [ if ( ref( $config->{instances}{$instance} ...))] + } ## end foreach my $instance (@instances) +} ## end else [ if ($single) ] + +return_the_data( $return_json, $opts{B} ); +exit 0; From 0c86ae620772f4c8b8bf834593d084a6ba4005a6 Mon Sep 17 00:00:00 2001 From: "Zane C. 
Bowers-Hadley" Date: Sat, 6 Jul 2024 22:16:14 -0500 Subject: [PATCH 430/497] add new wireguard extend (#531) * add new wireguard extend * add perl tidy and perl critic configs * disable jscpd as it resusts in false positives * note why jscpd is disabled --- .github/workflows/linter.yml | 3 + .perlcriticrc | 1 + .perltidyrc | 29 ++ snmp/wireguard.pl | 528 +++++++++++++++++++++++++++++++++++ 4 files changed, 561 insertions(+) create mode 100644 .perlcriticrc create mode 100644 .perltidyrc create mode 100755 snmp/wireguard.pl diff --git a/.github/workflows/linter.yml b/.github/workflows/linter.yml index 797b4f20f..0aa1ee5a5 100644 --- a/.github/workflows/linter.yml +++ b/.github/workflows/linter.yml @@ -30,6 +30,9 @@ jobs: VALIDATE_PHP_PHPCS: false VALIDATE_PHP_PSALM: false + # errors on sanity checking in snmp/wireguard.pl + VALIDATE_JSCPD: false + SHELLCHECK_OPTS: --severity=warning DEFAULT_BRANCH: master diff --git a/.perlcriticrc b/.perlcriticrc new file mode 100644 index 000000000..ab2e45531 --- /dev/null +++ b/.perlcriticrc @@ -0,0 +1 @@ +exclude = ProhibitExplicitReturnUndef ProhibitOneArgBless ProhibitStringyEval diff --git a/.perltidyrc b/.perltidyrc new file mode 100644 index 000000000..87f068e2f --- /dev/null +++ b/.perltidyrc @@ -0,0 +1,29 @@ +-l=120 +-i=4 +-ci=4 +-st +-se + +-et=4 + +#-aws +-xci +#-dws +-vt=0 +-cti=0 +-bt=1 +-sbt=1 +-bbt=0 +-nsfs +-nolq +-ce +-csc +-csci=10 +-csct=40 +-cb +-iscl +-sbc +-nbbc + +-wbb="% + - * / x != == >= <= =~ !~ < > | & >= < = **= += *= &= <<= &&= -= /= |= >>= ||= .= %= ^= x=" + diff --git a/snmp/wireguard.pl b/snmp/wireguard.pl new file mode 100755 index 000000000..d032c772a --- /dev/null +++ b/snmp/wireguard.pl @@ -0,0 +1,528 @@ +#!/usr/bin/env perl + +use warnings; +use strict; + +=head1 NAME + +wireguard - LinbreNMS JSON extend for wireguard. 
+ +=head1 VERSION + +0.0.1 + +=cut + +our $VERSION = '0.0.1'; + +=head1 SYNOPSIS + +wireguard [B<-B>] [B<-c> ] [B<-p><0|1>] [B<-r> ] [B<-s><0|1>] + +wireguard [B<-v>|B<--version>] + +wireguard [B<-h>|B<--help>] + +=head1 SWITCHES + +=head2 -c + +Config file to use. + +Default: /usr/local/etc/wireguard_extend.json + +=head2 -p <0|1> + +Include the public key. + +Overrides the config item .include_pubey . + +=head2 -r + +A string of resolvers to use. + +Overrides the config item .pubkey_resolvers . + +=head2 -s <0|1> + +Use short hostnames + +Overrides the config item .use_short_hostname . + +=head2 -B + +Do not the return output via GZip+Base64. + +=head2 -h|--help + +Print help info. + +=head2 -v|--version + +Print version info. + +=head1 INSTALL + +Install the depends. + + # FreeBSD + pkg install p5-JSON p5-TOML p5-MIME-Base64 + # Debian + apt-get install libjson-perl libmime-base64-perl + +Then set it up in SNMPD. + + # if running it via cron + extend wireguard /usr/local/etc/snmp/wireguard + +=head1 CONFIG + +The default config is /usr/local/etc/wireguard_extend.json . + +The keys for it are as below. + + - include_pubkey :: Include the pubkey with the return. + values :: 0|1 + default :: 0 + + - use_short_hostname :: If the hostname should be shortname to just the first bit. + values :: 0|1 + default :: 1 + + - public_key_to_arbitrary_name :: An array of pubkys to name mappings. + default :: {} + + - pubkey_resolvers :: A list of resolvers to use to convert pubkeys to names. The + value is a comma seperated string. + default :: config,endpoint_if_first_allowed_is_subnet_use_hosts,endpoint_if_first_allowed_is_subnet_use_ip,first_allowed_use_hosts,first_allowed_use_ip + +=head2 PUBKEY RESOLVERS + +=head3 config + +Use the mappings from .public_key_to_arbitrary_name . + +The names are unaffected by .use_short_names . 
+ +=head3 endpoint_if_first_allowed_is_subnet_use_hosts + +If the first allowed IP is a subnet, see if a matching IP can +be found in hosts for the endpoint. + +=head3 endpoint_if_first_allowed_is_subnet_use_getent + +If the first allowed IP is a subnet, see if a hit can be +found for the endpoint IP via getent hosts. + +This will possible use reverse DNS. + +=head3 endpoint_if_first_allowed_is_subnet_use_ip + +If the first allowed IP is a subnet, use the endpoint +IP for the name. + +=head3 first_allowed_use_hosts + +See if a match can be found in hosts for the first allowed IP. + +=head3 first_allowed_use_getent + +Use getent hosts to see try to fetch a match for the first +allowed IP. + +This will possible use reverse DNS. + +=head3 first_allowed_use_ip + +Use the first allowed IP as the name. + +=cut + +use JSON; +use Getopt::Std; +use MIME::Base64; +use IO::Compress::Gzip qw(gzip $GzipError); +use File::Slurp; +use Pod::Usage; +use Socket; + +$Getopt::Std::STANDARD_HELP_VERSION = 1; + +sub main::VERSION_MESSAGE { + print 'wireguard LibreNMS extend v. '.$VERSION."\n"; +} + +sub main::HELP_MESSAGE { + pod2usage( -exitval => 255, -verbose => 2, -output => \*STDOUT, ); +} + +sub return_the_data { + my $to_return = $_[0]; + my $do_not_compress = $_[1]; + + my $to_return_string = encode_json($to_return); + + if ($do_not_compress) { + print $to_return_string . "\n"; + return; + } + + my $toReturnCompressed; + gzip \$to_return_string => \$toReturnCompressed; + my $compressed = encode_base64($toReturnCompressed); + $compressed =~ s/\n//g; + $compressed = $compressed . 
"\n"; + print $compressed; +} ## end sub return_the_data + +# arg[0]: string +# return[0]: host +# return[1]: port +sub host_port_split { + my $string = $_[0]; + if ( !defined($string) || $string =~ /\([Nn][Oo][Nn][Ee]\)/ ) { + return undef, undef; + } + + my $host = $string; + my $port = $string; + if ( $string =~ /^\[/ ) { + $host =~ s/^\[//; + $host =~ s/\]\:.*$//; + $port =~ s/^.*\]\://; + } else { + $host =~ s/\:.*$//; + $port =~ s/^.*\://; + } + + return $host, $port; +} ## end sub host_port_split + +my $return_json = { + error => 0, + errorString => '', + version => 2, + data => {}, +}; + +#gets the options +my %opts = (); +getopts( 'Bhvc:r:s:p:', \%opts ); + +if ( !defined( $opts{c} ) ) { + $opts{c} = '/usr/local/etc/wireguard_extend.json'; +} + +if ( $opts{v} ) { + &main::VERSION_MESSAGE; + exit 1; +} + +if ( $opts{h} ) { + &main::HELP_MESSAGE; + exit 1; +} + +## +## +## real in the config +## +## +our $config = { + include_pubkey => 0, + pubkey_resolvers => + 'config,endpoint_if_first_allowed_is_subnet_use_hosts,endpoint_if_first_allowed_is_subnet_use_ip,first_allowed_use_hosts,first_allowed_use_ip', + use_short_hostname => 1, + public_key_to_arbitrary_name => {}, +}; +if ( -f $opts{c} ) { + eval { + my $raw_config = read_file( $opts{c} ); + my $parsed_config = decode_json($raw_config); + if ( defined( $parsed_config->{public_key_to_arbitrary_name} ) + && ref( $parsed_config->{public_key_to_arbitrary_name} ) eq 'HASH' ) + { + $config->{public_key_to_arbitrary_name} = $parsed_config->{public_key_to_arbitrary_name}; + } + if ( defined( $parsed_config->{include_pubkey} ) && ref( $parsed_config->{include_pubkey} ) eq '' ) { + $config->{include_pubkey} = $parsed_config->{include_pubkey}; + } + if ( defined( $parsed_config->{pubkey_resolvers} ) && ref( $parsed_config->{pubkey_resolvers} ) eq '' ) { + $config->{pubkey_resolvers} = $parsed_config->{pubkey_resolvers}; + $config->{pubkey_resolvers} =~ s/\ //g; + } + if ( defined( 
$parsed_config->{pubkey_resolver_cache_file} ) + && ref( $parsed_config->{pubkey_resolver_cache_file} ) eq '' ) + { + $config->{pubkey_resolver_cache_file} = $parsed_config->{pubkey_resolver_cache_file}; + } + if ( defined( $parsed_config->{use_short_hostname} ) && ref( $parsed_config->{use_short_hostname} ) eq '' ) + { + $config->{use_short_hostname} = $parsed_config->{use_short_hostname}; + } + }; + if ($@) { + $return_json->{error} = 1; + $return_json->{errorString} = $@; + return_the_data( $return_json, $opts{B} ); + exit 0; + } +} ## end if ( -f $opts{c} ) + +if ( defined( $opts{p} ) ) { + $config->{include_pubkey} = $opts{p}; +} + +if ( defined( $opts{s} ) ) { + $config->{use_short_hostname} = $opts{s}; +} + +if ( defined( $opts{r} ) ) { + $config->{pubkey_resolvers} = $opts{r}; + $config->{pubkey_resolvers} =~ s/\ //g; +} + +# ensure that $ENV{PATH} has has it +$ENV{PATH} = $ENV{PATH} . ':/usr/bin:/usr/sbin:/usr/local/sbin:/usr/local/bin'; + +## +## +## get all the information +## +## +my $wg_info = {}; + +# get endpoint info +my $command_raw = `wg show all endpoints 2> /dev/null`; +if ( $? == 0 ) { + my @command_split = split( /\n/, $command_raw ); + my $interface; + foreach my $line (@command_split) { + my $pubkey; + my $host; + my $port; + + my @line_split = split( /[\t\ ]+/, $line ); + if ( defined( $line_split[2] ) ) { + $interface = $line_split[0]; + $pubkey = $line_split[1]; + ( $host, $port ) = host_port_split( $line_split[2] ); + } else { + $pubkey = $line_split[0]; + ( $host, $port ) = host_port_split( $line_split[1] ); + } + + $wg_info->{$interface} = { + $pubkey => { + endpoint_host => $host, + endpoint_port => $port, + allowed_ips => [], + } + }; + } ## end foreach my $line (@command_split) +} ## end if ( $? == 0 ) + +# get the transfer info +$command_raw = `wg show all transfer 2> /dev/null`; +if ( $? 
== 0 ) { + my @command_split = split( /\n/, $command_raw ); + foreach my $line (@command_split) { + my ( $interface, $pubkey, $recv, $sent ) = split( /[\t\ ]+/, $line ); + if ( defined($sent) ) { + $wg_info->{$interface}{$pubkey}{bytes_rcvd} = $recv; + $wg_info->{$interface}{$pubkey}{bytes_sent} = $sent; + } + } +} ## end if ( $? == 0 ) + +# get the handshake info +$command_raw = `wg show all latest-handshakes 2> /dev/null`; +my $current_time = time; +if ( $? == 0 ) { + my @command_split = split( /\n/, $command_raw ); + foreach my $line (@command_split) { + my ( $interface, $pubkey, $when ) = split( /[\t\ ]+/, $line ); + if ( $when == 0 ) { + $wg_info->{$interface}{$pubkey}{minutes_since_last_handshake} = undef; + } else { + $wg_info->{$interface}{$pubkey}{minutes_since_last_handshake} = ( $current_time - $when ) / 60; + } + } +} ## end if ( $? == 0 ) + +# get allowed subnets +$command_raw = `wg show all allowed-ips 2> /dev/null`; +if ( $? == 0 ) { + my @command_split = split( /\n/, $command_raw ); + foreach my $line (@command_split) { + my @line_split = split( /[\t\ ]+/, $line ); + my $int = 2; + while ( defined( $line_split[$int] ) ) { + if ( $line_split[$int] =~ /^[0-9\.]+\/32$/ ) { + $line_split[$int] =~ s/\/32//; + } elsif ( $line_split[$int] =~ /^[A-Fa-f0-9\:]+\/128$/ ) { + $line_split[$int] =~ s/\/128//; + } + push( @{ $wg_info->{ $line_split[0] }{ $line_split[1] }{allowed_ips} }, $line_split[$int] ); + $int++; + } + } ## end foreach my $line (@command_split) +} ## end if ( $? == 0 ) + +## +## +## try to translate pubkeys to a name +## +## +sub getent_hosts { + my $ip = $_[0]; + if ( !defined($ip) ) { + return undef; + } + # a bit of sanity checking, but this should never hit... wg should only return IPs for what this is used for + if ( $ip !~ /^[a-fA-F\:\.0-9]+$/ ) { + return undef; + } + my $command_raw = `getent hosts $ip 2> /dev/null`; + if ( $? 
!= 0 ) { + return undef; + } + my @command_split = split( /\n/, $command_raw ); + if ( defined( $command_split[0] ) ) { + my @line_split = split( /[\t\ ]+/, $command_split[0] ); + if ( defined( $line_split[1] ) ) { + $line_split[1] =~ s/^\.//; + if ( $config->{use_short_hostname} ) { + $line_split[1] =~ s/\..*$//; + } + return $line_split[1]; + } + } ## end if ( defined( $command_split[0] ) ) + return undef; +} ## end sub getent_hosts +our $hosts_read = 0; +our $hosts = {}; + +sub hosts { + my $ip = $_[0]; + if ( !defined($ip) ) { + return undef; + } + if ( !$hosts_read ) { + $hosts_read = 1; + eval { + my $hosts_raw = read_file('/etc/hosts'); + my @hosts_split = grep( !/^[\t\ ]*$/, grep( !/^[\ \t]*\#/, split( /\n/, $hosts_raw ) ) ); + foreach my $line (@hosts_split) { + my @line_split = split( /[\t\ ]+/, $line ); + if ( defined( $line_split[0] ) && defined( $line_split[1] ) ) { + $line_split[1] =~ s/^\.//; + if ( $config->{use_short_hostname} ) { + $line_split[1] =~ s/\..*$//; + } + + $hosts->{ $line_split[0] } = $line_split[1]; + } + } ## end foreach my $line (@hosts_split) + }; + } ## end if ( !$hosts_read ) + if ( defined( $hosts->{$ip} ) ) { + return $hosts->{$ip}; + } + return undef; +} ## end sub hosts +my @interfaces = keys( %{$wg_info} ); +my @resolvers = split( /\,+/, $config->{pubkey_resolvers} ); +foreach my $interface (@interfaces) { + my @pubkeys = keys( %{ $wg_info->{$interface} } ); + foreach my $pubkey (@pubkeys) { + my $matched = 0; + my $resolvers_int = 0; + while ( !$matched && defined( $resolvers[$resolvers_int] ) ) { + my $resolver = $resolvers[$resolvers_int]; + if ( !$matched && $resolver eq 'config' ) { + if ( defined( $config->{public_key_to_arbitrary_name}{$pubkey} ) ) { + $wg_info->{$interface}{$pubkey}{name} = $config->{public_key_to_arbitrary_name}{$pubkey}; + $matched = 1; + } + } elsif ( !$matched && $resolver eq 'endpoint_if_first_allowed_is_subnet_use_getent' ) { + if ( defined( $wg_info->{$interface}{$pubkey}{allowed_ips}[0] ) + 
&& $wg_info->{$interface}{$pubkey}{allowed_ips}[0] =~ /\// + && defined( $wg_info->{$interface}{$pubkey}{endpoint_host} ) ) + { + my $name = getent_hosts( $wg_info->{$interface}{$pubkey}{endpoint_host} ); + if ( defined($name) ) { + $wg_info->{$interface}{$pubkey}{name} = $name; + $matched = 1; + } + } ## end if ( defined( $wg_info->{$interface}{$pubkey...})) + } elsif ( !$matched && $resolver eq 'endpoint_if_first_allowed_is_subnet_use_hosts' ) { + if ( defined( $wg_info->{$interface}{$pubkey}{allowed_ips}[0] ) + && $wg_info->{$interface}{$pubkey}{allowed_ips}[0] =~ /\// + && defined( $wg_info->{$interface}{$pubkey}{endpoint_host} ) ) + { + my $name = hosts( $wg_info->{$interface}{$pubkey}{endpoint_host} ); + if ( defined($name) ) { + $wg_info->{$interface}{$pubkey}{name} = $name; + $matched = 1; + } + } ## end if ( defined( $wg_info->{$interface}{$pubkey...})) + } elsif ( !$matched && $resolver eq 'endpoint_if_first_allowed_is_subnet_use_ip' ) { + if ( defined( $wg_info->{$interface}{$pubkey}{allowed_ips}[0] ) + && $wg_info->{$interface}{$pubkey}{allowed_ips}[0] =~ /\// + && defined( $wg_info->{$interface}{$pubkey}{endpoint_host} ) ) + { + $wg_info->{$interface}{$pubkey}{name} = $wg_info->{$interface}{$pubkey}{endpoint_host}; + $matched = 1; + } + } elsif ( !$matched && $resolver eq 'first_allowed_use_getent' ) { + if ( defined( $wg_info->{$interface}{$pubkey}{allowed_ips}[0] ) ) { + my $host = $wg_info->{$interface}{$pubkey}{allowed_ips}[0]; + my $name = getent_hosts($host); + if ( defined($name) ) { + $wg_info->{$interface}{$pubkey}{name} = $name; + $matched = 1; + } + } + } elsif ( !$matched && $resolver eq 'first_allowed_use_hosts' ) { + if ( defined( $wg_info->{$interface}{$pubkey}{allowed_ips}[0] ) ) { + my $host = $wg_info->{$interface}{$pubkey}{allowed_ips}[0]; + my $name = hosts($host); + if ( defined($name) ) { + $wg_info->{$interface}{$pubkey}{name} = $name; + $matched = 1; + } + } + } elsif ( !$matched && $resolver eq 'first_allowed_use_ip' ) { + 
$wg_info->{$interface}{$pubkey}{name} = $wg_info->{$interface}{$pubkey}{allowed_ips}[0]; + $matched = 1; + } + $resolvers_int++; + } ## end while ( !$matched && defined( $resolvers[$resolvers_int...])) + } ## end foreach my $pubkey (@pubkeys) +} ## end foreach my $interface (@interfaces) + +## +## +## translate found information to output info +## +## + +foreach my $interface (@interfaces) { + my @pubkeys = keys( %{ $wg_info->{$interface} } ); + foreach my $pubkey (@pubkeys) { + if ( defined( $wg_info->{$interface}{$pubkey}{name} ) ) { + if ( !defined( $return_json->{data}{$interface} ) ) { + $return_json->{data}{$interface} = {}; + } + $return_json->{data}{$interface}{ $wg_info->{$interface}{$pubkey}{name} } = $wg_info->{$interface}{$pubkey}; + if ($config->{include_pubkey}) { + $return_json->{data}{$interface}{ $wg_info->{$interface}{$pubkey}{name} }{pubkey} = $pubkey; + }else { + $return_json->{data}{$interface}{ $wg_info->{$interface}{$pubkey}{name} }{pubkey} = undef; + } + } + } +} ## end foreach my $interface (@interfaces) + +return_the_data( $return_json, $opts{B} ); From 6d4eb35f3bebe1e0c453f1a508f3995d4fb031fd Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Sat, 6 Jul 2024 22:59:29 -0500 Subject: [PATCH 431/497] cleanup php-fpm a bix and fix being able to use a custom config path (#532) --- snmp/php-fpm | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/snmp/php-fpm b/snmp/php-fpm index 05a71dc2f..324abdc49 100755 --- a/snmp/php-fpm +++ b/snmp/php-fpm @@ -6,7 +6,7 @@ php-fpm - LibreNMS JSON SNMP extend for gathering information for php-fpm =head1 VERSION -0.0.1 +0.0.2 =head1 DESCRIPTION @@ -14,7 +14,7 @@ For more information, see L +=head2 -f The config file to use. @@ -24,11 +24,11 @@ Default: /usr/local/etc/php-fpm_extend.json Do not compress the information return using GZip+Base64. -=head1 -h|--help +=head2 -h|--help Print help info. -=head1 -v|--version +=head2 -v|--version Print version info. 
@@ -103,6 +103,7 @@ my $help; my $config_file = '/usr/local/etc/php-fpm_extend.json'; GetOptions( C => \$do_not_compress, + 'f=s' => \$config_file, v => \$version, version => \$version, h => \$help, @@ -225,17 +226,17 @@ foreach my $item (@pools) { } } - if (defined($pool_data->{'processes'}) && ref($pool_data->{'processes'}) eq 'ARRAY') { + if ( defined( $pool_data->{'processes'} ) && ref( $pool_data->{'processes'} ) eq 'ARRAY' ) { $to_return->{data}{pools}{$item}{'last request cpu'} = 0; - foreach my $proc_item (@{ $pool_data->{'processes'} }) { - if (defined( $proc_item->{'last request cpu'}) && - ref($proc_item->{'last request cpu'}) eq '' && - $proc_item->{'last request cpu'} =~ /\d+\.\d+/ - ) { + foreach my $proc_item ( @{ $pool_data->{'processes'} } ) { + if ( defined( $proc_item->{'last request cpu'} ) + && ref( $proc_item->{'last request cpu'} ) eq '' + && $proc_item->{'last request cpu'} =~ /\d+\.\d+/ ) + { $to_return->{data}{pools}{$item}{'last request cpu'} += $proc_item->{'last request cpu'}; } } - } + } ## end if ( defined( $pool_data->{'processes'} ) ...) }; # if if ($@) { From f1713dd62416e99082ad22aa12925cc5203c7ac0 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Sat, 6 Jul 2024 23:38:06 -0500 Subject: [PATCH 432/497] wireguard: fix depend info and remove use socket as that is not being used (#533) --- snmp/wireguard.pl | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/snmp/wireguard.pl b/snmp/wireguard.pl index d032c772a..271fefe2b 100755 --- a/snmp/wireguard.pl +++ b/snmp/wireguard.pl @@ -66,9 +66,9 @@ =head1 INSTALL Install the depends. # FreeBSD - pkg install p5-JSON p5-TOML p5-MIME-Base64 + pkg install p5-JSON p5-File-Slurp p5-MIME-Base64 # Debian - apt-get install libjson-perl libmime-base64-perl + apt-get install libjson-perl libmime-base64-perl libfile-slurp-perl Then set it up in SNMPD. 
@@ -144,7 +144,6 @@ =head3 first_allowed_use_ip use IO::Compress::Gzip qw(gzip $GzipError); use File::Slurp; use Pod::Usage; -use Socket; $Getopt::Std::STANDARD_HELP_VERSION = 1; From b0ada6e01032243456d0efb919cddf9f37d13e2b Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Wed, 10 Jul 2024 03:31:56 -0500 Subject: [PATCH 433/497] endpoint stuff now works (#534) --- snmp/wireguard.pl | 108 ++++++++++++++++++++++++---------------------- 1 file changed, 56 insertions(+), 52 deletions(-) diff --git a/snmp/wireguard.pl b/snmp/wireguard.pl index 271fefe2b..5a35197e8 100755 --- a/snmp/wireguard.pl +++ b/snmp/wireguard.pl @@ -9,11 +9,11 @@ =head1 NAME =head1 VERSION -0.0.1 +0.0.2 =cut -our $VERSION = '0.0.1'; +our $VERSION = '0.0.2'; =head1 SYNOPSIS @@ -148,7 +148,7 @@ =head3 first_allowed_use_ip $Getopt::Std::STANDARD_HELP_VERSION = 1; sub main::VERSION_MESSAGE { - print 'wireguard LibreNMS extend v. '.$VERSION."\n"; + print 'wireguard LibreNMS extend v. ' . $VERSION . "\n"; } sub main::HELP_MESSAGE { @@ -228,7 +228,7 @@ sub host_port_split { ## ## our $config = { - include_pubkey => 0, + include_pubkey => 0, pubkey_resolvers => 'config,endpoint_if_first_allowed_is_subnet_use_hosts,endpoint_if_first_allowed_is_subnet_use_ip,first_allowed_use_hosts,first_allowed_use_ip', use_short_hostname => 1, @@ -289,7 +289,7 @@ sub host_port_split { ## get all the information ## ## -my $wg_info = {}; +my %wg_info; # get endpoint info my $command_raw = `wg show all endpoints 2> /dev/null`; @@ -308,15 +308,19 @@ sub host_port_split { ( $host, $port ) = host_port_split( $line_split[2] ); } else { $pubkey = $line_split[0]; - ( $host, $port ) = host_port_split( $line_split[1] ); + if ( $line_split[1] =~ /^[\[\]0-9\.A-Fa-f]+\:[0-9]+$/ ) { + ( $host, $port ) = host_port_split( $line_split[1] ); + } } - $wg_info->{$interface} = { - $pubkey => { - endpoint_host => $host, - endpoint_port => $port, - allowed_ips => [], - } + if ( !defined( $wg_info{$interface} ) ) { + 
$wg_info{$interface} = {}; + } + + $wg_info{$interface}{$pubkey} = { + endpoint_host => $host, + endpoint_port => $port, + allowed_ips => [], }; } ## end foreach my $line (@command_split) } ## end if ( $? == 0 ) @@ -328,8 +332,8 @@ sub host_port_split { foreach my $line (@command_split) { my ( $interface, $pubkey, $recv, $sent ) = split( /[\t\ ]+/, $line ); if ( defined($sent) ) { - $wg_info->{$interface}{$pubkey}{bytes_rcvd} = $recv; - $wg_info->{$interface}{$pubkey}{bytes_sent} = $sent; + $wg_info{$interface}{$pubkey}{bytes_rcvd} = $recv; + $wg_info{$interface}{$pubkey}{bytes_sent} = $sent; } } } ## end if ( $? == 0 ) @@ -342,9 +346,9 @@ sub host_port_split { foreach my $line (@command_split) { my ( $interface, $pubkey, $when ) = split( /[\t\ ]+/, $line ); if ( $when == 0 ) { - $wg_info->{$interface}{$pubkey}{minutes_since_last_handshake} = undef; + $wg_info{$interface}{$pubkey}{minutes_since_last_handshake} = undef; } else { - $wg_info->{$interface}{$pubkey}{minutes_since_last_handshake} = ( $current_time - $when ) / 60; + $wg_info{$interface}{$pubkey}{minutes_since_last_handshake} = ( $current_time - $when ) / 60; } } } ## end if ( $? 
== 0 ) @@ -362,7 +366,7 @@ sub host_port_split { } elsif ( $line_split[$int] =~ /^[A-Fa-f0-9\:]+\/128$/ ) { $line_split[$int] =~ s/\/128//; } - push( @{ $wg_info->{ $line_split[0] }{ $line_split[1] }{allowed_ips} }, $line_split[$int] ); + push( @{ $wg_info{ $line_split[0] }{ $line_split[1] }{allowed_ips} }, $line_split[$int] ); $int++; } } ## end foreach my $line (@command_split) @@ -430,10 +434,10 @@ sub hosts { } return undef; } ## end sub hosts -my @interfaces = keys( %{$wg_info} ); +my @interfaces = keys(%wg_info); my @resolvers = split( /\,+/, $config->{pubkey_resolvers} ); foreach my $interface (@interfaces) { - my @pubkeys = keys( %{ $wg_info->{$interface} } ); + my @pubkeys = keys( %{ $wg_info{$interface} } ); foreach my $pubkey (@pubkeys) { my $matched = 0; my $resolvers_int = 0; @@ -441,59 +445,59 @@ sub hosts { my $resolver = $resolvers[$resolvers_int]; if ( !$matched && $resolver eq 'config' ) { if ( defined( $config->{public_key_to_arbitrary_name}{$pubkey} ) ) { - $wg_info->{$interface}{$pubkey}{name} = $config->{public_key_to_arbitrary_name}{$pubkey}; + $wg_info{$interface}{$pubkey}{name} = $config->{public_key_to_arbitrary_name}{$pubkey}; $matched = 1; } } elsif ( !$matched && $resolver eq 'endpoint_if_first_allowed_is_subnet_use_getent' ) { - if ( defined( $wg_info->{$interface}{$pubkey}{allowed_ips}[0] ) - && $wg_info->{$interface}{$pubkey}{allowed_ips}[0] =~ /\// - && defined( $wg_info->{$interface}{$pubkey}{endpoint_host} ) ) + if ( defined( $wg_info{$interface}{$pubkey}{allowed_ips}[0] ) + && $wg_info{$interface}{$pubkey}{allowed_ips}[0] =~ /\// + && defined( $wg_info{$interface}{$pubkey}{endpoint_host} ) ) { - my $name = getent_hosts( $wg_info->{$interface}{$pubkey}{endpoint_host} ); + my $name = getent_hosts( $wg_info{$interface}{$pubkey}{endpoint_host} ); if ( defined($name) ) { - $wg_info->{$interface}{$pubkey}{name} = $name; + $wg_info{$interface}{$pubkey}{name} = $name; $matched = 1; } - } ## end if ( defined( 
$wg_info->{$interface}{$pubkey...})) + } ## end if ( defined( $wg_info{$interface}{$pubkey...})) } elsif ( !$matched && $resolver eq 'endpoint_if_first_allowed_is_subnet_use_hosts' ) { - if ( defined( $wg_info->{$interface}{$pubkey}{allowed_ips}[0] ) - && $wg_info->{$interface}{$pubkey}{allowed_ips}[0] =~ /\// - && defined( $wg_info->{$interface}{$pubkey}{endpoint_host} ) ) + if ( defined( $wg_info{$interface}{$pubkey}{allowed_ips}[0] ) + && $wg_info{$interface}{$pubkey}{allowed_ips}[0] =~ /\// + && defined( $wg_info{$interface}{$pubkey}{endpoint_host} ) ) { - my $name = hosts( $wg_info->{$interface}{$pubkey}{endpoint_host} ); + my $name = hosts( $wg_info{$interface}{$pubkey}{endpoint_host} ); if ( defined($name) ) { - $wg_info->{$interface}{$pubkey}{name} = $name; + $wg_info{$interface}{$pubkey}{name} = $name; $matched = 1; } - } ## end if ( defined( $wg_info->{$interface}{$pubkey...})) + } ## end if ( defined( $wg_info{$interface}{$pubkey...})) } elsif ( !$matched && $resolver eq 'endpoint_if_first_allowed_is_subnet_use_ip' ) { - if ( defined( $wg_info->{$interface}{$pubkey}{allowed_ips}[0] ) - && $wg_info->{$interface}{$pubkey}{allowed_ips}[0] =~ /\// - && defined( $wg_info->{$interface}{$pubkey}{endpoint_host} ) ) + if ( defined( $wg_info{$interface}{$pubkey}{allowed_ips}[0] ) + && $wg_info{$interface}{$pubkey}{allowed_ips}[0] =~ /\// + && defined( $wg_info{$interface}{$pubkey}{endpoint_host} ) ) { - $wg_info->{$interface}{$pubkey}{name} = $wg_info->{$interface}{$pubkey}{endpoint_host}; + $wg_info{$interface}{$pubkey}{name} = $wg_info{$interface}{$pubkey}{endpoint_host}; $matched = 1; } } elsif ( !$matched && $resolver eq 'first_allowed_use_getent' ) { - if ( defined( $wg_info->{$interface}{$pubkey}{allowed_ips}[0] ) ) { - my $host = $wg_info->{$interface}{$pubkey}{allowed_ips}[0]; + if ( defined( $wg_info{$interface}{$pubkey}{allowed_ips}[0] ) ) { + my $host = $wg_info{$interface}{$pubkey}{allowed_ips}[0]; my $name = getent_hosts($host); if ( defined($name) ) 
{ - $wg_info->{$interface}{$pubkey}{name} = $name; + $wg_info{$interface}{$pubkey}{name} = $name; $matched = 1; } } } elsif ( !$matched && $resolver eq 'first_allowed_use_hosts' ) { - if ( defined( $wg_info->{$interface}{$pubkey}{allowed_ips}[0] ) ) { - my $host = $wg_info->{$interface}{$pubkey}{allowed_ips}[0]; + if ( defined( $wg_info{$interface}{$pubkey}{allowed_ips}[0] ) ) { + my $host = $wg_info{$interface}{$pubkey}{allowed_ips}[0]; my $name = hosts($host); if ( defined($name) ) { - $wg_info->{$interface}{$pubkey}{name} = $name; + $wg_info{$interface}{$pubkey}{name} = $name; $matched = 1; } } } elsif ( !$matched && $resolver eq 'first_allowed_use_ip' ) { - $wg_info->{$interface}{$pubkey}{name} = $wg_info->{$interface}{$pubkey}{allowed_ips}[0]; + $wg_info{$interface}{$pubkey}{name} = $wg_info{$interface}{$pubkey}{allowed_ips}[0]; $matched = 1; } $resolvers_int++; @@ -508,20 +512,20 @@ sub hosts { ## foreach my $interface (@interfaces) { - my @pubkeys = keys( %{ $wg_info->{$interface} } ); + my @pubkeys = keys( %{ $wg_info{$interface} } ); foreach my $pubkey (@pubkeys) { - if ( defined( $wg_info->{$interface}{$pubkey}{name} ) ) { + if ( defined( $wg_info{$interface}{$pubkey}{name} ) ) { if ( !defined( $return_json->{data}{$interface} ) ) { $return_json->{data}{$interface} = {}; } - $return_json->{data}{$interface}{ $wg_info->{$interface}{$pubkey}{name} } = $wg_info->{$interface}{$pubkey}; - if ($config->{include_pubkey}) { - $return_json->{data}{$interface}{ $wg_info->{$interface}{$pubkey}{name} }{pubkey} = $pubkey; - }else { - $return_json->{data}{$interface}{ $wg_info->{$interface}{$pubkey}{name} }{pubkey} = undef; + $return_json->{data}{$interface}{ $wg_info{$interface}{$pubkey}{name} } = $wg_info{$interface}{$pubkey}; + if ( $config->{include_pubkey} ) { + $return_json->{data}{$interface}{ $wg_info{$interface}{$pubkey}{name} }{pubkey} = $pubkey; + } else { + $return_json->{data}{$interface}{ $wg_info{$interface}{$pubkey}{name} }{pubkey} = undef; } - } - } + 
} ## end if ( defined( $wg_info{$interface}{$pubkey...})) + } ## end foreach my $pubkey (@pubkeys) } ## end foreach my $interface (@interfaces) return_the_data( $return_json, $opts{B} ); From 9c2ef412c001465d29c4468d7827418d57df1827 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Wed, 10 Jul 2024 10:10:06 -0500 Subject: [PATCH 434/497] now save the hostname if we got it (#535) --- snmp/wireguard.pl | 52 ++++++++++++++++++++++++++--------------------- 1 file changed, 29 insertions(+), 23 deletions(-) diff --git a/snmp/wireguard.pl b/snmp/wireguard.pl index 5a35197e8..49e0c0ec6 100755 --- a/snmp/wireguard.pl +++ b/snmp/wireguard.pl @@ -9,11 +9,11 @@ =head1 NAME =head1 VERSION -0.0.2 +0.0.3 =cut -our $VERSION = '0.0.2'; +our $VERSION = '0.0.3'; =head1 SYNOPSIS @@ -395,14 +395,12 @@ sub getent_hosts { my @line_split = split( /[\t\ ]+/, $command_split[0] ); if ( defined( $line_split[1] ) ) { $line_split[1] =~ s/^\.//; - if ( $config->{use_short_hostname} ) { - $line_split[1] =~ s/\..*$//; - } return $line_split[1]; } - } ## end if ( defined( $command_split[0] ) ) + } return undef; } ## end sub getent_hosts + our $hosts_read = 0; our $hosts = {}; @@ -420,13 +418,9 @@ sub hosts { my @line_split = split( /[\t\ ]+/, $line ); if ( defined( $line_split[0] ) && defined( $line_split[1] ) ) { $line_split[1] =~ s/^\.//; - if ( $config->{use_short_hostname} ) { - $line_split[1] =~ s/\..*$//; - } - $hosts->{ $line_split[0] } = $line_split[1]; } - } ## end foreach my $line (@hosts_split) + } }; } ## end if ( !$hosts_read ) if ( defined( $hosts->{$ip} ) ) { @@ -434,6 +428,7 @@ sub hosts { } return undef; } ## end sub hosts + my @interfaces = keys(%wg_info); my @resolvers = split( /\,+/, $config->{pubkey_resolvers} ); foreach my $interface (@interfaces) { @@ -445,8 +440,9 @@ sub hosts { my $resolver = $resolvers[$resolvers_int]; if ( !$matched && $resolver eq 'config' ) { if ( defined( $config->{public_key_to_arbitrary_name}{$pubkey} ) ) { - 
$wg_info{$interface}{$pubkey}{name} = $config->{public_key_to_arbitrary_name}{$pubkey}; - $matched = 1; + $wg_info{$interface}{$pubkey}{name} = $config->{public_key_to_arbitrary_name}{$pubkey}; + $wg_info{$interface}{$pubkey}{hostname} = undef; + $matched = 1; } } elsif ( !$matched && $resolver eq 'endpoint_if_first_allowed_is_subnet_use_getent' ) { if ( defined( $wg_info{$interface}{$pubkey}{allowed_ips}[0] ) @@ -455,7 +451,7 @@ sub hosts { { my $name = getent_hosts( $wg_info{$interface}{$pubkey}{endpoint_host} ); if ( defined($name) ) { - $wg_info{$interface}{$pubkey}{name} = $name; + $wg_info{$interface}{$pubkey}{hostname} = $name; $matched = 1; } } ## end if ( defined( $wg_info{$interface}{$pubkey...})) @@ -466,7 +462,7 @@ sub hosts { { my $name = hosts( $wg_info{$interface}{$pubkey}{endpoint_host} ); if ( defined($name) ) { - $wg_info{$interface}{$pubkey}{name} = $name; + $wg_info{$interface}{$pubkey}{hostname} = $name; $matched = 1; } } ## end if ( defined( $wg_info{$interface}{$pubkey...})) @@ -475,7 +471,7 @@ sub hosts { && $wg_info{$interface}{$pubkey}{allowed_ips}[0] =~ /\// && defined( $wg_info{$interface}{$pubkey}{endpoint_host} ) ) { - $wg_info{$interface}{$pubkey}{name} = $wg_info{$interface}{$pubkey}{endpoint_host}; + $wg_info{$interface}{$pubkey}{hostname} = $wg_info{$interface}{$pubkey}{endpoint_host}; $matched = 1; } } elsif ( !$matched && $resolver eq 'first_allowed_use_getent' ) { @@ -483,7 +479,7 @@ sub hosts { my $host = $wg_info{$interface}{$pubkey}{allowed_ips}[0]; my $name = getent_hosts($host); if ( defined($name) ) { - $wg_info{$interface}{$pubkey}{name} = $name; + $wg_info{$interface}{$pubkey}{hostname} = $name; $matched = 1; } } @@ -492,12 +488,12 @@ sub hosts { my $host = $wg_info{$interface}{$pubkey}{allowed_ips}[0]; my $name = hosts($host); if ( defined($name) ) { - $wg_info{$interface}{$pubkey}{name} = $name; + $wg_info{$interface}{$pubkey}{hostname} = $name; $matched = 1; } } } elsif ( !$matched && $resolver eq 
'first_allowed_use_ip' ) { - $wg_info{$interface}{$pubkey}{name} = $wg_info{$interface}{$pubkey}{allowed_ips}[0]; + $wg_info{$interface}{$pubkey}{hostname} = $wg_info{$interface}{$pubkey}{allowed_ips}[0]; $matched = 1; } $resolvers_int++; @@ -514,15 +510,25 @@ sub hosts { foreach my $interface (@interfaces) { my @pubkeys = keys( %{ $wg_info{$interface} } ); foreach my $pubkey (@pubkeys) { - if ( defined( $wg_info{$interface}{$pubkey}{name} ) ) { + if ( defined( $wg_info{$interface}{$pubkey}{name} ) || $wg_info{$interface}{$pubkey}{hostname} ) { if ( !defined( $return_json->{data}{$interface} ) ) { $return_json->{data}{$interface} = {}; } - $return_json->{data}{$interface}{ $wg_info{$interface}{$pubkey}{name} } = $wg_info{$interface}{$pubkey}; + my $name; + if ( defined( $wg_info{$interface}{$pubkey}{name} ) ) { + $name = $wg_info{$interface}{$pubkey}{name}; + delete( $wg_info{$interface}{$pubkey}{name} ); + } else { + $name = $wg_info{$interface}{$pubkey}{hostname}; + if ( $config->{use_short_hostname} ) { + $name =~ s/\..*$//; + } + } + $return_json->{data}{$interface}{$name} = $wg_info{$interface}{$pubkey}; if ( $config->{include_pubkey} ) { - $return_json->{data}{$interface}{ $wg_info{$interface}{$pubkey}{name} }{pubkey} = $pubkey; + $return_json->{data}{$interface}{$name}{pubkey} = $pubkey; } else { - $return_json->{data}{$interface}{ $wg_info{$interface}{$pubkey}{name} }{pubkey} = undef; + $return_json->{data}{$interface}{$name}{pubkey} = undef; } } ## end if ( defined( $wg_info{$interface}{$pubkey...})) } ## end foreach my $pubkey (@pubkeys) From 95d35d50ceb0a86d770b78b35de5723d411930d0 Mon Sep 17 00:00:00 2001 From: "Zane C. 
Bowers-Hadley" Date: Wed, 10 Jul 2024 14:27:32 -0500 Subject: [PATCH 435/497] don't shorten the hostname for wireguard.pl if it is a IPv4 address (#536) --- snmp/wireguard.pl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/wireguard.pl b/snmp/wireguard.pl index 49e0c0ec6..45f6c01a8 100755 --- a/snmp/wireguard.pl +++ b/snmp/wireguard.pl @@ -520,7 +520,7 @@ sub hosts { delete( $wg_info{$interface}{$pubkey}{name} ); } else { $name = $wg_info{$interface}{$pubkey}{hostname}; - if ( $config->{use_short_hostname} ) { + if ( $config->{use_short_hostname} && $name !~ /^[0-9\.]+$/) { $name =~ s/\..*$//; } } From 5b68f45baedad3ec541537b96d03a11863f3e59f Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Sun, 14 Jul 2024 12:18:39 -0500 Subject: [PATCH 436/497] more work on the poudriere extend (#513) * more cleanup * add package and log size * fix checksum and add in some checks to make sure the value passed to read_dir is defined * more cleanup --- snmp/poudriere | 98 +++++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 85 insertions(+), 13 deletions(-) diff --git a/snmp/poudriere b/snmp/poudriere index b413fc895..36244c4f5 100755 --- a/snmp/poudriere +++ b/snmp/poudriere @@ -1,4 +1,4 @@ -#!/usr/bin/env perl +#!/usr/local/bin/perl =head1 NAME @@ -6,7 +6,7 @@ poudriere - LibreNMS JSON style SNMP extend for monitoring Poudriere =head1 VERSION -0.0.1 +0.1.0 =head1 SYNOPSIS @@ -45,6 +45,12 @@ meaning it will be written out to the two locations. The later is for use with returning data for SNMP. Will be compressed if possible. 
+=head1 REQUIREMENTS + + p5-File-Slurp + p5-MIME-Base64 + p5-JSON + =cut use strict; @@ -55,6 +61,7 @@ use MIME::Base64; use IO::Compress::Gzip qw(gzip $GzipError); use Pod::Usage; use JSON; +use Cwd 'abs_path'; sub time_to_seconds { my $time = $_[0]; @@ -66,13 +73,23 @@ sub time_to_seconds { if ( $time =~ /^0\:[0-9]+\.[0-9]+$/ ) { $time =~ s/^0\://; return $time; - } elsif ( $time =~ /^[0-9]+\:[0-9]+\.[0-9]+$/ ) { + } elsif ( $time =~ /^[0-9]+\:[0-9]+\.[0-9]+$/ + || $time =~ /^[0-9]+\:[0-9]+$/ ) + { my $minutes = $time; $minutes =~ s/\:.*//; $time =~ s/.*\://; $time = ( $minutes * 60 ) + $time; return $time; - } elsif ( $time =~ /^[0-9]+D\:[0-9]+\:[0-9]+\.[0-9]+$/ ) { + } elsif ( $time =~ /^[0-9]+\:[0-9]+\:[0-9]+\.[0-9]+$/ + || $time =~ /^[0-9]+\:[0-9]+\:[0-9]+$/ ) + { + my ( $hours, $minutes, $seconds ) = split( /:/, $time ); + $time = ( $hours * 3600 ) + ( $minutes * 60 ) + $seconds; + return $time; + } elsif ( $time =~ /^[0-9]+D\:[0-9]+\:[0-9]+\.[0-9]+$/ + || $time =~ /^[0-9]+D\:[0-9]+\:[0-9]+$/ ) + { my $days = $time; $days =~ s/D\:.*$//; my $minutes = $time; @@ -80,7 +97,7 @@ sub time_to_seconds { $minutes =~ s/\:.*//; $time = ( $days * 86400 ) + ( $minutes * 60 ) + $time; return $time; - } + } ## end elsif ( $time =~ /^[0-9]+D\:[0-9]+\:[0-9]+\.[0-9]+$/...) 
# return 0 for anything unknown return 0; @@ -162,7 +179,8 @@ my $data = { 'check-sanity' => 0, 'pkg-depends' => 0, 'fetch-depends' => 0, - 'fetch checksum' => 0, + 'fetch' => 0, + 'checksum' => 0, 'extract-depends' => 0, 'extract' => 0, 'patch-depends' => 0, @@ -174,6 +192,12 @@ my $data = { 'run-depends' => 0, 'stage' => 0, 'package' => 0, + 'package_size_all' => 0, + 'package_size_latest' => 0, + 'package_size_building' => 0, + 'log_size_latest' => 0, + 'log_size_done' => 0, + 'log_size_per_package' => 0, }, jailANDportsANDset => {} }; @@ -195,10 +219,10 @@ my @poudriere_stats = ( 'QUEUE', 'BUILT', 'FAIL', 'SKIP', 'IGNORE', 'FETCH', 'RE ### ### -my $status_raw = `poudriere -N status -f 2> /dev/null`; +my $status_raw = `poudriere -N status -f -l 2> /dev/null`; if ( $? == 0 ) { $data->{status} = $status_raw; - $data->{build_info} = `poudriere -N status -f -b 2>&1`; + $data->{build_info} = `poudriere -N status -f -b -l 2>&1`; my @status_split = split( /\n/, $status_raw ); my $status_split_int = 1; @@ -235,7 +259,8 @@ if ( $? == 0 ) { 'check-sanity' => 0, 'pkg-depends' => 0, 'fetch-depends' => 0, - 'fetch checksum' => 0, + 'fetch' => 0, + 'checksum' => 0, 'extract-depends' => 0, 'extract' => 0, 'patch-depends' => 0, @@ -247,6 +272,12 @@ if ( $? == 0 ) { 'run-depends' => 0, 'stage' => 0, 'package' => 0, + 'package_size_all' => 0, + 'package_size_latest' => 0, + 'package_size_building' => 0, + 'log_size_latest' => 0, + 'log_size_done' => 0, + 'log_size_per_package' => 0, }; ( $found->{SET}, $found->{PORTS}, $found->{JAIL}, $found->{BUILD}, $found->{STATUS}, @@ -265,11 +296,52 @@ if ( $? == 0 ) { $jailANDportsANDset = $found->{JAIL} . '-' . $found->{PORTS} . '-' . $found->{SET}; } + $found->{packages_dir_all} = $found->{LOGS} . '/../../../../packages/' . $jailANDportsANDset . '/All'; + $found->{packages_dir_latest} = $found->{LOGS} . '/../../../../packages/' . $jailANDportsANDset . '/Latest'; + $found->{packages_dir_building} + = $found->{LOGS} . 
'/../../../../packages/' . $jailANDportsANDset . '/.building'; + $found->{logs_dir_latest} = $found->{LOGS} . '/logs'; + $found->{logs_dir_done} = $found->{LOGS} . '/../latest-done/logs'; + $found->{logs_dir_per_package} = $found->{LOGS} . '/../latest-per-pkg/'; + my %dir_size_stats = ( + 'logs_dir_per_package' => 'log_size_per_package', + 'logs_dir_done' => 'log_size_done', + 'logs_dir_latest' => 'log_size_latest', + 'packages_dir_building' => 'package_size_building', + 'packages_dir_latest' => 'package_size_latest', + 'packages_dir_all' => 'package_size_all', + ); + + foreach my $item ( keys(%dir_size_stats) ) { + eval { + if ( defined( $found->{$item} ) ) { + $found->{$item} = abs_path( $found->{$item} ); + if ( defined( $found->{$item} ) ) { + if ( -d $found->{$item} ) { + my @files = read_dir( $found->{$item} ); + foreach my $to_stat (@files) { + if ( -f $found->{$item} . '/' . $to_stat ) { + my ( + $dev, $ino, $mode, $nlink, $uid, $gid, $rdev, + $size, $atime, $mtime, $ctime, $blksize, $blocks + ) = stat( $found->{$item} . '/' . $to_stat ); + $found->{ $dir_size_stats{$item} } += $size; + } + } + $data->{stats}{ $dir_size_stats{$item} } = $found->{ $dir_size_stats{$item} }; + } ## end if ( -d $found->{$item} ) + } ## end if ( defined( $found->{$item} ) ) + } ## end if ( defined( $found->{$item} ) ) + }; + } ## end foreach my $item ( keys(%dir_size_stats) ) + foreach my $item (@poudriere_stats) { if ( $item eq 'TIME' ) { $found->{$item} = time_to_seconds( $found->{$item} ); } - $data->{stats}{$item} += $found->{$item}; + if ( $item =~ /^\d+$/ ) { + $data->{stats}{$item} += $found->{$item}; + } } ## @@ -367,13 +439,13 @@ if ( $? 
== 0 ) { } elsif ( $line =~ /[\ \t]package[\ \t]/ ) { $type = 'package'; } - if (defined($type)) { + if ( defined($type) ) { $data->{stats}{$type}++; - if (defined($data->{jailANDportsANDset}{$current_section})) { + if ( defined( $data->{jailANDportsANDset}{$current_section} ) ) { $data->{jailANDportsANDset}{$current_section}{$type}++; } } - } ## end elsif ( $line =~ /^\[[0-9]+\].*\/.*\|.*-.*\:/) + } ## end elsif ( $line =~ /^\[.*\].*\:.*\|.*\:/ ) } ## end foreach my $line (@build_info_split) } else { $to_return->{error} = 1; From ede113d7865c6745ba705b310661d0a7089c12a3 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Sat, 20 Jul 2024 07:26:53 -0500 Subject: [PATCH 437/497] for poudriere add -a for .data.history and -z to zero status stats when not building (#537) * add -a for .data.history * add -z --- snmp/poudriere | 37 ++++++++++++++++++++++++++++++++++--- 1 file changed, 34 insertions(+), 3 deletions(-) diff --git a/snmp/poudriere b/snmp/poudriere index 36244c4f5..2a76a929c 100755 --- a/snmp/poudriere +++ b/snmp/poudriere @@ -6,11 +6,11 @@ poudriere - LibreNMS JSON style SNMP extend for monitoring Poudriere =head1 VERSION -0.1.0 +0.2.0 =head1 SYNOPSIS -poudriere [B<-w>] [B<-b>] [B<-o> ] +poudriere [B<-w>] [B<-b>] [B<-o> ] [B<-a>] [B<-z>] poudriere --help|-h @@ -26,6 +26,10 @@ or if using cron... =head1 FLAGS +=head2 -a + +Include `poudriere status -a` as .data.history . + =head2 -w Write the results out. @@ -34,6 +38,11 @@ Write the results out. Print out the compressed data if GZip+Base64 is smaller. +=head2 -z + +Zero the stats from `poudriere status` if it the status for a jail/ports/set +set is not not building. + =head2 -o Where to write the results to. 
Defaults to '/var/cache/poudriere.json', @@ -115,10 +124,14 @@ my $write; my $compress; my $version; my $help; +my $history; +my $zero_non_build; GetOptions( + a => \$history, 'o=s' => \$cache_base, w => \$write, b => \$compress, + z => \$zero_non_build, v => \$version, version => \$version, h => \$help, @@ -285,6 +298,17 @@ if ( $? == 0 ) { $found->{FETCH}, $found->{REMAIN}, $found->{TIME}, $found->{LOGS} ) = split( / +/, $status_split[$status_split_int], 14 ); + if ( $zero_non_build && $found->{STATUS} !~ /build/ ) { + $found->{QUEUE} = 0; + $found->{BUILT} = 0; + $found->{FAIL} = 0; + $found->{SKIP} = 0; + $found->{IGNORE} = 0; + $found->{FETCH} = 0; + $found->{REMAIN} = 0; + $found->{TIME} = 0; + } + if ( $found->{STATUS} ne 'done' ) { $data->{not_done} = 1; } @@ -339,7 +363,7 @@ if ( $? == 0 ) { if ( $item eq 'TIME' ) { $found->{$item} = time_to_seconds( $found->{$item} ); } - if ( $item =~ /^\d+$/ ) { + if ( $found->{$item} =~ /^\d+$/ ) { $data->{stats}{$item} += $found->{$item}; } } @@ -447,6 +471,13 @@ if ( $? == 0 ) { } } ## end elsif ( $line =~ /^\[.*\].*\:.*\|.*\:/ ) } ## end foreach my $line (@build_info_split) + + # + # include this history if asked to + # + if ($history) { + $data->{history} = `poudriere -N status -a 2> /dev/null`; + } } else { $to_return->{error} = 1; $to_return->{errorString} = 'non-zero exit for "poudriere status -f"'; From 9e0106cc3fe6b11522c9e36ef82025ccc7af6130 Mon Sep 17 00:00:00 2001 From: "Zane C. 
Bowers-Hadley" Date: Mon, 22 Jul 2024 13:42:13 -0500 Subject: [PATCH 438/497] add -q to quite the results and a bit of cleanup (#538) --- snmp/poudriere | 50 +++++++++++++++++++++++++------------------------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/snmp/poudriere b/snmp/poudriere index 2a76a929c..b030da77d 100755 --- a/snmp/poudriere +++ b/snmp/poudriere @@ -6,11 +6,13 @@ poudriere - LibreNMS JSON style SNMP extend for monitoring Poudriere =head1 VERSION -0.2.0 +0.3.0 =head1 SYNOPSIS -poudriere [B<-w>] [B<-b>] [B<-o> ] [B<-a>] [B<-z>] +poudriere B<-w> [B<-o> ] [B<-a>] [B<-z>] [B<-q>] + +poudriere [<-b>] [B<-a>] [B<-z>] poudriere --help|-h @@ -18,10 +20,14 @@ poudriere --version|-v =head1 SNMPD CONFIG - extend poudriere /etc/snmp/extends/poudriere -b + extend poudriere /usr/local/etc/snmp/poudriere -b -a -z or if using cron... + # cron + 4/5 * * * * root /usr/local/etc/snmp/poudriere -b -a -z -q + + # snmpd.conf extend poudriere cat /var/cache/poudriere.json.snmp =head1 FLAGS @@ -30,13 +36,17 @@ or if using cron... Include `poudriere status -a` as .data.history . -=head2 -w +=head2 -b -Write the results out. +Encapsulate the result in GZip+Base64 if -w is not used. -=head2 -b +=head2 -q -Print out the compressed data if GZip+Base64 is smaller. +If -w is specified, do not print the results to stdout. + +=head2 -w + +Write the results out. =head2 -z @@ -126,10 +136,12 @@ my $version; my $help; my $history; my $zero_non_build; +my $if_write_be_quiet; GetOptions( a => \$history, 'o=s' => \$cache_base, w => \$write, + q => \$if_write_be_quiet, b => \$compress, z => \$zero_non_build, v => \$version, @@ -306,8 +318,8 @@ if ( $? == 0 ) { $found->{IGNORE} = 0; $found->{FETCH} = 0; $found->{REMAIN} = 0; - $found->{TIME} = 0; - } + $found->{TIME} = 0; + } ## end if ( $zero_non_build && $found->{STATUS} !~...) if ( $found->{STATUS} ne 'done' ) { $data->{not_done} = 1; @@ -393,7 +405,7 @@ if ( $? 
== 0 ) { eval { $ps = decode_json( - `ps -o 'jid %cpu %mem rss cow dsiz etimes inblk jobc majflt minflt msgrcv msgsnd nivcsw nlwp nsigs nswap nvcsw oublk ssiz systime time tsiz usertime' --libxo json -J $jails_string` + `ps -o 'jid %cpu %mem rss cow dsiz etimes inblk jobc majflt minflt msgrcv msgsnd nivcsw nlwp nsigs nswap nvcsw oublk ssiz systime time tsiz usertime' --libxo json -J $jails_string 2> /dev/null` ); }; if ($@) { @@ -503,16 +515,9 @@ if ($write) { $compressed =~ s/\n//g; $compressed = $compressed . "\n"; my $print_compressed = 0; - if ( length($compressed) > length($raw_json) ) { - write_file( $cache_base . '.snmp', $raw_json ); - } else { - write_file( $cache_base . '.snmp', $compressed ); - $print_compressed = 1; - } + write_file( $cache_base . '.snmp', $compressed ); - if ( $compress && $print_compressed ) { - print $compressed; - } else { + if ( !$if_write_be_quiet ) { print $raw_json; } } else { @@ -527,10 +532,5 @@ if ($write) { my $compressed = encode_base64($compressed_string); $compressed =~ s/\n//g; $compressed = $compressed . "\n"; - my $print_compressed = 0; - if ( length($compressed) > length($raw_json) ) { - print $raw_json; - } else { - print $compressed; - } + print $compressed; } ## end else [ if ($write) ] From 412dce61132adf69c3cb5a2bf898dd5655c0d15f Mon Sep 17 00:00:00 2001 From: "Zane C. 
Bowers-Hadley" Date: Wed, 24 Jul 2024 19:16:25 -0500 Subject: [PATCH 439/497] poudriere: add debugging via -d (#540) --- snmp/poudriere | 145 +++++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 127 insertions(+), 18 deletions(-) diff --git a/snmp/poudriere b/snmp/poudriere index b030da77d..6ef1b6314 100755 --- a/snmp/poudriere +++ b/snmp/poudriere @@ -6,13 +6,13 @@ poudriere - LibreNMS JSON style SNMP extend for monitoring Poudriere =head1 VERSION -0.3.0 +0.4.0 =head1 SYNOPSIS -poudriere B<-w> [B<-o> ] [B<-a>] [B<-z>] [B<-q>] +poudriere B<-w> [B<-o> ] [B<-a>] [B<-z>] [B<-q>] [B<-d>] -poudriere [<-b>] [B<-a>] [B<-z>] +poudriere [<-b>] [B<-a>] [B<-z>] [B<-d>] poudriere --help|-h @@ -40,6 +40,10 @@ Include `poudriere status -a` as .data.history . Encapsulate the result in GZip+Base64 if -w is not used. +=head2 -d + +Debug mode. This is noisy + =head2 -q If -w is specified, do not print the results to stdout. @@ -137,19 +141,26 @@ my $help; my $history; my $zero_non_build; my $if_write_be_quiet; +my $debug; GetOptions( a => \$history, + b => \$compress, + d => \$debug, + h => \$help, + help => \$help, 'o=s' => \$cache_base, - w => \$write, q => \$if_write_be_quiet, - b => \$compress, - z => \$zero_non_build, v => \$version, + w => \$write, version => \$version, - h => \$help, - help => \$help, + z => \$zero_non_build, ); +# include for dumping variables at parts +if ($debug) { + eval "use Data::Dumper; \$Data::Dumper::Sortkeys = 1;"; +} + if ($version) { pod2usage( -exitval => 255, -verbose => 99, -sections => qw(VERSION), -output => \*STDOUT, ); exit 255; @@ -246,17 +257,45 @@ my @poudriere_stats = ( 'QUEUE', 'BUILT', 'FAIL', 'SKIP', 'IGNORE', 'FETCH', 'RE my $status_raw = `poudriere -N status -f -l 2> /dev/null`; if ( $? == 0 ) { + if ($debug) { + print "###\n###\n### poudriere -N status -f -l 2> /dev/null \n###\n###\n" . $status_raw . 
"\n\n\n"; + } + $data->{status} = $status_raw; - $data->{build_info} = `poudriere -N status -f -b -l 2>&1`; + $data->{build_info} = `poudriere -N status -f -b -l 2>\&1`; + + if ($debug) { + print "###\n###\n### poudriere -N status -f -b -l 2>\&1 \n###\n###\n" + . $data->{build_info} + . "\n\n\n###\n###\n###\n### jls --libxo json \n###\n###\n###\n"; + } + + my $jls; + eval { $jls = decode_json(`jls --libxo json`); }; + if ($@) { + $jls = { 'jail-information' => { jail => [] } }; + if ($debug) { + print "# failed to parse JSON... using empty hash... \n \$@ = " + . $@ + . "\n\$jls = " + . Dumper($jls) + . "\n\n\n"; + } + } else { + if ($debug) { + print "\$jls = " . Dumper($jls) . "\n\n\n"; + } + } + + if ($debug) { + print "###\n###\n###\n### starting line processing for status \n###\n###\n###\n"; + } my @status_split = split( /\n/, $status_raw ); my $status_split_int = 1; while ( defined( $status_split[$status_split_int] ) ) { - - my $jls; - eval { $jls = decode_json(`jls --libxo json`); }; - if ($@) { - $jls = { 'jail-information' => { jail => [] } }; + if ($debug) { + print '#\n#\n# processing line ' . $status_split_int . ': ' . $status_split[$status_split_int] . "\n#\n#\n"; } my $found = { @@ -319,7 +358,13 @@ if ( $? == 0 ) { $found->{FETCH} = 0; $found->{REMAIN} = 0; $found->{TIME} = 0; - } ## end if ( $zero_non_build && $found->{STATUS} !~...) + + if ($debug) { + print '# zeroing... $zero_non_build = true && status = ' . $found->{STATUS} . " !~ /build/\n"; + } + } elsif ($debug) { + print '# not zeroing ... $zero_non_build = false || status = ' . $found->{STATUS} . " =~ /build/\n"; + } if ( $found->{STATUS} ne 'done' ) { $data->{not_done} = 1; @@ -331,6 +376,9 @@ if ( $? == 0 ) { } else { $jailANDportsANDset = $found->{JAIL} . '-' . $found->{PORTS} . '-' . $found->{SET}; } + if ($debug) { + print '# $jailANDportsANDset = ' . $jailANDportsANDset . "\n"; + } $found->{packages_dir_all} = $found->{LOGS} . '/../../../../packages/' . $jailANDportsANDset . 
'/All'; $found->{packages_dir_latest} = $found->{LOGS} . '/../../../../packages/' . $jailANDportsANDset . '/Latest'; @@ -386,14 +434,30 @@ if ( $? == 0 ) { my @jails; my $jail_regex = '^' . $jailANDportsANDset . '-job-[0-9]+'; my $jls_int = 0; + if ($debug) { + print '# looking for jails matching... /' . $jail_regex . '/ or \'' . $jailANDportsANDset . "'\n"; + } while ( defined( $jls->{'jail-information'}{jail}[$jls_int] ) ) { if ( $jls->{'jail-information'}{jail}[$jls_int]{hostname} eq $jailANDportsANDset || $jls->{'jail-information'}{jail}[$jls_int]{hostname} =~ /$jail_regex/ ) { push( @jails, $jls->{'jail-information'}{jail}[$jls_int]{jid} ); + if ($debug) { + print 'match $jls->{"jail-information"}{"jail"}[' + . $jls_int + . ']{hostname} = ' + . $jls->{'jail-information'}{jail}[$jls_int]{hostname} . "\n"; + } + } else { + if ($debug) { + print '!match $jls->{"jail-information"}{"jail"}[' + . $jls_int + . ']{hostname} = ' + . $jls->{'jail-information'}{jail}[$jls_int]{hostname} . "\n"; + } } $jls_int++; - } + } ## end while ( defined( $jls->{'jail-information'}{jail...})) ## ## if we have found jails, grab the information via ps @@ -401,8 +465,16 @@ if ( $? == 0 ) { if ( defined( $jails[0] ) ) { my $jails_string = join( ',', @jails ); + if ($debug) { + print "# \$jails[0] defined \n# \$jails_string = " . $jails_string . "\n"; + } + my $ps; eval { + if ($debug) { + print + "##\n##\n## ps -o 'jid \%cpu \%mem rss cow dsiz etimes inblk jobc majflt minflt msgrcv msgsnd nivcsw nlwp nsigs nswap nvcsw oublk ssiz systime time tsiz usertime' --libxo json -J $jails_string 2> /dev/null\n##\n##\n"; + } $ps = decode_json( `ps -o 'jid %cpu %mem rss cow dsiz etimes inblk jobc majflt minflt msgrcv msgsnd nivcsw nlwp nsigs nswap nvcsw oublk ssiz systime time tsiz usertime' --libxo json -J $jails_string 2> /dev/null` @@ -410,6 +482,12 @@ if ( $? == 0 ) { }; if ($@) { $ps = { 'process-information' => { process => [] } }; + if ($debug) { + print '# JSON parsing errored... 
using default... ' . $@ . "\n"; + } + } + if ($debug) { + print '$ps = ' . Dumper($ps) . "\n"; } my $ps_int = 0; while ( defined( $ps->{'process-information'}{process}[$ps_int] ) ) { @@ -423,19 +501,38 @@ if ( $? == 0 ) { } $ps_int++; } ## end while ( defined( $ps->{'process-information'}...)) - } ## end if ( defined( $jails[0] ) ) + } else { + if ($debug) { + print "# \$jails[0] is undef\n"; + } + } $data->{jailANDportsANDset}{$jailANDportsANDset} = $found; $status_split_int++; + + if ($debug) { + print "\$data->{jailANDportsANDset}{$jailANDportsANDset} = " + . Dumper( $data->{jailANDportsANDset}{$jailANDportsANDset} ) . " \n\n"; + } } ## end while ( defined( $status_split[$status_split_int...])) + if ($debug) { + print "#\n#\n# processing \$data->{build_info}\n#\n#\n"; + } + my @build_info_split = split( /\n/, $data->{build_info} ); my $current_section; foreach my $line (@build_info_split) { + if ($debug) { + print "# processing line: " . $line . "\n"; + } if ( $line =~ /^\[.*\]\ \[.*\] .*Queued.*Built/ ) { $current_section = $line; $current_section =~ s/^\[//; $current_section =~ s/\].*$//; + if ($debug) { + print '# found section line... \$current_section = ' . $current_section . "\n"; + } } elsif ( $line =~ /^\[.*\].*\:.*\|.*\:/ ) { my $type; if ( $line =~ /[\ \t]check\-sanity[\ \t]/ ) { @@ -480,6 +577,11 @@ if ( $? == 0 ) { if ( defined( $data->{jailANDportsANDset}{$current_section} ) ) { $data->{jailANDportsANDset}{$current_section}{$type}++; } + if ($debug) { + print '# type line found... $type = ' . $type . "\n"; + } + } elsif ($debug) { + print "# line not matched"; } } ## end elsif ( $line =~ /^\[.*\].*\:.*\|.*\:/ ) } ## end foreach my $line (@build_info_split) @@ -489,10 +591,17 @@ if ( $? == 0 ) { # if ($history) { $data->{history} = `poudriere -N status -a 2> /dev/null`; + if ($debug) { + print "#\n#\n# including as .data.history ... 
poudriere -N status -a 2> /dev/null\n#\n"; + } + } else { + if ($debug) { + print "#\n#\n# not including as .data.history ... poudriere -N status -a 2> /dev/null"; + } } } else { $to_return->{error} = 1; - $to_return->{errorString} = 'non-zero exit for "poudriere status -f"'; + $to_return->{errorString} = 'non-zero exit for "poudriere -N status -f -l"'; } ### From 409b5eacdb791fc1cd3a82bb9f33199c9749dd91 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Wed, 24 Jul 2024 19:17:31 -0500 Subject: [PATCH 440/497] zfs: handle a edge case that seems to affect some Linux installs for ZFS #523 --- snmp/zfs | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/snmp/zfs b/snmp/zfs index 6a14acdad..7b2412ced 100755 --- a/snmp/zfs +++ b/snmp/zfs @@ -6,7 +6,7 @@ zfs - LibreNMS JSON SNMP extend for gathering backups for ZFS =head1 VERSION -0.1.0 +0.1.1 =head1 DESCRIPTION @@ -80,8 +80,6 @@ use MIME::Base64; use IO::Compress::Gzip qw(gzip $GzipError); use Pod::Usage; -#$Getopt::Std::STANDARD_HELP_VERSION = 1; - sub main::VERSION_MESSAGE { pod2usage( -exitval => 255, -verbose => 99, -sections => qw(VERSION), -output => \*STDOUT, ); } @@ -289,6 +287,13 @@ my $recently_used_percent; my $frequently_used_percent; if ( !defined( $stats_stuff->{p} ) && defined( $stats_stuff->{mfu_size} ) ) { $stats_stuff->{p} = $stats_stuff->{size} - $stats_stuff->{mfu_size}; +} elsif ( !defined( $stats_stuff->{p} ) + && !defined( $stats_stuff->{mfu_size} ) + && defined( $stats_stuff->{pd} && defined( $stats_stuff->{pm} ) ) ) +{ + # see https://github.com/librenms/librenms-agent/issues/518 + # this should set the value for p in those cases + $stats_stuff->{p} = $stats_stuff->{pd} + $stats_stuff->{pm}; } if ( $stats_stuff->{size} >= $stats_stuff->{c} ) { if ( !defined($mfu_size) ) { From 0b7ff7f707142b0e907c320187d59bd84ed15ae2 Mon Sep 17 00:00:00 2001 From: "Zane C. 
Bowers-Hadley" Date: Fri, 26 Jul 2024 19:36:35 -0500 Subject: [PATCH 441/497] add snmp/http_access_log_combined (#541) --- snmp/http_access_log_combined | 656 ++++++++++++++++++++++++++++++++++ 1 file changed, 656 insertions(+) create mode 100755 snmp/http_access_log_combined diff --git a/snmp/http_access_log_combined b/snmp/http_access_log_combined new file mode 100755 index 000000000..af7b841a4 --- /dev/null +++ b/snmp/http_access_log_combined @@ -0,0 +1,656 @@ +#!/usr/local/bin/perl + +=head1 NAME + +http_access_log_combined - LibreNMS JSON style SNMP extend for monitoring Apache style combined HTTP access logs + +=head1 VERSION + +0.1.0 + +=head1 SYNOPSIS + +http_access_log_combined B<-w> [B<-o> ] [B<-a>] [B<-q>] [B<-c> ] + +http_access_log_combined [<-b>] [B<-a>] [B<-z>] [B<-c> ] + +http_access_log_combined --help|-h + +http_access_log_combined --version|-v + +=head1 SNMPD CONFIG + + extend http_access_log_combined /usr/local/etc/snmp/poudriere -b + +or if using cron... + + # cron + 4/5 * * * * root /usr/local/etc/snmp/http_access_log_combined -b -q + + # snmpd.conf + extend poudriere cat /var/cache/http_access_log_combined.json.snmp + +=head1 FLAGS + +=head2 -b + +Encapsulate the result in GZip+Base64 if -w is not used. + +=head2 -c + +Config file to use. + +Default is /usr/local/etc/http_access_log_combined_extend.json . + +=head2 -q + +If -w is specified, do not print the results to stdout. + +=head2 -w + +Write the results out. + +=head2 -o + +Where to write the results to. Defaults to '/var/cache/http_access_log_combined.json', +meaning it will be written out to the two locations. + + /var/cache/http_access_log_combined.json + /var/cache/http_access_log_combined.json.snmp + +The later is for use with returning data for SNMP. Will be compressed +if possible. + +=head1 CONFIG + +=head2 manual + + - access :: An hash of files to process. + type :: hash + defualt :: {} + + - errors :: An hash of error files to get the size of. 
The key is matched keys in the access hash. + type :: hash + defualt :: {} + + # a example with a log named foo + { + "access":{ + "foo":"/var/log/www/foo.log" + }, + "error":{ + "foo":"/var/log/www/foo-error.log" + } + } + +=head2 auto + +Auto will attempt to generate a list of log files to process. Will look under the directory specified +for files matching the built regexp. The regexp is built by joining the access/error regexps to the end regexp. +so for access it would be come '-access.log$'. + + - auto :: If auto mode should be used or not. If not defined and .access + is not defined, then it will default to 1. Other wise it is undef, false. + + - auto_dir :: The dir to look for files in. + default :: /var/log/apache/ + + - auto_end_regex :: What to match files ending in. + default :: .log$ + + - auto_access_regex :: What will be prepended to the end regexp for looking for access log files. + default :: -access + + - auto_error_regex :: What will be prepended to the end regexp for looking for error log files. 
+ default :: -error + + # default + { + "auto": 1, + "auto_dir": "/var/log/apache/", + "auto_end_regex": ".log$", + "auto_access_regex": "-access", + "auto_error_regex": "-error", + } + +=head1 REQUIREMENTS + + File::Slurp + MIME-Base64 + JSON + Statistics::Lite + File::ReadBackwards + + # FreeBSD + pkg install p5-File-Slurp p5-MIME-Base64 p5-JSON p5-Statistics-Lite p5-File-ReadBackwards + + # Debian + apt-get install libfile-slurp-perl libmime-base64-perl libjson-perl libstatistics-lite-perl libfile-readbackwards-perl + +=cut + +use strict; +use warnings; +use Getopt::Long; +use File::Slurp; +use MIME::Base64; +use IO::Compress::Gzip qw(gzip $GzipError); +use Pod::Usage; +use JSON; +use File::ReadBackwards; +use Time::Piece; +use Statistics::Lite qw(:all); + +# get what to use for the target time +my $current_time = time; +my $target_time = $current_time - 300; + +# +# parses the specified line +# +sub parse { + my $line_tp_parse = shift; + + my $to_return; + my $rest_of_line; + my $rest_of_line_p2; + + ( $to_return->{host}, $to_return->{user}, $to_return->{date}, $rest_of_line ) + = $line_tp_parse =~ m,^([^\s]+)\s+-\s+([^ ]+)\s+\[(.*?)\]\s+(.*),; + + my @date_split = split( /\s+/, $to_return->{date} ); + my $log_time; + eval { + $log_time = Time::Piece->strptime( $date_split[0] . 
$date_split[1], '%d/%h/%Y:%H:%M:%S%z' ); + $to_return->{timestamp} = $log_time->epoch; + }; + if ($@) { + $to_return->{timestamp} = 0; + } + + if ( defined($rest_of_line) ) { + ( + $to_return->{method}, $to_return->{path}, $to_return->{proto}, + $to_return->{code}, $to_return->{bytes}, $rest_of_line_p2 + ) = split( /\s/, $rest_of_line, 6 ); + $to_return->{method} =~ tr/\"//d; + $to_return->{proto} =~ tr/\"//d; + + if ( defined($rest_of_line_p2) ) { + my @rest_of_line_p2_split = split( /\"/, $rest_of_line_p2 ); + $to_return->{refer} = $rest_of_line_p2_split[1]; + $to_return->{agent} = $rest_of_line_p2_split[3]; + } + } ## end if ( defined($rest_of_line) ) + + return $to_return; +} ## end sub parse + +#the version of returned data +my $VERSION = 1; + +my $pretty; +my $cache_base = '/var/cache/http_access_log_combined.json'; +my $write; +my $compress; +my $version; +my $help; +my $history; +my $if_write_be_quiet; +my $debug; +my $config_file = '/usr/local/etc/http_access_log_combined_extend.json'; +GetOptions( + b => \$compress, + 'c=s' => \$config_file, + h => \$help, + help => \$help, + 'o=s' => \$cache_base, + q => \$if_write_be_quiet, + v => \$version, + w => \$write, + version => \$version, +); + +if ($version) { + pod2usage( -exitval => 255, -verbose => 99, -sections => qw(VERSION), -output => \*STDOUT, ); + exit 255; +} + +if ($help) { + pod2usage( -exitval => 255, -verbose => 2, -output => \*STDOUT, ); + exit 255; +} + +# read in the config file +my $config; +if ( -f $config_file && !-r $config_file ) { + die( $config_file . ' is not readable' ); +} elsif ( -f $config_file ) { + $config = decode_json( read_file($config_file) ); + if ( ref($config) ne 'HASH' ) { + die( '"' . ref($config) . 
'" is the base ref type for the config instead of HASH' ); + } +} else { + $config = {}; +} +if ( !defined( $config->{access} ) ) { + $config->{auto} = 1; + $config->{access} = {}; +} +if ( !defined( $config->{error} ) ) { + $config->{error} = {}; +} +if ( $config->{auto} ) { + if ( !defined( $config->{auto_dir} ) ) { + $config->{auto_dir} = '/var/log/apache/'; + } + if ( !defined( $config->{auto_end_regex} ) ) { + $config->{auto_end_regex} = '.log$'; + } + if ( !defined( $config->{auto_access_regex} ) ) { + $config->{auto_access_regex} = '-access'; + } + if ( !defined( $config->{auto_error_regex} ) ) { + $config->{auto_error_regex} = '-error'; + } + if ( -d $config->{auto_dir} && -r $config->{auto_dir} ) { + my $access_log_regex = $config->{auto_access_regex} . $config->{auto_end_regex}; + my $error_log_regex = $config->{auto_access_regex} . $config->{auto_end_regex}; + my @dir = read_dir( $config->{auto_dir} ); + foreach my $dir_entry (@dir) { + my $full_path = $config->{auto_dir} . '/' . $dir_entry; + if ( -f $full_path && -r $full_path && $dir_entry =~ /$access_log_regex/ ) { + my $name = $dir_entry; + $name =~ s/$access_log_regex//; + $config->{access}{$name} = $full_path; + } elsif ( -f $full_path && -r $full_path && $dir_entry =~ /$error_log_regex/ ) { + my $name = $dir_entry; + $name =~ s/$error_log_regex//; + $config->{error}{$name} = $full_path; + } + } ## end foreach my $dir_entry (@dir) + } ## end if ( -d $config->{auto_dir} && -r $config->...) 
+} ## end if ( $config->{auto} ) + +#the data to return +my $to_return = { + 'version' => $VERSION, + 'error' => '0', + 'errorString' => '', +}; +my $data = { + totals => { + 'GET' => 0, + 'HEAD' => 0, + 'POST' => 0, + 'PUT' => 0, + 'DELETE' => 0, + 'CONNECT' => 0, + 'OPTIONS' => 0, + 'PATCH' => 0, + refer => 0, + no_refer => 0, + user => 0, + no_user => 0, + bytes => 0, + bytes_min => 0, + bytes_max => 0, + bytes_range => 0, + bytes_mean => 0, + bytes_median => 0, + bytes_mode => 0, + http1_0 => 0, + http1_1 => 0, + http2 => 0, + http3 => 0, + '1xx' => 0, + '100' => 0, + '101' => 0, + '102' => 0, + '103' => 0, + '2xx' => 0, + '200' => 0, + '201' => 0, + '202' => 0, + '203' => 0, + '204' => 0, + '205' => 0, + '206' => 0, + '207' => 0, + '208' => 0, + '218' => 0, + '226' => 0, + '3xx' => 0, + '301' => 0, + '302' => 0, + '303' => 0, + '304' => 0, + '305' => 0, + '306' => 0, + '307' => 0, + '308' => 0, + '4xx' => 0, + '400' => 0, + '401' => 0, + '402' => 0, + '403' => 0, + '404' => 0, + '405' => 0, + '406' => 0, + '407' => 0, + '408' => 0, + '409' => 0, + '410' => 0, + '411' => 0, + '412' => 0, + '413' => 0, + '414' => 0, + '415' => 0, + '416' => 0, + '417' => 0, + '419' => 0, + '420' => 0, + '421' => 0, + '422' => 0, + '423' => 0, + '424' => 0, + '425' => 0, + '429' => 0, + '431' => 0, + '444' => 0, + '451' => 0, + '494' => 0, + '495' => 0, + '496' => 0, + '497' => 0, + '499' => 0, + '5xx' => 0, + '500' => 0, + '501' => 0, + '502' => 0, + '503' => 0, + '504' => 0, + '505' => 0, + '506' => 0, + '507' => 0, + '508' => 0, + '509' => 0, + '510' => 0, + '511' => 0, + size => 0, + error_size => 0, + }, + logs => { + + }, +}; +my @bytes_total; + +foreach my $log_name ( keys( %{ $config->{access} } ) ) { + my @bytes_log; + my $new_entry = { + GET => 0, + HEAD => 0, + POST => 0, + PUT => 0, + DELETE => 0, + CONNECT => 0, + OPTIONS => 0, + PATCH => 0, + refer => 0, + no_refer => 0, + user => 0, + no_user => 0, + bytes => 0, + bytes_min => 0, + bytes_max => 0, + bytes_range => 
0, + bytes_mean => 0, + bytes_median => 0, + bytes_mode => 0, + http1_0 => 0, + http1_1 => 0, + http2 => 0, + http3 => 0, + '1xx' => 0, + '100' => 0, + '101' => 0, + '102' => 0, + '103' => 0, + '2xx' => 0, + '200' => 0, + '201' => 0, + '202' => 0, + '203' => 0, + '204' => 0, + '205' => 0, + '206' => 0, + '207' => 0, + '208' => 0, + '218' => 0, + '226' => 0, + '3xx' => 0, + '301' => 0, + '302' => 0, + '303' => 0, + '304' => 0, + '305' => 0, + '306' => 0, + '307' => 0, + '308' => 0, + '4xx' => 0, + '400' => 0, + '401' => 0, + '402' => 0, + '403' => 0, + '404' => 0, + '405' => 0, + '406' => 0, + '407' => 0, + '408' => 0, + '409' => 0, + '410' => 0, + '411' => 0, + '412' => 0, + '413' => 0, + '414' => 0, + '415' => 0, + '416' => 0, + '417' => 0, + '419' => 0, + '420' => 0, + '421' => 0, + '422' => 0, + '423' => 0, + '424' => 0, + '425' => 0, + '429' => 0, + '431' => 0, + '444' => 0, + '451' => 0, + '494' => 0, + '495' => 0, + '496' => 0, + '497' => 0, + '499' => 0, + '5xx' => 0, + '500' => 0, + '501' => 0, + '502' => 0, + '503' => 0, + '504' => 0, + '505' => 0, + '506' => 0, + '507' => 0, + '508' => 0, + '509' => 0, + '510' => 0, + '511' => 0, + size => 0, + error_size => 0, + }; + + eval { + my ( $dev, $ino, $mode, $nlink, $uid, $gid, $rdev, $size, $atime, $mtime, $ctime, $blksize, $blocks ) + = stat( $config->{access}{$log_name} ); + $new_entry->{size} = $size; + $data->{totals}{size} = $data->{totals}{size} + $size; + + if ( defined( $config->{errors}{$log_name} ) ) { + if ( -f $config->{errors}{$log_name} ) { + ( $dev, $ino, $mode, $nlink, $uid, $gid, $rdev, $size, $atime, $mtime, $ctime, $blksize, $blocks ) + = stat( $config->{errors}{$log_name} ); + $new_entry->{error_size} = $size; + $data->{totals}{error_size} = $data->{totals}{error_size} + $size; + } + } + }; + + eval { + my $bw = File::ReadBackwards->new( $config->{access}{$log_name} ); + my $line = $bw->readline; + my $process_log = 1; + while ( $process_log && defined($line) ) { + my $parsed = 
parse($line); + + # if not defined log_time, we faised to process the log time... don't process this entry + if ( $parsed->{timestamp} < $target_time ) { + # if true, then don't contiue process thig log file as we are now before the target time + $process_log = 0; + } else { + if ( defined( $parsed->{bytes} ) && $parsed->{bytes} =~ /^[0-9]+$/ ) { + $data->{totals}{bytes} += $parsed->{bytes}; + $new_entry->{bytes} += $parsed->{bytes}; + push( @bytes_total, $parsed->{bytes} ); + push( @bytes_log, $parsed->{bytes} ); + } + } + + if ( defined( $parsed->{proto} ) && defined( $new_entry->{ $parsed->{proto} } ) ) { + $new_entry->{ $parsed->{proto} }++; + $data->{totals}{ $parsed->{proto} }++; + } + + if ( defined( $parsed->{method} ) && defined( $new_entry->{ $parsed->{method} } ) ) { + $new_entry->{ $parsed->{method} }++; + $data->{totals}{ $parsed->{method} }++; + } + + if ( defined( $parsed->{code} ) ) { + if ( defined( $new_entry->{ $parsed->{code} } ) ) { + $new_entry->{ $parsed->{code} }++; + $data->{totals}{ $parsed->{code} }++; + } + if ( $parsed->{code} =~ /^1\d\d$/ ) { + $new_entry->{'1xx'}++; + $data->{totals}{'1xx'}++; + } elsif ( $parsed->{code} =~ /^2\d\d$/ ) { + $new_entry->{'2xx'}++; + $data->{totals}{'2xx'}++; + } elsif ( $parsed->{code} =~ /^3\d\d$/ ) { + $new_entry->{'3xx'}++; + $data->{totals}{'3xx'}++; + } elsif ( $parsed->{code} =~ /^4\d\d$/ ) { + $new_entry->{'4xx'}++; + $data->{totals}{'4xx'}++; + } elsif ( $parsed->{code} =~ /^5\d\d$/ ) { + $new_entry->{'5xx'}++; + $data->{totals}{'5xx'}++; + } + } ## end if ( defined( $parsed->{code} ) ) + + if ( defined( $parsed->{proto} ) ) { + if ( $parsed->{proto} eq 'HTTP/1.0' ) { + $new_entry->{'http1_0'}++; + $data->{totals}{'http1_0'}++; + } elsif ( $parsed->{proto} eq 'HTTP/1.1' ) { + $new_entry->{'http1_1'}++; + $data->{totals}{'http1_1'}++; + } elsif ( $parsed->{proto} eq 'HTTP/2' ) { + $new_entry->{'http2'}++; + $data->{totals}{'http2'}++; + } elsif ( $parsed->{proto} eq 'HTTP/3' ) { + 
$new_entry->{'http3'}++; + $data->{totals}{'http3'}++; + } + } ## end if ( defined( $parsed->{proto} ) ) + + if ( defined( $parsed->{user} ) ) { + if ( $parsed->{user} eq '-' ) { + $new_entry->{'no_user'}++; + $data->{totals}{'no_user'}++; + } else { + $new_entry->{'user'}++; + $data->{totals}{'user'}++; + } + } + + if ( defined( $parsed->{refer} ) ) { + if ( $parsed->{refer} eq '-' ) { + $new_entry->{'no_refer'}++; + $data->{totals}{'no_refer'}++; + } else { + $new_entry->{'refer'}++; + $data->{totals}{'refer'}++; + } + } + + if ($process_log) { + $line = $bw->readline; + } + } ## end while ( $process_log && defined($line) ) + }; + if ( defined( $bytes_log[0] ) ) { + $new_entry->{bytes_min} = min(@bytes_log); + $new_entry->{bytes_max} = max(@bytes_log); + $new_entry->{bytes_mean} = mean(@bytes_log); + $new_entry->{bytes_median} = median(@bytes_log); + $new_entry->{bytes_mode} = mode(@bytes_log); + $new_entry->{bytes_range} = range(@bytes_log); + } + $data->{logs}{$log_name} = $new_entry; + +} ## end foreach my $log_name ( keys( %{ $config->{access...}})) + +if ( defined( $bytes_total[0] ) ) { + $data->{totals}{bytes_min} = min(@bytes_total); + $data->{totals}{bytes_max} = max(@bytes_total); + $data->{totals}{bytes_mean} = mean(@bytes_total); + $data->{totals}{bytes_median} = median(@bytes_total); + $data->{totals}{bytes_mode} = mode(@bytes_total); + $data->{totals}{bytes_range} = range(@bytes_total); +} + +#add the data has to the return hash +$to_return->{data} = $data; + +#finally render the JSON +my $raw_json = encode_json($to_return); +if ($write) { + write_file( $cache_base, $raw_json ); + # compress and write to the cache file for it + my $compressed_string; + gzip \$raw_json => \$compressed_string; + my $compressed = encode_base64($compressed_string); + $compressed =~ s/\n//g; + $compressed = $compressed . "\n"; + my $print_compressed = 0; + write_file( $cache_base . 
'.snmp', $compressed ); + + if ( !$if_write_be_quiet ) { + print $raw_json; + } +} else { + if ( !$compress ) { + print $raw_json. "\n"; + exit; + } + + # compress and write to the cache file for it + my $compressed_string; + gzip \$raw_json => \$compressed_string; + my $compressed = encode_base64($compressed_string); + $compressed =~ s/\n//g; + $compressed = $compressed . "\n"; + print $compressed; +} ## end else [ if ($write) ] From 45c1ed1834b78dc03e5a409b65f60a0736260817 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Sat, 27 Jul 2024 10:54:19 -0500 Subject: [PATCH 442/497] http_access_log_combined: add 300, 426, and 428 response codes and a few POD fixes (#542) --- snmp/http_access_log_combined | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/snmp/http_access_log_combined b/snmp/http_access_log_combined index af7b841a4..dfb8cfc12 100755 --- a/snmp/http_access_log_combined +++ b/snmp/http_access_log_combined @@ -20,7 +20,7 @@ http_access_log_combined --version|-v =head1 SNMPD CONFIG - extend http_access_log_combined /usr/local/etc/snmp/poudriere -b + extend http_access_log_combined /usr/local/etc/snmp/http_access_log_combined -b or if using cron... @@ -28,7 +28,7 @@ or if using cron... 
4/5 * * * * root /usr/local/etc/snmp/http_access_log_combined -b -q # snmpd.conf - extend poudriere cat /var/cache/http_access_log_combined.json.snmp + extend http_access_log_combined cat /var/cache/http_access_log_combined.json.snmp =head1 FLAGS @@ -321,6 +321,7 @@ my $data = { '218' => 0, '226' => 0, '3xx' => 0, + '300' => 0, '301' => 0, '302' => 0, '303' => 0, @@ -355,6 +356,8 @@ my $data = { '423' => 0, '424' => 0, '425' => 0, + '426' => 0, + '428' => 0, '429' => 0, '431' => 0, '444' => 0, @@ -430,6 +433,7 @@ foreach my $log_name ( keys( %{ $config->{access} } ) ) { '218' => 0, '226' => 0, '3xx' => 0, + '300' => 0, '301' => 0, '302' => 0, '303' => 0, @@ -464,6 +468,8 @@ foreach my $log_name ( keys( %{ $config->{access} } ) ) { '423' => 0, '424' => 0, '425' => 0, + '426' => 0, + '428' => 0, '429' => 0, '431' => 0, '444' => 0, From 416d473d36c69938e41a1547aac748538d3da393 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Sun, 28 Jul 2024 12:48:44 -0500 Subject: [PATCH 443/497] add extend for Samba (#543) * start work on samba * finalize Samba extend --- snmp/samba | 444 +++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 444 insertions(+) create mode 100755 snmp/samba diff --git a/snmp/samba b/snmp/samba new file mode 100755 index 000000000..418335974 --- /dev/null +++ b/snmp/samba @@ -0,0 +1,444 @@ +#!/usr/bin/env perl + +=head1 NAME + +samba - LibreNMS JSON style SNMP extend for monitoring Samba + +=head1 VERSION + +0.1.0 + +=head1 SYNOPSIS + +samba B<-w> [B<-o> ] [B<-q>] + +samba [<-b>] [B<-d>] + +samba --help|-h + +samba --version|-v + +=head1 SNMPD CONFIG + + extend samba /usr/local/etc/snmp/samba -b -a -z + +or if using cron... + + # cron + 4/5 * * * * root /usr/local/etc/snmp/samba -b -a -z -q + + # snmpd.conf + extend samba cat /var/cache/samba.json.snmp + +=head1 FLAGS + +=head2 -b + +Encapsulate the result in GZip+Base64 if -w is not used. + +=head2 -q + +If -w is specified, do not print the results to stdout. 
+ +=head2 -w + +Write the results out. + +=head2 -o + +Where to write the results to. Defaults to '/var/cache/samba.json', +meaning it will be written out to the two locations. + + /var/cache/samba.json + /var/cache/samba.json.snmp + +The later is for use with returning data for SNMP. Will be compressed +if possible. + +=head1 REQUIREMENTS + + File::Slurp + MIME::Base64 + JSON + + # FreeBSD + pkg add p5-File-Slurp p5-MIME-Base64 p5-JSON + + # Debian + apt-get install libfile-slurp-perl libmime-base64-perl libjson-perl + +=cut + +use strict; +use warnings; +use Getopt::Long; +use File::Slurp; +use MIME::Base64; +use IO::Compress::Gzip qw(gzip $GzipError); +use Pod::Usage; +use JSON; + +#the version of returned data +my $VERSION = 1; + +# ensure sbin is in the path +$ENV{PATH} = $ENV{PATH} . ':/sbin:/usr/sbin:/usr/local/sbin:/bin:/usr/bin:/usr/local/bin'; + +my $pretty; +my $cache_base = '/var/cache/samba.json'; +my $write; +my $compress; +my $version; +my $help; +my $if_write_be_quiet; +GetOptions( + b => \$compress, + h => \$help, + help => \$help, + 'o=s' => \$cache_base, + q => \$if_write_be_quiet, + v => \$version, + w => \$write, + version => \$version, +); + +if ($version) { + pod2usage( -exitval => 255, -verbose => 99, -sections => qw(VERSION), -output => \*STDOUT, ); + exit 255; +} + +if ($help) { + pod2usage( -exitval => 255, -verbose => 2, -output => \*STDOUT, ); + exit 255; +} + +#the data to return +my $to_return = { + 'version' => $VERSION, + 'error' => '0', + 'errorString' => '', +}; +my $data = { + general => { + connect_count => undef, + disconnect_count => undef, + idle_count => undef, + cpu_user_time => undef, + cpu_system_time => undef, + request_count => undef, + push_sec_ctx_count => undef, + push_sec_ctx_time => undef, + set_sec_ctx_count => undef, + set_sec_ctx_time => undef, + set_root_sec_ctx_count => undef, + set_root_sec_ctx_time => undef, + pop_sec_ctx_count => undef, + pop_sec_ctx_time => undef, + syscall_count => 0, + syscall_time => 0, 
+ syscall_idle => 0, + syscall_bytes => 0, + syscall_read_bytes => 0, + syscall_read_time => 0, + syscall_read_idle => 0, + syscall_read_count => 0, + syscall_write_bytes => 0, + syscall_write_count => 0, + syscall_write_time => 0, + syscall_write_idle => 0, + syscall_other_count => 0, + syscall_other_time => 0, + acl_count => 0, + acl_time => 0, + acl_get_count => 0, + acl_get_time => 0, + acl_set_count => 0, + acl_set_time => 0, + statcache_lookups_count => undef, + statcache_misses_count => undef, + statcache_hits_count => undef, + smb_count => 0, + smb_time => 0, + smb_read_count => 0, + smb_read_time => 0, + smb_write_count => 0, + smb_write_time => 0, + smb_other_count => 0, + smb_other_time => 0, + smb2_count => 0, + smb2_time => 0, + smb2_bytes => 0, + smb2_idle => 0, + smb2_read_count => 0, + smb2_read_time => 0, + smb2_read_bytes => 0, + smb2_read_idle => 0, + smb2_write_count => 0, + smb2_write_time => 0, + smb2_write_bytes => 0, + smb2_write_idle => 0, + smb2_other_count => 0, + smb2_other_time => 0, + trans2_time => 0, + trans2_count => 0, + nt_transact_time => 0, + nt_transact_count => 0, + }, + procs => [], + shares => [], +}; + +### +### +### get profiling info via smbstatus -P +### +### +my @profiling_lines = grep( !/^\*/, split( /\n/, `smbstatus -P 2> /dev/null` ) ); +foreach my $line (@profiling_lines) { + $line =~ s/\s//g; + my @line_split = split( /\:/, $line ); + if ( $line_split[1] =~ /^[0-9]+$/ ) { + if ( $line_split[0] =~ /^syscall_/ ) { + if ( $line_split[0] =~ /read/ || $line_split[0] =~ /recv/ ) { + if ( $line_split[0] =~ /count/ ) { + $data->{general}{syscall_count} = $data->{general}{syscall_count} + $line_split[1]; + $data->{general}{syscall_read_count} = $data->{general}{syscall_read_count} + $line_split[1]; + } elsif ( $line_split[0] =~ /bytes/ ) { + $data->{general}{syscall_bytes} = $data->{general}{syscall_bytes} + $line_split[1]; + $data->{general}{syscall_read_bytes} = $data->{general}{syscall_read_bytes} + $line_split[1]; + } 
elsif ( $line_split[0] =~ /time/ ) { + $data->{general}{syscall_time} = $data->{general}{syscall_time} + $line_split[1]; + $data->{general}{syscall_read_time} = $data->{general}{syscall_read_time} + $line_split[1]; + } elsif ( $line_split[0] =~ /idle/ ) { + $data->{general}{syscall_idle} = $data->{general}{syscall_idle} + $line_split[1]; + $data->{general}{syscall_read_idle} = $data->{general}{syscall_read_idle} + $line_split[1]; + } + } elsif ( $line_split[0] =~ /write/ || $line_split[0] =~ /send/ ) { + if ( $line_split[0] =~ /count/ ) { + $data->{general}{syscall_count} = $data->{general}{syscall_count} + $line_split[1]; + $data->{general}{syscall_write_count} = $data->{general}{syscall_write_count} + $line_split[1]; + } elsif ( $line_split[0] =~ /bytes/ ) { + $data->{general}{syscall_bytes} = $data->{general}{syscall_bytes} + $line_split[1]; + $data->{general}{syscall_write_bytes} = $data->{general}{syscall_write_bytes} + $line_split[1]; + } elsif ( $line_split[0] =~ /time/ ) { + $data->{general}{syscall_time} = $data->{general}{syscall_time} + $line_split[1]; + $data->{general}{syscall_write_time} = $data->{general}{syscall_write_time} + $line_split[1]; + } + } elsif ( $line_split[0] =~ /idle/ ) { + $data->{general}{syscall_idle} = $data->{general}{syscall_idle} + $line_split[1]; + $data->{general}{syscall_write_idle} = $data->{general}{syscall_write_idle} + $line_split[1]; + } else { + if ( $line_split[0] =~ /count/ ) { + $data->{general}{syscall_count} = $data->{general}{syscall_count} + $line_split[1]; + $data->{general}{syscall_other_count} = $data->{general}{syscall_other_count} + $line_split[1]; + } elsif ( $line_split[0] =~ /time/ ) { + $data->{general}{syscall_time} = $data->{general}{syscall_time} + $line_split[1]; + $data->{general}{syscall_other_time} = $data->{general}{syscall_other_time} + $line_split[1]; + } elsif ( $line_split[0] =~ /idle/ ) { + $data->{general}{syscall_idle} = $data->{general}{syscall_idle} + $line_split[1]; + 
$data->{general}{syscall_other_idle} = $data->{general}{syscall_other_idle} + $line_split[1]; + } + } ## end else [ if ( $line_split[0] =~ /read/ || $line_split...)] + } elsif ( $line_split[0] =~ /^[fgs]+et_nt_acl/ ) { + if ( $line_split[0] =~ /get/ ) { + if ( $line_split[0] =~ /count/ ) { + $data->{general}{acl_count} = $data->{general}{acl_count} + $line_split[1]; + $data->{general}{acl_get_count} = $data->{general}{acl_get_count} + $line_split[1]; + } elsif ( $line_split[0] =~ /time/ ) { + $data->{general}{acl_time} = $data->{general}{acl_time} + $line_split[1]; + $data->{general}{acl_get_time} = $data->{general}{acl_get_time} + $line_split[1]; + } + } elsif ( $line_split[0] =~ /set/ ) { + if ( $line_split[0] =~ /count/ ) { + $data->{general}{acl_count} = $data->{general}{acl_count} + $line_split[1]; + $data->{general}{acl_set_count} = $data->{general}{acl_set_count} + $line_split[1]; + } elsif ( $line_split[0] =~ /time/ ) { + $data->{general}{acl_time} = $data->{general}{acl_time} + $line_split[1]; + $data->{general}{acl_set_time} = $data->{general}{acl_set_time} + $line_split[1]; + } + } + } elsif ( $line_split[0] =~ /^SMB/ ) { + # Samba apparent does not have byte counters for these... 
that said looks like this one is not really used any more + if ( $line_split[0] =~ /read/ ) { + if ( $line_split[0] =~ /count/ ) { + $data->{general}{smb_count} = $data->{general}{smb_count} + $line_split[1]; + $data->{general}{smb_read_count} = $data->{general}{smb_read_count} + $line_split[1]; + } elsif ( $line_split[0] =~ /time/ ) { + $data->{general}{smb_time} = $data->{general}{smb_time} + $line_split[1]; + $data->{general}{smb_read_time} = $data->{general}{smb_read_time} + $line_split[1]; + } + } elsif ( $line_split[0] =~ /write/ ) { + if ( $line_split[0] =~ /count/ ) { + $data->{general}{smb_count} = $data->{general}{smb_count} + $line_split[1]; + $data->{general}{smb_write_count} = $data->{general}{smb_write_count} + $line_split[1]; + } elsif ( $line_split[0] =~ /time/ ) { + $data->{general}{smb_time} = $data->{general}{smb_time} + $line_split[1]; + $data->{general}{smb_write_time} = $data->{general}{smb_write_time} + $line_split[1]; + } + } else { + if ( $line_split[0] =~ /count/ ) { + $data->{general}{smb_count} = $data->{general}{smb_count} + $line_split[1]; + $data->{general}{smb_other_count} = $data->{general}{smb_other_count} + $line_split[1]; + } elsif ( $line_split[0] =~ /time/ ) { + $data->{general}{smb_time} = $data->{general}{smb_time} + $line_split[1]; + $data->{general}{smb_other_time} = $data->{general}{smb_other_time} + $line_split[1]; + } + } + } elsif ( $line_split[0] =~ /^Trans2_/ ) { + # Samba does not appear to have any that are read/write for this really... 
also no bytes coutners + if ( $line_split[0] =~ /count/ ) { + $data->{general}{trans2_count} = $data->{general}{trans2_count} + $line_split[1]; + } elsif ( $line_split[0] =~ /time/ ) { + $data->{general}{trans2_time} = $data->{general}{trans2_time} + $line_split[1]; + } + } elsif ( $line_split[0] =~ /^NT_transact_/ ) { + if ( $line_split[0] =~ /count/ ) { + $data->{general}{nt_transact_count} = $data->{general}{nt_transact_count} + $line_split[1]; + } elsif ( $line_split[0] =~ /time/ ) { + $data->{general}{nt_transact_time} = $data->{general}{nt_transact_time} + $line_split[1]; + } + } elsif ( $line_split[0] =~ /^smb2_/ ) { + if ( $line_split[0] =~ /read/ || $line_split[0] =~ /recv/ ) { + if ( $line_split[0] =~ /count/ ) { + $data->{general}{smb2_count} = $data->{general}{smb2_count} + $line_split[1]; + $data->{general}{smb2_read_count} = $data->{general}{smb2_read_count} + $line_split[1]; + } elsif ( $line_split[0] =~ /bytes/ ) { + $data->{general}{smb2_bytes} = $data->{general}{smb2_bytes} + $line_split[1]; + $data->{general}{smb2_read_bytes} = $data->{general}{smb2_read_bytes} + $line_split[1]; + } elsif ( $line_split[0] =~ /time/ ) { + $data->{general}{smb2_time} = $data->{general}{smb2_time} + $line_split[1]; + $data->{general}{smb2_read_time} = $data->{general}{smb2_read_time} + $line_split[1]; + } elsif ( $line_split[0] =~ /idle/ ) { + $data->{general}{smb2_idle} = $data->{general}{smb2_idle} + $line_split[1]; + $data->{general}{smb2_read_idle} = $data->{general}{smb2_read_idle} + $line_split[1]; + } + } elsif ( $line_split[0] =~ /write/ || $line_split[0] =~ /send/ ) { + if ( $line_split[0] =~ /count/ ) { + $data->{general}{smb2_count} = $data->{general}{smb2_count} + $line_split[1]; + $data->{general}{smb2_write_count} = $data->{general}{smb2_write_count} + $line_split[1]; + } elsif ( $line_split[0] =~ /bytes/ ) { + $data->{general}{smb2_bytes} = $data->{general}{smb2_bytes} + $line_split[1]; + $data->{general}{smb2_write_bytes} = 
$data->{general}{smb2_write_bytes} + $line_split[1]; + } elsif ( $line_split[0] =~ /time/ ) { + $data->{general}{smb2_time} = $data->{general}{smb2_time} + $line_split[1]; + $data->{general}{smb2_write_time} = $data->{general}{smb2_write_time} + $line_split[1]; + } + } elsif ( $line_split[0] =~ /idle/ ) { + $data->{general}{smb2_idle} = $data->{general}{smb2_idle} + $line_split[1]; + $data->{general}{smb2_write_idle} = $data->{general}{smb2_write_idle} + $line_split[1]; + } else { + if ( $line_split[0] =~ /count/ ) { + $data->{general}{smb2_count} = $data->{general}{smb2_count} + $line_split[1]; + $data->{general}{smb2_other_count} = $data->{general}{smb2_other_count} + $line_split[1]; + } elsif ( $line_split[0] =~ /time/ ) { + $data->{general}{smb2_time} = $data->{general}{smb2_time} + $line_split[1]; + $data->{general}{smb2_other_time} = $data->{general}{smb2_other_time} + $line_split[1]; + } elsif ( $line_split[0] =~ /idle/ ) { + $data->{general}{smb2_idle} = $data->{general}{smb2_idle} + $line_split[1]; + $data->{general}{smb2_other_idle} = $data->{general}{smb2_other_idle} + $line_split[1]; + } + } ## end else [ if ( $line_split[0] =~ /read/ || $line_split...)] + } else { + $data->{general}{ $line_split[0] } = $line_split[1]; + } + } ## end if ( $line_split[1] =~ /^[0-9]+$/ ) +} ## end foreach my $line (@profiling_lines) + +### +### +### get process info via smbstatus -p +### +### +my @process_lines = grep( /^\d/, split( /\n/, `smbstatus -p 2> /dev/null` ) ); +foreach my $line (@process_lines) { +# lines look like this +# 5420 bar foo 192.168.1.2 (ipv4:192.168.1.2:497) SMB3_11 - partial(AES-128-CMAC) + my $new_proc = {}; + my $client_info; + ( + $new_proc->{pid}, $new_proc->{user}, $new_proc->{group}, $new_proc->{machine}, + $client_info, $new_proc->{version}, $new_proc->{encryption}, $new_proc->{signing}, + ) = split( /\s+/, $line, 8 ); + $client_info =~ s/^\(//; + $client_info =~ s/\)$//; + $new_proc->{ip} = $client_info; + $new_proc->{ip} =~ 
s/^[a-zA-Z0-9]+\://; + $new_proc->{ip} =~ s/:\d+$//; + $new_proc->{ip} =~ s/[\[\]]//g; + $new_proc->{port} = $client_info; + $new_proc->{port} =~ s/.*\]//g; + $new_proc->{port} =~ s/.*\://g; + + push( @{ $data->{procs} }, $new_proc ); +} ## end foreach my $line (@process_lines) + +### +### +### get share info via smbstatus -S +### +### +my @share_lines = grep( /^\w+\s+\d+/, split( /\n/, `smbstatus -S 2> /dev/null` ) ); +foreach my $line (@share_lines) { + # lines look like... sometimes spaces on the end + # foo 5423 192.168.1.2 Tue Jul 16 02:39:53 2024 CDT - - + my $new_share = {}; + my $rest_of_line; + ( $new_share->{service}, $new_share->{pid}, $new_share->{machine}, $rest_of_line ) = split( /\s+/, $line, 4 ); + $rest_of_line =~ s/\s+$//; + # reverse it to make parsing out the date easy + $rest_of_line = reverse $rest_of_line; + ( $new_share->{signing}, $new_share->{encryption}, $new_share->{connected_at} ) = split( /\s+/, $rest_of_line, 3 ); + $new_share->{signing} = reverse $new_share->{signing}; + $new_share->{encryption} = reverse $new_share->{encryption}; + $new_share->{connected_at} = reverse $new_share->{connected_at}; + + push( @{ $data->{shares} }, $new_share ); +} ## end foreach my $line (@share_lines) + +### +### +### get locks info via smbstatus -L +### +### +my @lock_lines = grep( /^\d+\s+/, split( /\n/, `smbstatus -L 2> /dev/null` ) ); +$data->{general}{lock_count} = $#lock_lines + 1; + +### +### +### finalize it +### +### + +#add the data has to the return hash +$to_return->{data} = $data; + +#finally render the JSON +my $raw_json = encode_json($to_return); +if ($write) { + write_file( $cache_base, $raw_json ); + # compress and write to the cache file for it + my $compressed_string; + gzip \$raw_json => \$compressed_string; + my $compressed = encode_base64($compressed_string); + $compressed =~ s/\n//g; + $compressed = $compressed . "\n"; + my $print_compressed = 0; + write_file( $cache_base . 
'.snmp', $compressed ); + + if ( !$if_write_be_quiet ) { + print $raw_json; + } +} else { + if ( !$compress ) { + print $raw_json. "\n"; + exit; + } + + # compress and write to the cache file for it + my $compressed_string; + gzip \$raw_json => \$compressed_string; + my $compressed = encode_base64($compressed_string); + $compressed =~ s/\n//g; + $compressed = $compressed . "\n"; + print $compressed; +} ## end else [ if ($write) ] From 286f0b78c7af668bd8c4890dbfe12ac2697aea2e Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Tue, 30 Jul 2024 13:29:59 -0500 Subject: [PATCH 444/497] snmp/samba: for generic vars, ensure we have a value (#544) --- snmp/samba | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/snmp/samba b/snmp/samba index 418335974..f3c6428c8 100755 --- a/snmp/samba +++ b/snmp/samba @@ -339,7 +339,9 @@ foreach my $line (@profiling_lines) { } } ## end else [ if ( $line_split[0] =~ /read/ || $line_split...)] } else { - $data->{general}{ $line_split[0] } = $line_split[1]; + if (defined($line_split[1])) { + $data->{general}{ $line_split[0] } = $line_split[1]; + } } } ## end if ( $line_split[1] =~ /^[0-9]+$/ ) } ## end foreach my $line (@profiling_lines) From 6c187e7cafaf3600b7e5773c79c83990d64535bc Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Tue, 30 Jul 2024 13:30:41 -0500 Subject: [PATCH 445/497] http_access_log_combined: use env to call perl (#545) --- snmp/http_access_log_combined | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/http_access_log_combined b/snmp/http_access_log_combined index dfb8cfc12..93c82e89a 100755 --- a/snmp/http_access_log_combined +++ b/snmp/http_access_log_combined @@ -1,4 +1,4 @@ -#!/usr/local/bin/perl +#!/usr/bin/env perl =head1 NAME From e8def552ec22288c89e18b5c4049bb3cca161bc6 Mon Sep 17 00:00:00 2001 From: Tr4sK Date: Tue, 1 Oct 2024 16:37:34 +0200 Subject: [PATCH 446/497] Gather vlan names from /interface vlan (#524) thanks! 
--- snmp/Routeros/LNMS_vlans.scr | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/snmp/Routeros/LNMS_vlans.scr b/snmp/Routeros/LNMS_vlans.scr index 3ac920ed5..dd12825de 100644 --- a/snmp/Routeros/LNMS_vlans.scr +++ b/snmp/Routeros/LNMS_vlans.scr @@ -9,13 +9,19 @@ :foreach i in [/interface bridge vlan find] do={ :local intf [/interface bridge vlan get $i bridge] :local vlid [/interface bridge vlan get $i vlan-ids] + :local vname + + :foreach i in [/interface vlan find where vlan-id=$vlid] do={ + :local intname [/interface vlan get $i name] + :set $vname ($intname) + } :foreach t in [/interface bridge vlan get $i tagged] do={ - :set $vlanst ($vlanst, "$vlid,$t") + :set $vlanst ($vlanst, "$vlid,$t,$vname") } :foreach u in [/interface bridge vlan get $i current-untagged] do={ - :set $vlansu ($vlansu, "$vlid,$u") + :set $vlansu ($vlansu, "$vlid,$u,$vname") } :foreach u in [/interface bridge port find where bridge=$intf and pvid=$vlid] do={ @@ -28,7 +34,7 @@ } } :if ( $fl != 1 ) do={ - :set $vlansu ($vlansu, "$vlid,$iu") + :set $vlansu ($vlansu, "$vlid,$iu,$vname") } } } @@ -36,6 +42,7 @@ :foreach vl in [/interface vlan find ] do={ :local intf [/interface vlan get $vl interface] :local vlid [/interface vlan get $vl vlan-id] + :local vname [/interface vlan get $vl name] :local fl 0 :foreach tmp in $vlanst do={ @@ -45,7 +52,7 @@ } } :if ( $fl != 1 ) do={ - :set $vlanst ($vlanst, "$vlid,$intf") + :set $vlanst ($vlanst, "$vlid,$intf,$vname") } } From 55d74ff2acdaee4546a80acb2f7f5f587717938c Mon Sep 17 00:00:00 2001 From: samburney Date: Wed, 2 Oct 2024 00:12:03 +0930 Subject: [PATCH 447/497] Update powerdns.py to use 'list' instead of 'show *' (#514) thanks! 
--- snmp/powerdns.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/powerdns.py b/snmp/powerdns.py index 088273da7..74e9517b0 100755 --- a/snmp/powerdns.py +++ b/snmp/powerdns.py @@ -6,7 +6,7 @@ pdnscontrol = "/usr/bin/pdns_control" process = subprocess.Popen( - [pdnscontrol, "show", "*"], stdout=subprocess.PIPE, stderr=subprocess.PIPE + [pdnscontrol, "list"], stdout=subprocess.PIPE, stderr=subprocess.PIPE ) input = process.communicate() stdout = input[0].decode() From fe46096653cc020536b07779a0b5d735fe2785d4 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Wed, 2 Oct 2024 10:56:22 -0500 Subject: [PATCH 448/497] opensearch extend update add -w (#547) * rework to use pod2doc and do compression * tweak this a bit * derp, fix the compress stuff a bit more * finish some doc stuff --- snmp/opensearch | 197 +++++++++++++++++++++++++++++++++++------------- 1 file changed, 146 insertions(+), 51 deletions(-) diff --git a/snmp/opensearch b/snmp/opensearch index 11b57edd4..7341072ea 100755 --- a/snmp/opensearch +++ b/snmp/opensearch @@ -1,6 +1,6 @@ #!/usr/bin/env perl -#Copyright (c) 2023, Zane C. Bowers-Hadley +#Copyright (c) 2024, Zane C. Bowers-Hadley #All rights reserved. # #Redistribution and use in source and binary forms, with or without modification, @@ -23,63 +23,143 @@ #OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF #THE POSSIBILITY OF SUCH DAMAGE. -=for comment +use warnings; +use strict; + +=pod + +=head1 NAME + +opensearch - LibreNMS JSON SNMP extend for gathering backups for borg + +=head1 VERSION + +0.1.0 + +=cut -Add this to snmpd.conf as below and restart snmpd. +our $VERSION = '0.1.0'; + +=head1 SYNOPSIS + +opensearch [B<-a> ] [B<-c> ] [B<-h> ] [B<-p> ] [B<-S>] +[B<-I>] [B<-P>] [B<-S>] [B<-w>] [B<-o> ] + +opensearch [B<--help>] + +opensearch [B<--version>] + +=head1 DESCRIPTION + +Needs enabled in snmpd.conf like below. 
extend opensearch /etc/snmp/extends/opensearch -Supported command line options are as below. +If you have issues with it timing taking to long to poll and +occasionally timing out, you can set it up in cron like this. + + */5 * * * * /etc/snmp/extends/opensearch -q -w + +And then in snmpd.conf like below. + + extend opensearch /bin/cat /var/cache/opensearch_extend.json.snmp + +Installing the depends can be done like below. + + # FreeBSD + pkg install p5-JSON p5-File-Slurp p5-MIME-Base64 p5-libwww p5-LWP-Protocol-https + + # Debian + apt-get install libjson-perl libfile-slurp-perl liblwp-protocol-https-perl + +=head1 FLAGS + +=head2 -a + +Auth token path. + +=head2 -c + +CA file path. + +Default: empty + +=head2 -h + +The host to connect to. + +Default: 127.0.0.1 + +=head2 -I + +Do not verify hostname (when used with -S). + +=head2 -o + +The base name for the output. + +Default: /var/cache/opensearch_extend.json - -a Auth token path. - -c CA file path. - Default: empty - -h The host to connect to. - Default: 127.0.0.1 - -p The port to use. - Default: 9200 - -S Use https instead of http. - -I Do not verify hostname (when used with -S). - -P Pretty print. - -S Use HTTPS. +=head2 -p + +The port to use. + +Default: 9200 + +=head2 -P + +Pretty print. + +=head2 -q + +Do not print the output. + +Useful for with -w. + +=head2 -S + +Use HTTPS. The last is only really relevant to the usage with SNMP. +=head2 -w + +Write the results out to two files based on what is specified +via -o . 
+ +Default Raw JSON: /var/cache/opensearch_extend.json + +Default SNMP Return: /var/cache/opensearch_extend.json.snmp + =cut -use warnings; -use strict; use Getopt::Std; use JSON; use LWP::UserAgent (); +use File::Slurp; +use Pod::Usage; +use MIME::Base64; +use IO::Compress::Gzip qw(gzip $GzipError); $Getopt::Std::STANDARD_HELP_VERSION = 1; sub main::VERSION_MESSAGE { - print "Elastic/Opensearch SNMP extend 0.0.0\n"; + print 'opensearch LibreNMS extend version '.$VERSION."\n"; } sub main::HELP_MESSAGE { - print "\n" - . "-a Auth token path.\n" - . "-c CA file path.\n" - . "-h The host to connect to.\n" - . " Default: 127.0.0.1\n" - . "-p The port to use.\n" - . " Default: 9200\n" - . "-S Use https instead of http.\n" - . "-I Do not verify hostname (when used with -S).\n" - . "-P Pretty print.\n"; + pod2usage( -exitval => 255, -verbose => 2, -output => \*STDOUT, ); } -my $protocol = 'http'; -my $host = '127.0.0.1'; -my $port = 9200; -my $schema = 'http'; +my $protocol = 'http'; +my $host = '127.0.0.1'; +my $port = 9200; +my $schema = 'http'; +my $output_base = '/var/cache/opensearch_extend.json'; #gets the options my %opts; -getopts( 'a:c:h:p:PIS', \%opts ); +getopts( 'a:c:h:p:PISqo:w', \%opts ); if ( defined( $opts{h} ) ) { $host = $opts{h}; } @@ -89,6 +169,9 @@ if ( defined( $opts{p} ) ) { if ( $opts{S} ) { $schema = 'https'; } +if ( defined( $opts{o} ) ) { + $output_base = $opts{o}; +} my $auth_token; if ( defined( $opts{a} ) ) { @@ -124,12 +207,11 @@ my $stats_response = $ua->get($stats_url); if ( defined( $opts{c} ) ) { # set ca file - $ua->ssl_opts( SSL_ca_file => $opts{c}); + $ua->ssl_opts( SSL_ca_file => $opts{c} ); } -my $stats_response; if ( defined( $opts{a} ) ) { - $stats_response = $ua->get($stats_url, "Authorization" => $auth_token,); + $stats_response = $ua->get( $stats_url, "Authorization" => $auth_token, ); } else { $stats_response = $ua->get($stats_url); } @@ -146,8 +228,7 @@ if ( $stats_response->is_success ) { } exit; } -} -else { +} else { 
$to_return->{errorString} = 'Failed to get "' . $stats_url . '"... ' . $stats_response->status_line; $to_return->{error} = 1; print $json->encode($to_return); @@ -159,7 +240,7 @@ else { my $health_response; if ( defined( $opts{a} ) ) { - $health_response = $ua->get($health_url, "Authorization" => $auth_token,); + $health_response = $ua->get( $health_url, "Authorization" => $auth_token, ); } else { $health_response = $ua->get($health_url); } @@ -176,8 +257,7 @@ if ( $health_response->is_success ) { } exit; } -} -else { +} else { $to_return->{errorString} = 'Failed to get "' . $health_url . '"... ' . $health_response->status_line; $to_return->{error} = 1; print $json->encode($to_return); @@ -212,14 +292,11 @@ $to_return->{data}{c_act_shards_perc} = $health_json->{active_shards_percent_as # unknown = 3 if ( $health_json->{status} =~ /[Gg][Rr][Ee][Ee][Nn]/ ) { $to_return->{data}{status} = 0; -} -elsif ( $health_json->{status} =~ /[Yy][Ee][Ll][Ll][Oo][Ww]/ ) { +} elsif ( $health_json->{status} =~ /[Yy][Ee][Ll][Ll][Oo][Ww]/ ) { $to_return->{data}{status} = 1; -} -elsif ( $health_json->{status} =~ /[Rr][Ee][Dd]/ ) { +} elsif ( $health_json->{status} =~ /[Rr][Ee][Dd]/ ) { $to_return->{data}{status} = 2; -} -else { +} else { $to_return->{data}{status} = 3; } @@ -244,8 +321,7 @@ if ( defined( $stats_json->{_all}{total}{indexing}{is_throttled} ) && $stats_json->{_all}{total}{indexing}{is_throttled} eq 'true' ) { $to_return->{data}{ti_throttled} = 1; -} -else { +} else { $to_return->{data}{ti_throttled} = 0; } @@ -316,8 +392,27 @@ $to_return->{data}{trc_misses} = $stats_json->{_all}{total}{request_cache}{mi $to_return->{data}{tst_size} = $stats_json->{_all}{total}{store}{size_in_bytes}; $to_return->{data}{tst_res_size} = $stats_json->{_all}{total}{store}{reserved_in_bytes}; -print $json->encode($to_return); +my $raw_json = $json->encode($to_return); if ( !$opts{P} ) { - print "\n"; + $raw_json = $raw_json . 
"\n"; } + +if ( !$opts{q} ) { + print $raw_json; +} + +if ( !$opts{w} ) { + exit 0; +} + +write_file( $output_base, { atomic => 1 }, $raw_json ); + +my $compressed_string; +gzip \$raw_json => \$compressed_string; +my $compressed = encode_base64($compressed_string); +$compressed =~ s/\n//g; +$compressed = $compressed . "\n"; + +write_file( $output_base . '.snmp', { atomic => 1 }, $compressed ); + exit 0; From 5c5433ee3d0a01e792e5bf2d8d43559025a7f21f Mon Sep 17 00:00:00 2001 From: adamus1red Date: Thu, 3 Oct 2024 16:36:25 +0100 Subject: [PATCH 449/497] Add inital IPMItools functionality for Powermon extension. (#405) * Add inital IPMItools functionality created `getIPMIdata()` based on existing `getHPASMData()`. Leverages ipmitool to gather power usage data. * fix some lint errors --- snmp/powermon-snmp.py | 45 +++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 43 insertions(+), 2 deletions(-) diff --git a/snmp/powermon-snmp.py b/snmp/powermon-snmp.py index d9f179c1b..c98f1ed5d 100755 --- a/snmp/powermon-snmp.py +++ b/snmp/powermon-snmp.py @@ -62,8 +62,9 @@ # 20210204 - v1.2 - added top-level reading, librenms option # 20210205 - v1.3 - added cents per kWh # 20210205 - v1.4 - improvement to UI +# 20220513 - v1.5 - Add inital IPMItool method -version = 1.4 +version = 1.5 ### Libraries @@ -97,7 +98,7 @@ + " [-m|--method ] [-N|--no-librenms] [-p|--pretty]" + " [-v|--verbose] [-w|--warnings] | -l|--list-methods | -h|--help" ) -methods = ["sensors", "hpasmcli"] +methods = ["sensors", "hpasmcli", "ipmitool"] # costPerkWh = 0.15 # <<<< CHANGE ### General functions @@ -138,6 +139,10 @@ def getData(method): elif method == "hpasmcli": data = getHPASMData() + + elif method == "ipmitool": + data = getIPMIdata() + else: usageError("You must specify a method.") @@ -290,6 +295,42 @@ def getHPASMData(): return hdata +def getIPMIdata(): + global error, errorString + error = 2 + errorString = "No power sensor found" + + exe = shutil.which("ipmitool") + # if not 
os.access(candidate, os.W_OK): + cmd = [exe, "dcmi", "power", "reading"] + warningMsg("ipmitool only runs as root") + + try: + output = subprocess.run( + cmd, capture_output=True, check=True, text=True, timeout=2 + ) + + except subprocess.CalledProcessError as e: + errorMsg(str(e) + ": " + str(e.stdout).strip("\n")) + sys.exit(1) + + psu_reading = "^\s+Instantaneous power reading:\s+" + + rawdata = str(output.stdout).replace("\t", " ").replace("\n ", "\n").split("\n") + + hdata = {} + hdata["psu"] = {} # Init PSU data structure + hdata["psu"][0] = {} # Only one value is returned. + + for line in rawdata: + if re.match(psu_reading, line): + verboseMsg("found power meter reading: " + line) + junk, meter_reading = line.split(":", 1) + hdata["psu"][0]["reading"] = psu_reading.replace("Watts", "").strip() + + return hdata + + # Argument Parsing try: opts, args = getopt.gnu_getopt( From c95beb63b3c136ab944a6d0b20d4b38011bb3d4c Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Thu, 3 Oct 2024 11:46:20 -0500 Subject: [PATCH 450/497] "Failed in segment" is now considered a read failure for smart (#548) * add check for "Failed in segment" for self test * minor version bump --- snmp/smart-v1 | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/snmp/smart-v1 b/snmp/smart-v1 index 545282b99..935ed7a62 100755 --- a/snmp/smart-v1 +++ b/snmp/smart-v1 @@ -1,5 +1,5 @@ #!/usr/bin/env perl -#Copyright (c) 2023, Zane C. Bowers-Hadley +#Copyright (c) 2024, Zane C. Bowers-Hadley #All rights reserved. 
# #Redistribution and use in source and binary forms, with or without modification, @@ -113,7 +113,7 @@ my $useSN = 1; $Getopt::Std::STANDARD_HELP_VERSION = 1; sub main::VERSION_MESSAGE { - print "SMART SNMP extend 0.3.1\n"; + print "SMART SNMP extend 0.3.2\n"; } sub main::HELP_MESSAGE { @@ -784,6 +784,8 @@ foreach my $line (@disks) { $IDs{'interrupted'} = scalar @interrupted; my @read_failure = grep( /read failure/, @outputA ); $IDs{'read_failure'} = scalar @read_failure; + my @read_failure2 = grep( /Failed in segment/, @outputA ); + $IDs{'read_failure'} = $IDs{'read_failure'} + scalar @read_failure2; my @unknown_failure = grep( /unknown failure/, @outputA ); $IDs{'unknown_failure'} = scalar @unknown_failure; my @extended = grep( /\d.*\ ([Ee]xtended|[Ll]ong).*(?![Dd]uration)/, @outputA ); From 9bb064fbcab8524ca37ff1c80db0e704c98d0ce1 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Fri, 4 Oct 2024 01:20:52 -0500 Subject: [PATCH 451/497] add read, write, and checksum error gathering for zpools (#549) --- snmp/zfs | 57 +++++++++++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 48 insertions(+), 9 deletions(-) diff --git a/snmp/zfs b/snmp/zfs index 7b2412ced..4033553c5 100755 --- a/snmp/zfs +++ b/snmp/zfs @@ -6,7 +6,7 @@ zfs - LibreNMS JSON SNMP extend for gathering backups for ZFS =head1 VERSION -0.1.1 +0.2.0 =head1 DESCRIPTION @@ -79,6 +79,7 @@ use File::Slurp; use MIME::Base64; use IO::Compress::Gzip qw(gzip $GzipError); use Pod::Usage; +use Scalar::Util qw(looks_like_number); sub main::VERSION_MESSAGE { pod2usage( -exitval => 255, -verbose => 99, -sections => qw(VERSION), -output => \*STDOUT, ); @@ -124,14 +125,18 @@ if ($help) { my $zpool_output = `/sbin/zpool list -pH`; my @pools = split( /\n/, $zpool_output ); my $pools_int = 0; -$tojson{online} = 0; -$tojson{degraded} = 0; -$tojson{offline} = 0; -$tojson{faulted} = 0; -$tojson{health} = 1; -$tojson{unavail} = 0; -$tojson{removed} = 0; -$tojson{unknown} = 0; +$tojson{online} = 0; 
+$tojson{degraded} = 0; +$tojson{offline} = 0; +$tojson{faulted} = 0; +$tojson{health} = 1; +$tojson{unavail} = 0; +$tojson{removed} = 0; +$tojson{unknown} = 0; +$tojson{read_errors} = 0; +$tojson{write_errors} = 0; +$tojson{checksum_errors} = 0; +$tojson{total_errors} = 0; my @toShoveIntoJSON; while ( defined( $pools[$pools_int] ) ) { @@ -211,6 +216,40 @@ while ( defined( $pools[$pools_int] ) ) { } } + # get read/write/checksum info for spools + $newPool{read_errors} = 0; + $newPool{write_errors} = 0; + $newPool{checksum_errors} = 0; + my $pool_status = `zpool status $newPool{name}`; + my @pool_status_split = split(/\n/, $pool_status); + my $pool_config_start; + foreach my $line (@pool_status_split) { + if ($pool_config_start && $line =~ /^[\ \t]*$/) { + $pool_config_start = 0; + } elsif ($line =~ /NAME[\ \t]+STATE[\ \t]+READ[\ \t]+WRITE[\ \t]+CKSUM/) { + $pool_config_start = 1; + } elsif ($pool_config_start) { + my @pool_line_split = split(/[\ \t]+/, $line); + if ( + defined($pool_line_split[3]) && + looks_like_number($pool_line_split[3]) && + defined($pool_line_split[4]) && + looks_like_number($pool_line_split[4]) && + defined($pool_line_split[5]) && + looks_like_number($pool_line_split[5]) + ) { + $newPool{read_errors} = $newPool{read_errors} + $pool_line_split[3]; + $newPool{write_errors} = $newPool{write_errors} + $pool_line_split[4]; + $newPool{checksum_errors} = $newPool{checksum_errors} + $pool_line_split[5]; + } + } + } + $newPool{total_errors} = $newPool{read_errors} + $newPool{write_errors} + $newPool{checksum_errors}; + $tojson{read_errors} = $tojson{read_errors} + $newPool{read_errors}; + $tojson{write_errors} = $tojson{write_errors} + $newPool{write_errors}; + $tojson{checksum_errors} = $tojson{checksum_errors} + $newPool{checksum_errors}; + $tojson{total_errors} = $tojson{total_errors} + $newPool{total_errors}; + push( @toShoveIntoJSON, \%newPool ); $pools_int++; From a7897972fbc807f64e16d427e49e3aec7ab8f97d Mon Sep 17 00:00:00 2001 From: "Zane C. 
Bowers-Hadley" Date: Sat, 19 Oct 2024 07:57:04 -0500 Subject: [PATCH 452/497] fixes for ZFS for getting perf stats (#550) * off by one fix and formatting cleanup * add a missing value --- snmp/zfs | 68 ++++++++++++++++++++++++++++++-------------------------- 1 file changed, 36 insertions(+), 32 deletions(-) diff --git a/snmp/zfs b/snmp/zfs index 4033553c5..0273eb7b0 100755 --- a/snmp/zfs +++ b/snmp/zfs @@ -6,7 +6,7 @@ zfs - LibreNMS JSON SNMP extend for gathering backups for ZFS =head1 VERSION -0.2.0 +0.3.0 =head1 DESCRIPTION @@ -193,22 +193,27 @@ while ( defined( $pools[$pools_int] ) ) { my $iostat = `zpool iostat -l -q -p -H $newPool{name}`; chomp($iostat); - $iostat =~ s/\t/,/g; + $iostat =~ s/\t+/,/g; $iostat =~ s/\,\-\,\-\,/\,0\,0\,/g; $iostat =~ s/\%//g; $iostat =~ s/\,([0-1\.]*)x\,/,$1,/; chomp($iostat); my $parsed; ( - $parsed, $parsed, $newPool{operations_r}, $newPool{operations_w}, - $newPool{bandwidth_r}, $newPool{bandwidth_w}, $newPool{total_wait_r}, $newPool{total_wait_w}, - $newPool{disk_wait_r}, $newPool{disk_wait_w}, $newPool{syncq_wait_r}, $newPool{syncq_wait_w}, - $newPool{asyncq_wait_w}, $newPool{scrub_wait}, $newPool{trim_wait}, $newPool{syncq_read_p}, - $newPool{syncq_read_a}, $newPool{syncq_write_p}, $newPool{syncq_write_a}, $newPool{asyncq_read_p}, - $newPool{asyncq_read_a}, $newPool{asyncq_write_p}, $newPool{asyncq_write_a}, $newPool{scrubq_read_p}, - $newPool{scrubq_read_a}, $newPool{trimq_write_p}, $newPool{trimq_write_a}, + $parsed, $parsed, $parsed, $newPool{operations_r}, + $newPool{operations_w}, $newPool{bandwidth_r}, $newPool{bandwidth_w}, $newPool{total_wait_r}, + $newPool{total_wait_w}, $newPool{disk_wait_r}, $newPool{disk_wait_w}, $newPool{syncq_wait_r}, + $newPool{syncq_wait_w}, $newPool{asyncq_wait_r}, $newPool{asyncq_wait_w}, $newPool{scrub_wait}, + $newPool{trim_wait}, $newPool{syncq_read_p}, $newPool{syncq_read_a}, $newPool{syncq_write_p}, + $newPool{syncq_write_a}, $newPool{asyncq_read_p}, $newPool{asyncq_read_a}, 
$newPool{asyncq_write_p}, + $newPool{asyncq_write_a}, $newPool{scrubq_read_p}, $newPool{scrubq_read_a}, $newPool{trimq_write_p}, + $newPool{trimq_write_a}, ) = split( /\,/, $iostat ); + if ( $newPool{trim_wait} eq '-' ) { + $newPool{trim_wait} = 0; + } + my @pool_keys = keys(%newPool); foreach my $item (@pool_keys) { if ( $item ne 'altroot' && $newPool{$item} eq '-' ) { @@ -220,35 +225,34 @@ while ( defined( $pools[$pools_int] ) ) { $newPool{read_errors} = 0; $newPool{write_errors} = 0; $newPool{checksum_errors} = 0; - my $pool_status = `zpool status $newPool{name}`; - my @pool_status_split = split(/\n/, $pool_status); + my $pool_status = `zpool status $newPool{name}`; + my @pool_status_split = split( /\n/, $pool_status ); my $pool_config_start; foreach my $line (@pool_status_split) { - if ($pool_config_start && $line =~ /^[\ \t]*$/) { + if ( $pool_config_start && $line =~ /^[\ \t]*$/ ) { $pool_config_start = 0; - } elsif ($line =~ /NAME[\ \t]+STATE[\ \t]+READ[\ \t]+WRITE[\ \t]+CKSUM/) { + } elsif ( $line =~ /NAME[\ \t]+STATE[\ \t]+READ[\ \t]+WRITE[\ \t]+CKSUM/ ) { $pool_config_start = 1; } elsif ($pool_config_start) { - my @pool_line_split = split(/[\ \t]+/, $line); - if ( - defined($pool_line_split[3]) && - looks_like_number($pool_line_split[3]) && - defined($pool_line_split[4]) && - looks_like_number($pool_line_split[4]) && - defined($pool_line_split[5]) && - looks_like_number($pool_line_split[5]) - ) { - $newPool{read_errors} = $newPool{read_errors} + $pool_line_split[3]; - $newPool{write_errors} = $newPool{write_errors} + $pool_line_split[4]; + my @pool_line_split = split( /[\ \t]+/, $line ); + if ( defined( $pool_line_split[3] ) + && looks_like_number( $pool_line_split[3] ) + && defined( $pool_line_split[4] ) + && looks_like_number( $pool_line_split[4] ) + && defined( $pool_line_split[5] ) + && looks_like_number( $pool_line_split[5] ) ) + { + $newPool{read_errors} = $newPool{read_errors} + $pool_line_split[3]; + $newPool{write_errors} = $newPool{write_errors} 
+ $pool_line_split[4]; $newPool{checksum_errors} = $newPool{checksum_errors} + $pool_line_split[5]; - } - } - } - $newPool{total_errors} = $newPool{read_errors} + $newPool{write_errors} + $newPool{checksum_errors}; - $tojson{read_errors} = $tojson{read_errors} + $newPool{read_errors}; - $tojson{write_errors} = $tojson{write_errors} + $newPool{write_errors}; + } ## end if ( defined( $pool_line_split[3] ) && looks_like_number...) + } ## end elsif ($pool_config_start) + } ## end foreach my $line (@pool_status_split) + $newPool{total_errors} = $newPool{read_errors} + $newPool{write_errors} + $newPool{checksum_errors}; + $tojson{read_errors} = $tojson{read_errors} + $newPool{read_errors}; + $tojson{write_errors} = $tojson{write_errors} + $newPool{write_errors}; $tojson{checksum_errors} = $tojson{checksum_errors} + $newPool{checksum_errors}; - $tojson{total_errors} = $tojson{total_errors} + $newPool{total_errors}; + $tojson{total_errors} = $tojson{total_errors} + $newPool{total_errors}; push( @toShoveIntoJSON, \%newPool ); @@ -474,7 +478,7 @@ $tojson{l2_access_total} = $tojson{l2_hits} + $tojson{l2_misses}; my %head_hash; $head_hash{data} = \%tojson; -$head_hash{version} = 3; +$head_hash{version} = 4; $head_hash{error} = 0; $head_hash{errorString} = ''; From f5429f26a52cb52757293c01439f662129fa529e Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Fri, 25 Oct 2024 15:08:41 -0500 Subject: [PATCH 453/497] linux_softnet_stat nolonger uses Gzip::Faster... uses IO::Compress::Gzip as it comes default (#551) --- snmp/linux_softnet_stat | 32 ++++++++++++++++++++------------ 1 file changed, 20 insertions(+), 12 deletions(-) diff --git a/snmp/linux_softnet_stat b/snmp/linux_softnet_stat index f7987a391..cc547a1f3 100755 --- a/snmp/linux_softnet_stat +++ b/snmp/linux_softnet_stat @@ -1,10 +1,15 @@ #!/usr/bin/env perl +use strict; +use warnings; + =head1 DESCRIPTION This is a SNMP extend for monitoring /proc/net/softnet_stat on Linux for use with LibreNMS. 
-For more information, see L. +This just needs added to snmpd.conf like below. + + extend linux_softnet_stat /etc/snmp/linux_softnet_stat -b =head1 SWITCHES @@ -16,24 +21,29 @@ Pretty print the JSON. If used with -b, this switch will be ignored. Gzip the output and convert to Base64. +=head1 VERSION + +0.1.0 + =cut -use strict; -use warnings; +our $VERSION = '0.1.0'; + use JSON; use Getopt::Std; use File::Slurp; use MIME::Base64; -use Gzip::Faster; +use IO::Compress::Gzip qw(gzip $GzipError); +use Pod::Usage; $Getopt::Std::STANDARD_HELP_VERSION = 1; sub main::VERSION_MESSAGE { - print "Linux softnet stats extend 0.0.1\n"; + print 'Linux softnet stats extend ' . $VERSION . "\n"; } sub main::HELP_MESSAGE { - + pod2usage( -exitval => 255, -verbose => 2, -output => \*STDOUT, ); } #this will be dumped to json at the end @@ -132,13 +142,11 @@ if ( !$opts{p} && !$opts{b} ) { exit 0; } -my $compressed = encode_base64( gzip($return_string) ); +my $toReturnCompressed; +gzip \$return_string => \$toReturnCompressed; +my $compressed = encode_base64($toReturnCompressed); $compressed =~ s/\n//g; $compressed = $compressed . "\n"; -if ( length($compressed) > length($return_string) ) { - print $return_string. "\n"; -} else { - print $compressed; -} +print $compressed; exit 0; From b8d34f336e792f3a59b0fb53c6f9cca8521bd888 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Fri, 25 Oct 2024 20:31:55 -0500 Subject: [PATCH 454/497] privoxy: doc update, no longer needs Gzip::Faster, add -w (#552) --- snmp/privoxy | 124 +++++++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 110 insertions(+), 14 deletions(-) diff --git a/snmp/privoxy b/snmp/privoxy index 26e87cddd..4af9a00a5 100755 --- a/snmp/privoxy +++ b/snmp/privoxy @@ -1,6 +1,6 @@ #!/usr/bin/env perl -#Copyright (c) 2023, Zane C. Bowers-Hadley +#Copyright (c) 2024, Zane C. Bowers-Hadley #All rights reserved. 
# #Redistribution and use in source and binary forms, with or without modification, @@ -23,32 +23,91 @@ #OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF #THE POSSIBILITY OF SUCH DAMAGE. -=for comment +use warnings; +use strict; + +=head1 NAME + +privoxy - LibreNMS JSON style SNMP extend for monitoring Privoxy + +=head1 VERSION + +0.2.0 + +=cut + +our $VERSION = '0.2.0'; + +=head1 SYNOPSIS + +privoxy B<-w> [B<-o> ] [B<-f> ] [B<-p>] + +privoxy [B<-o> ] [B<-f> ] [B<-p>] + +=head1 SNMPD CONFIG Add this to snmpd.conf as below and restart snmpd. extend privoxy /etc/snmp/extends/privoxy -Supported command line options are as below. +Or if using cron... + + # cron + */5 * * * * root /etc/snmp/privoxy -w > /dev/null + + # snmpd.conf + extend privoxy /bin/cat /var/cache/privoxy_extend.json.snmp + +=head1 FLAGS + +=head2 -f + +The Privoxy logfile. + +Default: /var/log/privoxy/logfile + +=head2 -c + +Use gzip+base64 LibreNMS style compression. + +=head2 -p + +Pretty print. + +=head2 -o + +Where to write it out to. + +Default: /var/cache/privoxy_extend.json - -f Logfile. - Default: /var/log/privoxy/logfile - -c gzip+base64 compression - -p Pretty print. +=head2 -w -The last is only really relevant to the usage with SNMP. +Write out. Implies -c + +=head1 INSTALL + +FreeBSD... + + pkg install p5-JSON p5-MIME-Base64 p5-File-Slurp p5-File-ReadBackwards p5-IPC-Run3 p5-Time-Piece + +Debian... 
+ + apt-get install libjson-perl libmime-base64-perl libfile-slurp-perl libfile-readbackwards-perl libipc-run3-perl cpanminus + cpanm Time::Piece =cut -use strict; -use warnings; use Getopt::Std; use File::ReadBackwards; use JSON; use Time::Piece; use IPC::Run3; use MIME::Base64; -use Gzip::Faster; +use IO::Compress::Gzip qw(gzip $GzipError); +use File::Slurp; +use Pod::Usage; + +$Getopt::Std::STANDARD_HELP_VERSION = 1; # get the current time my $t = localtime; @@ -71,7 +130,7 @@ my $compress; #gets the options my %opts; -getopts( 'f:cp', \%opts ); +getopts( 'f:cpwo', \%opts ); if ( defined( $opts{f} ) ) { $logfile = $opts{f}; } @@ -79,6 +138,22 @@ if ( defined( $opts{c} ) ) { $compress = 1; } +if ($opts{w}) { + $opts{c} = 1; +} + +sub main::VERSION_MESSAGE { + print 'privoxy LibreNMS extend v. ' . $VERSION . "\n"; +} + +sub main::HELP_MESSAGE { + pod2usage( -exitval => 255, -verbose => 2, -output => \*STDOUT, ); +} + +if ( !defined( $opts{o} ) ) { + $opts{o} = '/var/cache/privoxy_extend.json'; +} + my $json = JSON->new->allow_nonref->canonical(1); if ( $opts{p} ) { $json->pretty(); @@ -426,8 +501,29 @@ if ($compress) { exit 0; } ## end if ($compress) -print $json->encode($to_return); +my $raw_json_return = $json->encode($to_return); if ( !$opts{p} ) { - print "\n"; + $raw_json_return = $raw_json_return . "\n"; } + +if ( $opts{w} ) { + write_file( $opts{o}, $raw_json_return ); +} + +if ( $opts{c} ) { + # compress and write to the cache file for it + my $compressed_string; + gzip \$raw_json_return => \$compressed_string; + my $compressed = encode_base64($compressed_string); + $compressed =~ s/\n//g; + $compressed = $compressed . "\n"; + print $compressed; + + if ( $opts{w} ) { + write_file( $opts{o} . '.snmp', $compressed ); + } +} else { + print $raw_json_return; +} + exit 0; From 5ad2c2a2b40c584dc4c06fe2003718f1b0390f46 Mon Sep 17 00:00:00 2001 From: "Zane C. 
Bowers-Hadley" Date: Sun, 17 Nov 2024 21:10:01 -0600 Subject: [PATCH 455/497] add a extend for nextcloud (#554) * very early initial work * checkpoint * add last_seen handling --- snmp/nextcloud | 337 +++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 337 insertions(+) create mode 100644 snmp/nextcloud diff --git a/snmp/nextcloud b/snmp/nextcloud new file mode 100644 index 000000000..e7d648737 --- /dev/null +++ b/snmp/nextcloud @@ -0,0 +1,337 @@ +#!/usr/bin/env perl + +=head1 NAME + +nextcloud - LibreNMS JSON SNMP extend for gathering backups for Nextcloud + +=head1 VERSION + +0.0.1 + +=head1 DESCRIPTION + +For more information, see L. + +=head1 SWITCHES + +=head2 -i + +Dir location for the Nextcloud install. + +The defaults are as below. + +FreeBSD: /usr/local/www/nextcloud +Linux: /var/www/nextcloud + +=head2 -m + +If set, does consider the user directories to not all be under the same mountpoint. + +=head2 -o + +Where to write the output to. + +Default: /var/cache/nextcloud_extend + +=head2 -q + +Don't print the JSON results when done. + +=head1 SETUP + +Create the required directory to write to. + + mkdir /var/cache/nextcloud_extend + chown -R $nextcloud_user /var/cache/nextcloud_extend + +snmpd.conf + + extend nextcloud /bin/cat /var/cache/nextcloud_extend/snmp + +cron, specify -o or -i if needed/desired + + */5 * * * * /etc/snmpd/nextcloud -q 2> /dev/null + +=head1 REQUIREMENTS + +Debian... + + apt-get install libjson-perl libfile-slurp-perl libmime-base64-perl cpanminus + cpanm Time::Piece + +FreeBSD... + + pkg install p5-JSON p5-File-Slurp p5-MIME-Base64 p5-Time-Piece + +Generic cpanm... + + cpanm JSON File::Slurp Mime::Base64 + +=cut + +#Copyright (c) 2024, Zane C. Bowers-Hadley +#All rights reserved. 
+# +#Redistribution and use in source and binary forms, with or without modification, +#are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright notice, +# this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above copyright notice, +# this list of conditions and the following disclaimer in the documentation +# and/or other materials provided with the distribution. +# +#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +#ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +#WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +#IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, +#INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +#BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +#LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +#OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF +#THE POSSIBILITY OF SUCH DAMAGE. + +# Many thanks to Ben Rockwood, Jason J. 
Hellenthal, and Martin Matuska +# for zfs-stats and figuring out the math for all the stats +# +# Thanks to dlangille for pointing out the issues on 14 and Bobzikwick figuring out the fix in issues/501 + +use strict; +use warnings; +use JSON; +use Getopt::Long; +use File::Slurp; +use MIME::Base64; +use IO::Compress::Gzip qw(gzip $GzipError); +use Pod::Usage; +use String::ShellQuote; +use Time::Piece; + +sub main::VERSION_MESSAGE { + pod2usage( -exitval => 255, -verbose => 99, -sections => qw(VERSION), -output => \*STDOUT, ); +} + +sub main::HELP_MESSAGE { + pod2usage( -exitval => 255, -verbose => 2, -output => \*STDOUT, ); +} + +#this will be dumped to json at the end +my %tojson; +$tojson{total} = 0; +$tojson{user_count} = 0; +$tojson{free} = 0; +$tojson{used} = 0; +$tojson{enabled_apps} = 0; +$tojson{disabled_apps} = 0; +$tojson{encryption_enabled} = 0; +$tojson{calendars} = 0; +$tojson{multimount} = 0; +$tojson{users} = {}; + +# current user +my $current_user = $ENV{LOGNAME} || $ENV{USER} || getpwuid($<); + +#gets the options +my %opts; +my $be_quiet; +my $output_dir = '/var/cache/nextcloud_extend'; +my $install_dir; +my $version; +my $help; +my $multimount; +GetOptions( + q => \$be_quiet, + 'o=s' => \$output_dir, + 'i=s' => \$install_dir, + v => \$version, + version => \$version, + h => \$help, + help => \$help, + m => \$multimount, +); + +if ($version) { + pod2usage( -exitval => 255, -verbose => 99, -sections => qw(VERSION), -output => \*STDOUT, ); +} + +if ($help) { + pod2usage( -exitval => 255, -verbose => 2, -output => \*STDOUT, ); +} + +if ($multimount) { + $tojson{multimount} = 1; +} + +# get what to use for the install dir if not specified +if ( !defined($install_dir) ) { + if ( $^O eq 'freebsd' ) { + $install_dir = '/usr/local/www/nextcloud'; + } elsif ( $^O eq 'linux' ) { + $install_dir = '/var/www/nextcloud'; + } else { + die('-i not specified for the install dir for Nextcloud'); + } +} + +# ensure the install dir exists +if ( !-d $install_dir ) { + 
die( 'the Nextcloud install directory, "' . $install_dir . '", is not a directory or does not exist' ); +} + +# change to the install dir +chdir($install_dir) || die( 'failed to chdir to the Nextcloud install dir, "' . $install_dir . '",' ); + +# ensure the config exists +if ( !-f './config/config.php' ) { + die( '"./config/config.php" does not exist under the Nextcloud install dir ,"' . $install_dir . '",' ); +} + +# ensure ./occ happens +if ( !-f './occ' ) { + die( '"./occ" does not exist under the Nextcloud install dir ,"' . $install_dir . '",' ); +} + +# ensure the install dir exists and try to create it if it does not +if ( !-d $output_dir ) { + mkdir($output_dir) || die( '"' . $output_dir . '" does not exist and could not be created' ); +} + +### +### +### get user info +### +### +my $user_list_raw = `php occ user:list --output=json`; +if ( $? != 0 ) { + die( '"php occ user:list" existed non-zero with.... ' . "\n" . $user_list_raw . "\n..." ); +} +my @users; +eval { + my $decodes_users = decode_json($user_list_raw); + @users = keys( %{$decodes_users} ); +}; + +foreach my $user (@users) { + my $quoted_user = shell_quote($user); + my $user_info_raw = `php occ user:info --output=json $quoted_user`; + eval { + my $user_info = decode_json($user_info_raw); + if ( defined( $user_info->{user_id} ) + && defined( $user_info->{storage} ) + && ref( $user_info->{storage} ) eq 'HASH' + && defined( $user_info->{last_seen} ) ) + { + my $last_seen = $user_info->{last_seen}; + if ( $last_seen eq '1970-01-01T00:00:00+00:00' ) { + $last_seen = -1; + } else { + eval { + $last_seen =~ s/(\d+)\:(\d+)$/$1$2/; + my $t1 = gmtime; + my $t2 = Time::Piece->strptime( $last_seen, "%Y-%m-%dT%H:%M:%S%z" ); + $last_seen = $t1->epoch - $t2->epoch; + }; + if ($@) { + $last_seen = undef; + } + } ## end else [ if ( $last_seen eq '1970-01-01T00:00:00+00:00')] + $tojson{users}{$user} = { + 'free' => $user_info->{storage}{free}, + 'quota' => $user_info->{storage}{quota}, + 'relative' => 
$user_info->{storage}{relative}, + 'total' => $user_info->{storage}{total}, + 'used' => $user_info->{storage}{used}, + 'last_seen' => $last_seen, + 'calendars' => 0, + }; + $tojson{free} = $user_info->{storage}{free}; + $tojson{used} = $tojson{used} + $user_info->{storage}{used}; + if ( $user_info->{storage}{quota} > 0 ) { + $tojson{quota} = $tojson{quota} + $user_info->{storage}{quota}; + } + $tojson{user_count}++; + # does not currently support output options + my $calendar_info_raw = `php occ dav:list-calendars $quoted_user 2> /dev/null`; + if ( $? == 0 ) { + # if the table has more than 4 lines the other lines contain calender info + # so given it is zero index the number of calendars can be fournd via subtracting 3 + my @calendar_info_split = split( /\n/, $calendar_info_raw ); + if ( $#calendar_info_split > 3 ) { + $tojson{users}{$user}{'calendars'} = $#calendar_info_split - 3; + $tojson{calendars} = $tojson{'calendars'} + $tojson{users}{$user}{'calendars'}; + } + } + } ## end if ( defined( $user_info->{user_id} ) && defined...) + }; +} ## end foreach my $user (@users) + +### +### +### get app info +### +### +my $app_info_raw = `php occ app:list --output=json`; +if ( $? == 0 ) { + eval { + my $app_info = decode_json($app_info_raw); + if ( defined( $app_info->{disabled} ) + && ref( $app_info->{disabled} ) eq 'HASH' ) + { + my @disabled_apps = keys( %{ $app_info->{disabled} } ); + $tojson{disabled_apps} = $#disabled_apps + 1; + } + if ( defined( $app_info->{enabled} ) + && ref( $app_info->{enabled} ) eq 'HASH' ) + { + my @disabled_apps = keys( %{ $app_info->{enabled} } ); + $tojson{enabled_apps} = $#disabled_apps + 1; + } + }; +} ## end if ( $? == 0 ) + +### +### +### get encryption status +### +### +my $encrption_info_raw = `php occ encryption:status --output=json`; +if ( $? 
== 0 ) { + eval { + my $encrption_info = decode_json($encrption_info_raw); + if ( defined($encrption_info) + && ref( $encrption_info->{enabled} ) eq '' + && $encrption_info->{enabled} =~ /^(1|[Tt][Rr][Uu][Ee])$/ ) + { + $tojson{encryption_enabled} = 1; + } + }; +} ## end if ( $? == 0 ) + +my %head_hash; +$head_hash{data} = \%tojson; +$head_hash{version} = 1; +$head_hash{error} = 0; +$head_hash{errorString} = ''; + +my $json_output = encode_json( \%head_hash ); + +if ( !$be_quiet ) { + print $json_output. "\n"; +} + +eval { write_file( $output_dir . '/json', $json_output ); }; +if ($@) { + warn( 'failed to write out "' . $output_dir . '/json" ... ' . $@ ); +} + +my $toReturnCompressed; +gzip \$json_output => \$toReturnCompressed; +my $compressed = encode_base64($toReturnCompressed); +$compressed =~ s/\n//g; +$compressed = $compressed . "\n"; + +eval { write_file( $output_dir . '/snmp', $compressed ); }; +if ($@) { + warn( 'failed to write out "' . $output_dir . '/snmp" ... ' . $@ ); +} From 6cbcafc48329adf42e93f4a4a8b5eafeeebb7312 Mon Sep 17 00:00:00 2001 From: "Zane C. 
Bowers-Hadley" Date: Sat, 23 Nov 2024 19:18:37 -0600 Subject: [PATCH 456/497] add last_seen_string for nextcloud user info (#555) --- snmp/nextcloud | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/snmp/nextcloud b/snmp/nextcloud index e7d648737..6d0d231e5 100644 --- a/snmp/nextcloud +++ b/snmp/nextcloud @@ -237,13 +237,14 @@ foreach my $user (@users) { } } ## end else [ if ( $last_seen eq '1970-01-01T00:00:00+00:00')] $tojson{users}{$user} = { - 'free' => $user_info->{storage}{free}, - 'quota' => $user_info->{storage}{quota}, - 'relative' => $user_info->{storage}{relative}, - 'total' => $user_info->{storage}{total}, - 'used' => $user_info->{storage}{used}, - 'last_seen' => $last_seen, - 'calendars' => 0, + 'free' => $user_info->{storage}{free}, + 'quota' => $user_info->{storage}{quota}, + 'relative' => $user_info->{storage}{relative}, + 'total' => $user_info->{storage}{total}, + 'used' => $user_info->{storage}{used}, + 'last_seen' => $last_seen, + 'last_seen_string' => $user_info->{last_seen}, + 'calendars' => 0, }; $tojson{free} = $user_info->{storage}{free}; $tojson{used} = $tojson{used} + $user_info->{storage}{used}; From 95261e9b9bf6281c55b5fe634d1083613a19a229 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Mon, 25 Nov 2024 03:05:24 -0600 Subject: [PATCH 457/497] save the total for nextcloud storage info (#556) --- snmp/nextcloud | 1 + 1 file changed, 1 insertion(+) diff --git a/snmp/nextcloud b/snmp/nextcloud index 6d0d231e5..df670b50f 100644 --- a/snmp/nextcloud +++ b/snmp/nextcloud @@ -247,6 +247,7 @@ foreach my $user (@users) { 'calendars' => 0, }; $tojson{free} = $user_info->{storage}{free}; + $tojson{total} = $user_info->{storage}{total}; $tojson{used} = $tojson{used} + $user_info->{storage}{used}; if ( $user_info->{storage}{quota} > 0 ) { $tojson{quota} = $tojson{quota} + $user_info->{storage}{quota}; From c831c34fc22627598fe859984fad752e29ab3281 Mon Sep 17 00:00:00 2001 From: "Zane C. 
Bowers-Hadley" Date: Mon, 25 Nov 2024 03:36:01 -0600 Subject: [PATCH 458/497] ensure quota will always be present for a total for nextcloud (#557) --- snmp/nextcloud | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/snmp/nextcloud b/snmp/nextcloud index df670b50f..37fdddd70 100644 --- a/snmp/nextcloud +++ b/snmp/nextcloud @@ -128,6 +128,7 @@ $tojson{encryption_enabled} = 0; $tojson{calendars} = 0; $tojson{multimount} = 0; $tojson{users} = {}; +$tojson{quota} = 0; # current user my $current_user = $ENV{LOGNAME} || $ENV{USER} || getpwuid($<); @@ -246,9 +247,9 @@ foreach my $user (@users) { 'last_seen_string' => $user_info->{last_seen}, 'calendars' => 0, }; - $tojson{free} = $user_info->{storage}{free}; + $tojson{free} = $user_info->{storage}{free}; $tojson{total} = $user_info->{storage}{total}; - $tojson{used} = $tojson{used} + $user_info->{storage}{used}; + $tojson{used} = $tojson{used} + $user_info->{storage}{used}; if ( $user_info->{storage}{quota} > 0 ) { $tojson{quota} = $tojson{quota} + $user_info->{storage}{quota}; } From ea9d1648b1a31534c8af890441fbc5ceb2364745 Mon Sep 17 00:00:00 2001 From: "Zane C. 
Bowers-Hadley" Date: Sat, 30 Nov 2024 00:55:02 -0600 Subject: [PATCH 459/497] a bit of cleanup for user count and saving the files is now atomic for nextcloud (#558) --- snmp/nextcloud | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/snmp/nextcloud b/snmp/nextcloud index 37fdddd70..449c98e3f 100644 --- a/snmp/nextcloud +++ b/snmp/nextcloud @@ -212,10 +212,12 @@ eval { my $decodes_users = decode_json($user_list_raw); @users = keys( %{$decodes_users} ); }; +$tojson{user_count} = $#users; +$tojson{user_count}++; foreach my $user (@users) { my $quoted_user = shell_quote($user); - my $user_info_raw = `php occ user:info --output=json $quoted_user`; + my $user_info_raw = `php occ user:info --output=json $quoted_user 2> /dev/null`; eval { my $user_info = decode_json($user_info_raw); if ( defined( $user_info->{user_id} ) @@ -253,7 +255,6 @@ foreach my $user (@users) { if ( $user_info->{storage}{quota} > 0 ) { $tojson{quota} = $tojson{quota} + $user_info->{storage}{quota}; } - $tojson{user_count}++; # does not currently support output options my $calendar_info_raw = `php occ dav:list-calendars $quoted_user 2> /dev/null`; if ( $? == 0 ) { @@ -274,7 +275,7 @@ foreach my $user (@users) { ### get app info ### ### -my $app_info_raw = `php occ app:list --output=json`; +my $app_info_raw = `php occ app:list --output=json 2> /dev/null`; if ( $? == 0 ) { eval { my $app_info = decode_json($app_info_raw); @@ -298,7 +299,7 @@ if ( $? == 0 ) { ### get encryption status ### ### -my $encrption_info_raw = `php occ encryption:status --output=json`; +my $encrption_info_raw = `php occ encryption:status --output=json 2> /dev/null`; if ( $? == 0 ) { eval { my $encrption_info = decode_json($encrption_info_raw); @@ -323,7 +324,7 @@ if ( !$be_quiet ) { print $json_output. "\n"; } -eval { write_file( $output_dir . '/json', $json_output ); }; +eval { write_file( $output_dir . '/json', { atomic => 1 }, $json_output ); }; if ($@) { warn( 'failed to write out "' . 
$output_dir . '/json" ... ' . $@ ); } @@ -334,7 +335,7 @@ my $compressed = encode_base64($toReturnCompressed); $compressed =~ s/\n//g; $compressed = $compressed . "\n"; -eval { write_file( $output_dir . '/snmp', $compressed ); }; +eval { write_file( $output_dir . '/snmp', { atomic => 1 }, $compressed ); }; if ($@) { warn( 'failed to write out "' . $output_dir . '/snmp" ... ' . $@ ); } From 605724ac88affe17e6a21a953e1702e82a44e167 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Sun, 1 Dec 2024 17:45:26 -0600 Subject: [PATCH 460/497] add text_blob extend and update perlcritic options (#559) * ignore ProhibitBitwiseOperators * add text_blob extend --- .perlcriticrc | 2 +- snmp/text_blob | 370 +++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 371 insertions(+), 1 deletion(-) create mode 100755 snmp/text_blob diff --git a/.perlcriticrc b/.perlcriticrc index ab2e45531..732ce4851 100644 --- a/.perlcriticrc +++ b/.perlcriticrc @@ -1 +1 @@ -exclude = ProhibitExplicitReturnUndef ProhibitOneArgBless ProhibitStringyEval +exclude = ProhibitExplicitReturnUndef ProhibitOneArgBless ProhibitStringyEval ProhibitBitwiseOperators diff --git a/snmp/text_blob b/snmp/text_blob new file mode 100755 index 000000000..030a583bd --- /dev/null +++ b/snmp/text_blob @@ -0,0 +1,370 @@ +#!/usr/bin/env perl + +use warnings; +use strict; + +=head1 NAME + +text_blob - LinbreNMS JSON extend for text blob stuff. + +=head1 VERSION + +0.0.1 + +=cut + +our $VERSION = '0.0.1'; + +=head1 SYNOPSIS + +wireguard [B<-c> ] [B<-q>] + +wireguard [B<-v>|B<--version>] + +wireguard [B<-h>|B<--help>] + +=head1 SWITCHES + +=head2 -c + +Config file to use. + +Default: /usr/local/etc/text_blob_extend.json + +=head2 -h|--help + +Print help info. + +=head2 -q + +Be quiet when running it. + +=head2 -v|--version + +Print version info. + +=head1 INSTALL + +Install the depends. 
+ + # FreeBSD + pkg install p5-JSON p5-File-Slurp p5-MIME-Base64 + + # Debian + apt-get install libjson-perl libmime-base64-perl libfile-slurp-perl + +Then set it up in SNMPD. + +=head1 CONFIG + +The default config is /usr/local/etc/text_blob_extend.json . + + - .blobs :: A hash of commands to run. The key values are the name of the blob. + + - .global_envs :: A hash of enviromental values set. + + - .blob_envs :: A hash of per blob env values. The key name of the blob and each value is + a sub hash of enviromental values to set. + + - .output_dir :: Output directory to use. + - Default :: /var/cache/text_blob_extend + +Example + + { + "blobs":{ + "jls": "jls", + "dmesg": "dmesg", + "top_io": "top -b -m io -j", + "top_cpu": "top -b -m cpu -w -j", + "ps": "ps axuw", + "netstat": "netstat -rn" + } + } + +=cut + +use JSON; +use Getopt::Std; +use MIME::Base64; +use IO::Compress::Gzip qw(gzip $GzipError); +use File::Slurp; +use Pod::Usage; + +sub main::VERSION_MESSAGE { + print 'text_blob LibreNMS extend v. ' . $VERSION . 
"\n"; +} + +sub main::HELP_MESSAGE { + pod2usage( -exitval => 255, -verbose => 2, -output => \*STDOUT, ); +} + +$Getopt::Std::STANDARD_HELP_VERSION = 1; + +#gets the options +my %opts = (); +getopts( 'c:qvh', \%opts ); + +if ( $opts{v} ) { + &main::VERSION_MESSAGE; + exit 255; +} + +if ( $opts{h} ) { + &main::HELP_MESSAGE; + exit 255; +} + +if ( !defined( $opts{c} ) ) { + $opts{c} = '/usr/local/etc/text_blob_extend.json'; +} + +my $return_json = { + error => 0, + errorString => '', + version => 2, + data => { + non_zero_exits => 0, + warns => [], + blobs => {}, + blob_exit_val => {}, + blob_exit_signal => {}, + blob_has_coredump => {}, + }, +}; + +## +## +## get original env stuff +## +## +my @original_envs = keys(%ENV); +my %original_envs_vals; +foreach my $item (@original_envs) { + $original_envs_vals{$item} = $ENV{$item}; +} + +## +## +## real in the config +## +## +our $config = { + global_envs => {}, + blob_envs => {}, + blobs => {}, + output_dir => '/var/cache/text_blob_extend', +}; +my @global_envs; +my @blobs; +if ( -f $opts{c} ) { + eval { + my $raw_config = read_file( $opts{c} ); + my $parsed_config = decode_json($raw_config); + # process .global_envs if it exists + if ( defined( $parsed_config->{global_envs} ) + && ref( $parsed_config->{global_envs} ) eq 'HASH' ) + { + @global_envs = keys( %{ $parsed_config->{global_envs} } ); + foreach my $item (@global_envs) { + if ( ref( $parsed_config->{global_envs}{$item} ) ne '' ) { + my $warning + = '".global_envs.' + . $item + . '" has a ref value of ' + . ref( $parsed_config->{global_envs}{$item} ) + . ' and not ""'; + warn($warning); + push( @{ $return_json->{data}{warns} }, $warning ); + } else { + push( @global_envs, $item ); + $config->{global_envs}{$item} = $parsed_config->{global_envs}{$item}; + } + } ## end foreach my $item (@global_envs) + } elsif ( defined( $parsed_config->{global_envs} ) + && ref( $parsed_config->{global_envs} ) ne 'HASH' ) + { + my $warning = '.global_envs is not a hash but "' . 
ref( $parsed_config->{global_envs} ) . '"'; + warn($warning); + push( @{ $return_json->{data}{warns} }, $warning ); + } + # process .blob_envs + if ( defined( $parsed_config->{blob_envs} ) + && ref( $parsed_config->{blob_envs} ) eq 'HASH' ) + { + # ensure all .blob_envs are hashes + my @blob_envs = keys( %{ $parsed_config->{blob_envs} } ); + foreach my $item (@blob_envs) { + if ( ref( $parsed_config->{blob_envs}{$item} ) ne 'HASH' ) { + my $warning + = '".blob_envs.' + . $item + . '" has a ref value of ' + . ref( $parsed_config->{blob_envs}{$item} ) + . ' and not "HASH"'; + warn($warning); + push( @{ $return_json->{data}{warns} }, $warning ); + } else { + my @envs_for_blobs = keys( %{ $parsed_config->{blob_envs}{$item} } ); + # only create the hash if we have actual keys + if ( defined( $envs_for_blobs[0] ) ) { + $config->{blob_envs}{$item} = {}; + # we have keys, so only add scalars + foreach my $item2 (@envs_for_blobs) { + if ( ref( $parsed_config->{blob_envs}{$item}{$item2} ) ne '' ) { + my $warning + = '".blob_envs.' + . $item . '.' + . $item2 + . '" has a ref value of ' + . ref( $parsed_config->{blob_envs}{$item}{$item2} ) + . ' and not ""'; + warn($warning); + push( @{ $return_json->{data}{warns} }, $warning ); + } else { + $config->{blob_envs}{$item}{$item2} = $parsed_config->{blob_envs}{$item}{$item2}; + } + } ## end foreach my $item2 (@envs_for_blobs) + } ## end if ( defined( $envs_for_blobs[0] ) ) + } ## end else [ if ( ref( $parsed_config->{blob_envs}{$item...}))] + } ## end foreach my $item (@blob_envs) + } elsif ( defined( $parsed_config->{blob_envs} ) + && ref( $parsed_config->{blob_envs} ) ne 'HASH' ) + { + my $warning = '.blob_envs is not a hash but "' . ref( $parsed_config->{blob_envs} ) . 
'"'; + warn($warning); + push( @{ $return_json->{data}{warns} }, $warning ); + } + # process .blobs + if ( defined( $parsed_config->{blobs} ) + && ref( $parsed_config->{blobs} ) eq 'HASH' ) + { + # if here, it is a hash, now to check to make sure it is all sane + my @blobs_check = keys( %{ $parsed_config->{blobs} } ); + if ( !defined( $blobs_check[0] ) ) { + my $warning = '.blobs has no keys defined under it'; + warn($warning); + push( @{ $return_json->{data}{warns} }, $warning ); + } else { + # process + foreach my $item (@blobs_check) { + if ( ref( $parsed_config->{blobs}{$item} ) ne '' ) { + my $warning + = '".blobs.' + . $item + . '" has a ref value of ' + . ref( $parsed_config->{senvs}{$item} ) + . ' and not ""'; + warn($warning); + push( @{ $return_json->{data}{warns} }, $warning ); + } else { + push( @blobs, $item ); + $config->{blobs}{$item} = $parsed_config->{blobs}{$item}; + } + } ## end foreach my $item (@blobs_check) + } ## end else [ if ( !defined( $blobs_check[0] ) ) ] + } elsif ( defined( $parsed_config->{blobs} ) + && ref( $parsed_config->{blobs} ) ne 'HASH' ) + { + # .blobs must always be a hash + die( '.blobs is not a hash but "' . ref( $parsed_config->{blob_envs} ) . '"' ); + } else { + # .blobs must always be defined and a hash + die('.blobs not defined and not a hash'); + } + # process .output_dir + if ( defined( $parsed_config->{output_dir} ) + && ref( $parsed_config->{output_dir} ) eq '' ) + { + # defined and is a scalar, so save it + $config->{output_dir} = $parsed_config->{output_dir}; + } elsif ( defined( $parsed_config->{output_dir} ) + && ref( $parsed_config->{output_dir} ) ne '' ) + { + # hash or array, so die + die( '.output_dir is not a string but a ref type of "' . ref( $parsed_config->{output_dir} ) . '"' ); + } + }; + if ($@) { + $return_json->{error} = 1; + $return_json->{errorString} = $@; + return_the_data( $return_json, $opts{B} ); + exit 0; + } +} else { + my $warning = 'Config file, "' . $opts{c} . 
'", does not exist or is not a file'; + warn($warning); + push( @{ $return_json->{data}{warns} }, $warning ); +} + +if ( -e $config->{output_dir} && !-d $config->{output_dir} ) { + die( 'Output dir, "' . $config->{output_dir} . '", is not a directory but it exists' ); +} elsif ( !-e $config->{output_dir} ) { + mkdir( $config->{output_dir} ) || die( 'Output dir, "' . $config->{output_dir} . '", could not be created' ); +} + +## +## +## process each specified text blob +## +## +foreach my $blob (@blobs) { + # + # reset default envs from run time + # + foreach my $item ( keys(%ENV) ) { + if ( !defined( $original_envs_vals{$item} ) ) { + delete( $ENV{$item} ); + } else { + $ENV{$item} = $original_envs_vals{$item}; + } + } + # + # set the global vars + # + foreach my $item (@global_envs) { + $ENV{$item} = $config->{global_envs}{$item}; + } + # + # set the blob envs + # + if ( defined( $config->{ blob_envs} { $blob } ) ) { + foreach my $item ( keys( %{ $config->{blob_envs}{$blob} } ) ) { + $ENV{$item} = $config->{blob_envs}{$blob}{$item}; + } + } + # + # run the command and get the stdout + # + my $command = $config->{blobs}{$blob}; + my $output = `$command`; + if ($? != 0) { + $return_json->{data}{non_zero_exits}++; + } + $return_json->{data}{blobs}{$blob} = $output; + $return_json->{data}{blob_exit_val}{$blob} = $? >> 8; + $return_json->{data}{blob_exit_signal}{$blob} = $? & 127; + $return_json->{data}{blob_has_coredump}{$blob} = $? & 128; +} ## end foreach my $blob (@blobs) + +## +## +## write the output +## +## + +my $raw_json = encode_json($return_json); + +if ( !$opts{q} ) { + print $raw_json. "\n"; +} + +write_file( $config->{output_dir} . '/json', { atomic => 1 }, $raw_json . "\n" ); + +my $compressed_string; +gzip \$raw_json => \$compressed_string; +my $compressed = encode_base64($compressed_string); +$compressed =~ s/\n//g; +$compressed = $compressed . "\n"; +my $print_compressed = 0; +write_file( $config->{output_dir} . 
'/snmp', { atomic => 1 }, $compressed ); From 847337b32047c7af07ac1c6dcce11af2749d4512 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Sun, 1 Dec 2024 19:45:14 -0600 Subject: [PATCH 461/497] POD cleanups and fix handling of global_envs (#560) --- snmp/text_blob | 34 +++++++++++++++++++++------------- 1 file changed, 21 insertions(+), 13 deletions(-) diff --git a/snmp/text_blob b/snmp/text_blob index 030a583bd..24bf6aec1 100755 --- a/snmp/text_blob +++ b/snmp/text_blob @@ -9,19 +9,19 @@ text_blob - LinbreNMS JSON extend for text blob stuff. =head1 VERSION -0.0.1 +0.0.2 =cut -our $VERSION = '0.0.1'; +our $VERSION = '0.0.2'; =head1 SYNOPSIS -wireguard [B<-c> ] [B<-q>] +text_blob [B<-c> ] [B<-q>] -wireguard [B<-v>|B<--version>] +text_blob [B<-v>|B<--version>] -wireguard [B<-h>|B<--help>] +text_blob [B<-h>|B<--help>] =head1 SWITCHES @@ -55,6 +55,14 @@ Install the depends. Then set it up in SNMPD. + extend text_blob /bin/cat /var/cache/text_blob_extend/snmp + +Setup cron... + + */5 * * * * /etc/snmp/text_blob -q + +Create a config file at /usr/local/etc/text_blob_extend.json . + =head1 CONFIG The default config is /usr/local/etc/text_blob_extend.json . @@ -72,13 +80,17 @@ The default config is /usr/local/etc/text_blob_extend.json . 
Example { + "global_envs":{ + "NO_COLOR": 1 + }, "blobs":{ "jls": "jls", "dmesg": "dmesg", "top_io": "top -b -m io -j", "top_cpu": "top -b -m cpu -w -j", "ps": "ps axuw", - "netstat": "netstat -rn" + "routes": "netstat -rn", + "netstat": "ncnetstat -n --pct 2> /dev/null" } } @@ -177,7 +189,6 @@ if ( -f $opts{c} ) { warn($warning); push( @{ $return_json->{data}{warns} }, $warning ); } else { - push( @global_envs, $item ); $config->{global_envs}{$item} = $parsed_config->{global_envs}{$item}; } } ## end foreach my $item (@global_envs) @@ -286,10 +297,7 @@ if ( -f $opts{c} ) { } }; if ($@) { - $return_json->{error} = 1; - $return_json->{errorString} = $@; - return_the_data( $return_json, $opts{B} ); - exit 0; + die($@); } } else { my $warning = 'Config file, "' . $opts{c} . '", does not exist or is not a file'; @@ -328,7 +336,7 @@ foreach my $blob (@blobs) { # # set the blob envs # - if ( defined( $config->{ blob_envs} { $blob } ) ) { + if ( defined( $config->{blob_envs}{$blob} ) ) { foreach my $item ( keys( %{ $config->{blob_envs}{$blob} } ) ) { $ENV{$item} = $config->{blob_envs}{$blob}{$item}; } @@ -338,7 +346,7 @@ foreach my $blob (@blobs) { # my $command = $config->{blobs}{$blob}; my $output = `$command`; - if ($? != 0) { + if ( $? 
!= 0 ) { $return_json->{data}{non_zero_exits}++; } $return_json->{data}{blobs}{$blob} = $output; From fa257954ca38c8f361fe2eef549f4b48ab63ab8c Mon Sep 17 00:00:00 2001 From: Karl Shea Date: Sun, 23 Feb 2025 17:22:53 -0600 Subject: [PATCH 462/497] Pi-hole v6 API changes (#564) --- snmp/pi-hole | 17 ++++------------- 1 file changed, 4 insertions(+), 13 deletions(-) diff --git a/snmp/pi-hole b/snmp/pi-hole index f0d226e01..cb5bc581c 100644 --- a/snmp/pi-hole +++ b/snmp/pi-hole @@ -5,9 +5,8 @@ IFS=$'\n\t' CONFIGFILE='/etc/snmp/pi-hole.conf' API_AUTH_KEY="" -API_URL="localhost/admin/api.php" -URL_READ_ONLY="?summaryRaw&auth=" -URL_QUERY_TYPE="?getQueryTypes&auth=" +API_URL="localhost/api" +URL_READ_ONLY="/stats/summary" PICONFIGFILE='/etc/pihole/setupVars.conf' DHCPLEASEFILE='/etc/pihole/dhcp.leases' @@ -69,11 +68,6 @@ debug() { echo '[ok] URL_READ_ONLY is set' fi - if [ -z "${URL_QUERY_TYPE}" ]; then - echo '[error] URL_QUERY_TYPE is not set' - else - echo '[ok] URL_QUERY_TYPE not set' - fi if [ -f $PICONFIGFILE ]; then echo '[ok] Pi-Hole config file exists, DHCP stats will be captured if scope active' else @@ -88,12 +82,9 @@ debug() { exportdata() { # domains_being_blocked / dns_query_total / ads_blocked_today / ads_percentage_today - # unique_domains / queries_forwarded / queries_cached - GET_STATS=$(curl -s "${API_URL}${URL_READ_ONLY}${API_AUTH_KEY}" | jq '.domains_being_blocked, .dns_queries_today, .ads_blocked_today, .ads_percentage_today, .unique_domains, .queries_forwarded, .queries_cached') + # unique_domains / queries_forwarded / queries_cached / A / AAAA / PTR / SRV + GET_STATS=$(curl -s "${API_URL}${URL_READ_ONLY}${API_AUTH_KEY}" | jq '.gravity.domains_being_blocked, .queries.total, .queries.blocked, .queries.percent_blocked, .queries.unique_domains, .queries.forwarded, .queries.cached, .queries.types.A, .queries.types.AAAA, .queries.types.PTR, .queries.types.SRV') echo "$GET_STATS" | tr " " "\n" - # A / AAAA / PTR / SRV - GET_QUERY_TYPE=$(curl -s 
"${API_URL}${URL_QUERY_TYPE}${API_AUTH_KEY}" | jq '.[]["A (IPv4)", "AAAA (IPv6)", "PTR", "SRV"]') - echo "$GET_QUERY_TYPE" | tr " " "\n" # Find number of DHCP address in scope and current lease count # case-insensitive compare, just in case :) From 832476bfd4490424c1ca1404635ae17818ad0d7a Mon Sep 17 00:00:00 2001 From: Karl Shea Date: Mon, 24 Feb 2025 18:25:53 -0600 Subject: [PATCH 463/497] Support pi-hole auth (#566) --- snmp/pi-hole | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/snmp/pi-hole b/snmp/pi-hole index cb5bc581c..0c191dcbf 100644 --- a/snmp/pi-hole +++ b/snmp/pi-hole @@ -81,9 +81,15 @@ debug() { } exportdata() { + SESSION_SID="" + + if ! [ -z "$API_AUTH_KEY" ]; then + SESSION_SID=$(curl -X POST --data "{\"password\":\"${API_AUTH_KEY}\"}" -s "${API_URL}/auth" | jq -r '.session.sid') + fi + # domains_being_blocked / dns_query_total / ads_blocked_today / ads_percentage_today # unique_domains / queries_forwarded / queries_cached / A / AAAA / PTR / SRV - GET_STATS=$(curl -s "${API_URL}${URL_READ_ONLY}${API_AUTH_KEY}" | jq '.gravity.domains_being_blocked, .queries.total, .queries.blocked, .queries.percent_blocked, .queries.unique_domains, .queries.forwarded, .queries.cached, .queries.types.A, .queries.types.AAAA, .queries.types.PTR, .queries.types.SRV') + GET_STATS=$(curl -H "X-FTL-SID: ${SESSION_SID}" -s "${API_URL}${URL_READ_ONLY}" | jq '.gravity.domains_being_blocked, .queries.total, .queries.blocked, .queries.percent_blocked, .queries.unique_domains, .queries.forwarded, .queries.cached, .queries.types.A, .queries.types.AAAA, .queries.types.PTR, .queries.types.SRV') echo "$GET_STATS" | tr " " "\n" # Find number of DHCP address in scope and current lease count From 12f0b723494d4d96bd70580055da2535b31dfda7 Mon Sep 17 00:00:00 2001 From: Jason Cheng <30381035+jasoncheng7115@users.noreply.github.com> Date: Thu, 20 Mar 2025 13:22:18 +0800 Subject: [PATCH 464/497] Add hddtemp2 script as modern replacement for hddtemp (#563) * Add 
hddtemp2 script as modern replacement for hddtemp This commit introduces a new script 'hddtemp2' as an alternative solution for LibreNMS disk temperature monitoring on systems where the hddtemp package is no longer available in the default repositories. Key features: - Uses smartctl for SATA/SAS disk temperature monitoring - Adds native NVMe disk temperature support - Works on modern Ubuntu/Debian systems without requiring hddtemp - Maintains original LibreNMS agent output format - Supports both traditional hard drives and NVMe devices - Includes robust error handling and device validation The script is designed to be a drop-in replacement for hddtemp functionality in LibreNMS agent, ensuring seamless integration with existing monitoring setups while providing broader hardware support. Testing: - Verified on Ubuntu/Debian systems where hddtemp is unavailable - Tested with various SATA, SAS, and NVMe drives - Confirmed compatibility with LibreNMS agent format * Update hddtemp2 --- agent-local/hddtemp2 | 87 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 87 insertions(+) create mode 100644 agent-local/hddtemp2 diff --git a/agent-local/hddtemp2 b/agent-local/hddtemp2 new file mode 100644 index 000000000..cb2f0eefe --- /dev/null +++ b/agent-local/hddtemp2 @@ -0,0 +1,87 @@ +#!/usr/bin/env bash + +# Name: hddtemp2 +# Author: Jason Cheng (www.jason.tools) +# Date: 2025-02-17 +# Version: 1.1 +# Purpose: This script replaces the obsolete hddtemp tool for librenms agent to monitor disk temperatures +# License: Free Software + +if type lsblk >/dev/null 2>&1; then + # Use lsblk to list physical disks, parameters: + # -d: show only disk devices, not partitions + # -n: no header line + # -p: show full device path + # -o NAME,TYPE: output only name and type fields + disks=$(lsblk -dnp -o NAME,TYPE | grep 'disk' | cut -d' ' -f1 | tr '\n' ' ') +else + # Fallback: use find to locate SATA/SAS disks (like sda, hda) + disks=$(find /dev -name '[sh]d[a-z]' -or -name 
'[sh]d[a-z][a-z]' | tr '\n' ' ') +fi + +smartctl=$(which smartctl 2>/dev/null) +if [ "${smartctl}" != "" ]; then + if [ -x "${smartctl}" ]; then + output="" + for disk in $disks; do + # Exclude non-physical disks like RBD (RADOS Block Device) + if [[ ! "$disk" =~ rbd ]]; then + # Get disk model first + model=$(${smartctl} -i $disk | grep 'Device Model' | cut -d':' -f2 | sed 's/^\s*//g') + + # Try different temperature attributes in order of preference + # First try Airflow_Temperature_Cel + temp_info=$(${smartctl} -A $disk | grep 'Airflow_Temperature_Cel' | awk '{print $10}') + + # If not found, try Temperature_Celsius + if [ -z "$temp_info" ]; then + temp_info=$(${smartctl} -A $disk | grep 'Temperature_Celsius' | awk '{print $10}') + fi + + # If still not found, try Drive_Temperature + if [ -z "$temp_info" ]; then + temp_info=$(${smartctl} -A $disk | grep 'Drive_Temperature' | awk '{print $4}') + fi + + # Format output regardless of which temperature was found + output="${output}|${disk}|${model}|${temp_info}|C|" + fi + done + # Clean output, keep only printable characters + content_smartctl=$(echo "$output" | tr -cd '\12\40-\176') + else + echo "smartctl not executable" >&2 + fi +else + echo "smartctl not installed" >&2 +fi + +nvme_disks=$(find /dev -name 'nvme[0-9]n[0-9]' | tr '\n' ' ') +nvme=$(which nvme 2>/dev/null) +if [ "${nvme}" != "" ]; then + if [ -x "${nvme}" ]; then + output_nvme="" + for disk in $nvme_disks; do + # Also exclude non-physical disks + if [[ ! 
"$disk" =~ rbd ]]; then + # Only get the numeric part of temperature + temp=$(${nvme} smart-log $disk | grep temperature | awk '{gsub(/[^0-9]/, "", $3); print $3}') + model=$(${nvme} id-ctrl $disk | grep "^mn[[:space:]]*:" | sed 's/^mn[[:space:]]*:[[:space:]]*//g' | tr -d '[:space:]$') + output_nvme="${output_nvme}|${disk}|${model}|${temp}|C|" + fi + done + # Clean output + content_nvme=$(echo "$output_nvme" | tr -cd '\12\40-\176') + else + echo "nvme not executable" >&2 + fi +else + echo "nvme not installed" >&2 +fi + +if [ "${content_smartctl}" != "" ] || [ "${content_nvme}" != "" ] ; then + echo '<<>>' + echo "${content_smartctl}${content_nvme}" +else + echo "no compatible disks found" >&2 +fi From 3acab22cd674c17663fdcd031940e865f3f5ba1b Mon Sep 17 00:00:00 2001 From: YongMin Kim Date: Thu, 12 Jun 2025 02:41:05 +0900 Subject: [PATCH 465/497] Fix proxmox missing VM names (#572) This patch prevents Perl warnings when a QEMU VM or LXC container has no defined name (`name:` or `hostname:` missing in config). Instead of printing an undefined value, the output will now show a fallback identifier in the format: `VMID-`. This ensures: - Clean output with no runtime warnings - Easier identification of VMs without names - Compatibility with LibreNMS parsing logic This has been tested on standalone Proxmox and clustered environments. 
--- agent-local/proxmox | 41 +++++++++++++++++++++++------------------ 1 file changed, 23 insertions(+), 18 deletions(-) diff --git a/agent-local/proxmox b/agent-local/proxmox index 83184144b..c54fd6a30 100755 --- a/agent-local/proxmox +++ b/agent-local/proxmox @@ -43,29 +43,34 @@ foreach my $child (@{$conn->get("/api2/json/cluster/status")}) { } if (!defined($clustername)) { - $clustername = $hostname; + $clustername = $hostname; } print "<<>>\n"; - print "$clustername\n"; foreach my $vm (@{$conn->get("/api2/json/nodes/$hostname/netstat")}) { - my $vmid = $vm->{'vmid'}; - eval { - my $vmname = $conn->get("/api2/json/nodes/$hostname/qemu/$vmid/config")->{'name'}; - my $tmpl = $conn->get("/api2/json/nodes/$hostname/qemu/$vmid/config")->{'template'}; - if (defined($tmpl) && $tmpl == 1) { - die; - } - print "$vmid/$vm->{'dev'}/$vm->{'in'}/$vm->{'out'}/$vmname\n"; - }; + my $vmid = $vm->{'vmid'}; + my $vmname; + + # Try QEMU (VM) eval { - my $vmname = $conn->get("/api2/json/nodes/$hostname/lxc/$vmid/config")->{'hostname'}; - my $tmpl = $conn->get("/api2/json/nodes/$hostname/lxc/$vmid/config")->{'template'}; - if (defined($tmpl) && $tmpl == 1) { - die; - } - print "$vmid/$vm->{'dev'}/$vm->{'in'}/$vm->{'out'}/$vmname\n"; + my $config = $conn->get("/api2/json/nodes/$hostname/qemu/$vmid/config"); + die if defined($config->{'template'}) && $config->{'template'} == 1; + $vmname = $config->{'name'}; }; -}; + + # Try LXC + if (!defined $vmname) { + eval { + my $config = $conn->get("/api2/json/nodes/$hostname/lxc/$vmid/config"); + die if defined($config->{'template'}) && $config->{'template'} == 1; + $vmname = $config->{'hostname'}; + }; + } + + # Default setting + $vmname //= "VMID-$vmid"; + + print "$vmid/$vm->{'dev'}/$vm->{'in'}/$vm->{'out'}/$vmname\n"; +} From ef280e1cc170253278b88da1e8a3091b729d29e5 Mon Sep 17 00:00:00 2001 From: sshockley Date: Wed, 11 Jun 2025 13:41:20 -0400 Subject: [PATCH 466/497] Fix typo in comment (#570) --- 
snmp/unpriv/osupdates/osupdates-unpriv-generate.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/unpriv/osupdates/osupdates-unpriv-generate.sh b/snmp/unpriv/osupdates/osupdates-unpriv-generate.sh index 08a6bca44..2b26c83d8 100644 --- a/snmp/unpriv/osupdates/osupdates-unpriv-generate.sh +++ b/snmp/unpriv/osupdates/osupdates-unpriv-generate.sh @@ -60,7 +60,7 @@ elif command -v pacman &>/dev/null ; then # Arch # calling pacman -Sup does not refresh the package list from the mirrors, # thus it is not useful to find out if there are updates. Keep the pacman call - # to accomodate users that do not have it. checkupdates is in pacman-contrib. + # to accommodate users that do not have it. checkupdates is in pacman-contrib. # also enables snmpd to collect this information if it's not run as root if command -v checkupdates &>/dev/null ; then # shellcheck disable=SC2086 From 2db6043e2504a3eb0b962ec6b6c51208f38bbf18 Mon Sep 17 00:00:00 2001 From: RobJE Date: Wed, 11 Jun 2025 17:41:40 +0000 Subject: [PATCH 467/497] skip empty lines returned by iwinfo in wlNoiseFloor.sh (#568) Sometime after Openwrt 19.07 the `iwinfo` application adds an empty line after each node's information. This breaks `wlNoiseFloor.sh` This commit removes the empty lines to restore functionality --- snmp/Openwrt/wlNoiseFloor.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/Openwrt/wlNoiseFloor.sh b/snmp/Openwrt/wlNoiseFloor.sh index 9cebb323d..102cde435 100755 --- a/snmp/Openwrt/wlNoiseFloor.sh +++ b/snmp/Openwrt/wlNoiseFloor.sh @@ -13,7 +13,7 @@ fi # Extract noise floor. Note, all associated stations have the same value, so just grab the first one # Use tail, not head (i.e. last line, not first), as head exits immediately, breaks the pipe to cut! 
-noise=$(/usr/bin/iwinfo "$1" assoclist 2>/dev/null | /usr/bin/cut -s -d "/" -f 2 | /usr/bin/cut -s -d "(" -f 1 | /usr/bin/cut -s -d " " -f 2 | /usr/bin/tail -1) +noise=$(/usr/bin/iwinfo "$1" assoclist 2>/dev/null | grep -v "^$" | /usr/bin/cut -s -d "/" -f 2 | /usr/bin/cut -s -d "(" -f 1 | /usr/bin/cut -s -d " " -f 2 | /usr/bin/tail -1) # Return snmp result /bin/echo "$noise" From 7675d36f3212563a7910f7d5d1c2a9f3ec2aa83d Mon Sep 17 00:00:00 2001 From: bnerickson Date: Wed, 11 Jun 2025 10:42:51 -0700 Subject: [PATCH 468/497] Fix http_access_log_combined config json parse command (#562) --- snmp/http_access_log_combined | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/snmp/http_access_log_combined b/snmp/http_access_log_combined index 93c82e89a..6906ff08d 100755 --- a/snmp/http_access_log_combined +++ b/snmp/http_access_log_combined @@ -226,7 +226,8 @@ my $config; if ( -f $config_file && !-r $config_file ) { die( $config_file . ' is not readable' ); } elsif ( -f $config_file ) { - $config = decode_json( read_file($config_file) ); + my $raw_config = read_file($config_file); + $config = decode_json($raw_config); if ( ref($config) ne 'HASH' ) { die( '"' . ref($config) . 
'" is the base ref type for the config instead of HASH' ); } From 678ee01b8dc27c88cddfc23e782a933ae7e79e80 Mon Sep 17 00:00:00 2001 From: Megachip Date: Mon, 28 Jul 2025 21:56:26 +0200 Subject: [PATCH 469/497] Update asterisk (#575) Parsing calls processed Updates on App side required --- snmp/asterisk | 1 + 1 file changed, 1 insertion(+) diff --git a/snmp/asterisk b/snmp/asterisk index 7c6b7a19a..40115700c 100644 --- a/snmp/asterisk +++ b/snmp/asterisk @@ -14,6 +14,7 @@ then $ASCLI -rx "core show channels" | awk '/active calls/ { print "Calls=" $1 } /active channels/ { print "Channels=" $1}' $ASCLI -rx 'sip show peers' | awk '/sip peers/ { print "SipPeers=" $1 "\nSipMonOnline=" $5 "\nSipMonOffline=" $7 "\nSipUnMonOnline=" $10 "\nSipUnMonOffline=" $12}' $ASCLI -rx 'iax2 show peers' | awk '/iax2 peers/ { gsub("\\[",""); gsub("\\]",""); print "Iax2Peers=" $1 "\nIax2Online=" $4 "\nIax2Offline=" $6 "\nIax2Unmonitored=" $8}' + $ASCLI -rx 'core show calls' | awk '/calls processed/ { print "CallsProcessed=" $1 }' else exit 0 From 9bbc04914b3c2f7e6e85c729c24573f3f67a42f4 Mon Sep 17 00:00:00 2001 From: "Zane C. Bowers-Hadley" Date: Thu, 18 Sep 2025 14:07:39 -0600 Subject: [PATCH 470/497] manually point ntp-client at 127.0.0.1 (#584) Fixes it on Debian 13 since ntpq fails to figure out it should try that first. --- snmp/ntp-client | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/snmp/ntp-client b/snmp/ntp-client index eccb5e50c..33c46f63c 100755 --- a/snmp/ntp-client +++ b/snmp/ntp-client @@ -22,12 +22,12 @@ if [ -f "$CONFIG" ]; then . 
"$CONFIG" fi -NTP_OFFSET=$($BIN_NTPQ -c rv | $BIN_GREP "offset" | $BIN_AWK -Foffset= '{print $2}' | $BIN_AWK -F, '{print $1}') -NTP_FREQUENCY=$($BIN_NTPQ -c rv | $BIN_GREP "frequency" | $BIN_AWK -Ffrequency= '{print $2}' | $BIN_AWK -F, '{print $1}') -NTP_SYS_JITTER=$($BIN_NTPQ -c rv | $BIN_GREP "sys_jitter" | $BIN_AWK -Fsys_jitter= '{print $2}' | $BIN_AWK -F, '{print $1}') -NTP_CLK_JITTER=$($BIN_NTPQ -c rv | $BIN_GREP "clk_jitter" | $BIN_AWK -Fclk_jitter= '{print $2}' | $BIN_AWK -F, '{print $1}') -NTP_WANDER=$($BIN_NTPQ -c rv | $BIN_GREP "clk_wander" | $BIN_AWK -Fclk_wander= '{print $2}' | $BIN_AWK -F, '{print $1}') -NTP_VERSION=$($BIN_NTPQ -c rv | $BIN_GREP "version" | $BIN_AWK -F'ntpd ' '{print $2}' | $BIN_AWK -F. '{print $1}') +NTP_OFFSET=$($BIN_NTPQ -c rv 127.0.0.1 | $BIN_GREP "offset" | $BIN_AWK -Foffset= '{print $2}' | $BIN_AWK -F, '{print $1}') +NTP_FREQUENCY=$($BIN_NTPQ -c rv 127.0.0.1 | $BIN_GREP "frequency" | $BIN_AWK -Ffrequency= '{print $2}' | $BIN_AWK -F, '{print $1}') +NTP_SYS_JITTER=$($BIN_NTPQ -c rv 127.0.0.1 | $BIN_GREP "sys_jitter" | $BIN_AWK -Fsys_jitter= '{print $2}' | $BIN_AWK -F, '{print $1}') +NTP_CLK_JITTER=$($BIN_NTPQ -c rv 127.0.0.1 | $BIN_GREP "clk_jitter" | $BIN_AWK -Fclk_jitter= '{print $2}' | $BIN_AWK -F, '{print $1}') +NTP_WANDER=$($BIN_NTPQ -c rv 127.0.0.1 | $BIN_GREP "clk_wander" | $BIN_AWK -Fclk_wander= '{print $2}' | $BIN_AWK -F, '{print $1}') +NTP_VERSION=$($BIN_NTPQ -c rv 127.0.0.1 | $BIN_GREP "version" | $BIN_AWK -F'ntpd ' '{print $2}' | $BIN_AWK -F. 
'{print $1}') echo '{"data":{"offset":"'"$NTP_OFFSET"'","frequency":"'"$NTP_FREQUENCY"'","sys_jitter":"'"$NTP_SYS_JITTER"'","clk_jitter":"'"$NTP_CLK_JITTER"'","clk_wander":"'"$NTP_WANDER"'"},"version":"'"$NTP_VERSION"'","error":"0","errorString":""}' From 3fa3e1e219ebe09100785e1a5563c585021b0d60 Mon Sep 17 00:00:00 2001 From: Osik Date: Mon, 22 Sep 2025 15:38:35 +0200 Subject: [PATCH 471/497] read battery.temperature or ups.temperature (#582) --- snmp/ups-nut.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/ups-nut.sh b/snmp/ups-nut.sh index b75580a4b..2db6b8d88 100755 --- a/snmp/ups-nut.sh +++ b/snmp/ups-nut.sh @@ -39,7 +39,7 @@ do fi done -UPSTEMP="ups\.temperature: [0-9.]+" +UPSTEMP="^(battery|ups)\.temperature: [0-9.]+" OUT=$(echo "$TMP" | grep -Eo "$UPSTEMP" | awk '{print $2}' | LANG=C sort | head -n 1) [ -n "$OUT" ] && echo "$OUT" || echo "Unknown" From 8d8e5ab5a2c22136f398856b4b4c43663df12382 Mon Sep 17 00:00:00 2001 From: kossusukka Date: Fri, 21 Nov 2025 02:43:07 +0200 Subject: [PATCH 472/497] Add support for I2PD application monitoring (#586) * SNMP extend agent for application I2Pd * Better error reporting to LibreNMS. Minor tweaks * Update i2pd-stats.py * Fixed code layout as Super-Linter suggested --------- Co-authored-by: Neil Lathwood --- snmp/i2pd-stats.py | 114 +++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 114 insertions(+) create mode 100644 snmp/i2pd-stats.py diff --git a/snmp/i2pd-stats.py b/snmp/i2pd-stats.py new file mode 100644 index 000000000..2db7983c5 --- /dev/null +++ b/snmp/i2pd-stats.py @@ -0,0 +1,114 @@ +#!/usr/bin/env python3 + +# i2pd-stats.py +# SNMP Extend-agent for exporting I2Pd statistics to LibreNMS +# +# Inspired from i2pdctl: +# https://github.com/PurpleI2P/i2pd-tools +# +# Run script and it will print JSON-blob into stdout +# Set I2P Control socket params below! +# +# Installation: +# 1. copy this file to /etc/snmp/i2pd-stats.py +# 2. chmod +x /etc/snmp/i2pd-stats.py +# 3. 
edit /etc/snmp/snmpd.conf and add following line: +# extend i2pd /etc/snmp/i2pd-stats.py +# 4. systemctl restart snmpd.service +# +# author: Kossusukka + +import json +import os +import ssl +import urllib.error +import urllib.parse +import urllib.request + +######### CONFIGURATION ############## +I2PC_URL = "https://127.0.0.1:7650/" +I2PC_PASS = "itoopie" +##### END OF CONFIGURATION ########### + + +# Do not change! Must match LibreNMS version +JSONVER = "1" + + +class I2PControl(object): + """Talk to I2PControl API""" + + def __init__(self, url, password): + self.url = url + self.password = password + self._token = None + + @property + def token(self): + """Cached authentication token""" + if not self._token: + try: + self._token = self.do_post( + self.url, + json.dumps( + { + "id": 1, + "method": "Authenticate", + "params": {"API": 1, "Password": self.password}, + "jsonrpc": "2.0", + } + ), + )["result"]["Token"] + except KeyError: + post_error("1", "Invalid I2PControl password or token!") + exit(1) + return self._token + + def do_post(self, url, data): + """HTTP(S) handler""" + req = urllib.request.Request(url, data=data.encode()) + try: + with urllib.request.urlopen( + req, context=ssl._create_unverified_context(), timeout=5 + ) as f: + resp = f.read().decode("utf-8") + except urllib.error.URLError: + post_error("2", "Unable to connect I2PControl socket!") + exit(1) + except TimeoutError: + post_error("3", "Connection timed out to I2PControl socket!") + exit(1) + return json.loads(resp) + + def request(self, method, params): + """Execute authenticated request""" + params["Token"] = self.token + return self.do_post( + self.url, + json.dumps({"id": 1, "method": method, "params": params, "jsonrpc": "2.0"}), + ) + + +def post_error(code: str, message: str): + """Post error code+message as JSON for LibreNMS""" + resp_err = {"data": "", "version": JSONVER, "error": code, "errorString": message} + + print(json.dumps(resp_err)) + + +def main(): + # Craft JSON request 
for I2PC + JSON_REQUEST = json.loads( + '{ "i2p.router.uptime": "", "i2p.router.net.status": "", "i2p.router.net.bw.inbound.1s": "", "i2p.router.net.bw.inbound.15s": "", "i2p.router.net.bw.outbound.1s": "", "i2p.router.net.bw.outbound.15s": "", "i2p.router.net.tunnels.participating": "", "i2p.router.net.tunnels.successrate": "", "i2p.router.netdb.knownpeers": "", "i2p.router.netdb.activepeers": "", "i2p.router.net.total.received.bytes": "", "i2p.router.net.total.sent.bytes": "" }' + ) + + ctl = I2PControl(I2PC_URL, I2PC_PASS) + + resp = ctl.request("RouterInfo", JSON_REQUEST)["result"] + resp_full = {"data": resp, "version": JSONVER, "error": "0", "errorString": ""} + + print(json.dumps(resp_full)) + + +if __name__ == "__main__": + main() From 81672fa8e56c2074b02d3084e98844918e064fbd Mon Sep 17 00:00:00 2001 From: SourceDoctor Date: Sat, 13 Dec 2025 16:18:42 +0100 Subject: [PATCH 473/497] fix - redis parsing (#587) LGTM --- snmp/redis.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/snmp/redis.py b/snmp/redis.py index cd861e1f1..d0da1e4d3 100755 --- a/snmp/redis.py +++ b/snmp/redis.py @@ -34,6 +34,15 @@ error_string = "category not defined" break + if b"," in d: + # ignore multiparameter lines + # b'listener0:name=tcp,bind=127.0.0.1,bind=-::1,port=6379' + # b'io_thread_0:clients=62,reads=30982,writes=30918' + # b'module:name=vectorset,ver=1,api=1,filters=0,usedby=[],using=[],options=[handle-io-errors|handle-repl-async-load]' + # b'db0:keys=2,expires=1,avg_ttl=19439,subexpiry=0' + # b'db0_distrib_zsets_items:0=18446744073709551379,1=238' + continue + k, v = d.split(b":") k = k.decode("utf-8") v = v.decode("utf-8") From bce28ba60089f846a3e22be0efdcf461063e80dd Mon Sep 17 00:00:00 2001 From: SourceDoctor Date: Sat, 13 Dec 2025 20:16:30 +0100 Subject: [PATCH 474/497] ntp-server fix (#588) --- snmp/ntp-server.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snmp/ntp-server.sh b/snmp/ntp-server.sh index 4fb02e8c5..fe76501a7 100755 --- 
a/snmp/ntp-server.sh +++ b/snmp/ntp-server.sh @@ -75,7 +75,7 @@ if [ "$VER" = "4.2.6p5" ]; then else USECMD=$(echo "$BIN_NTPQ" -c iostats 127.0.0.1) fi -CMD2=$($USECMD | $BIN_TR -d ' ' | $BIN_CUT -d : -f 2 | $BIN_TR '\n' ' ') +CMD2=$($USECMD 2>/dev/null | $BIN_TR -d ' ' | $BIN_CUT -d : -f 2 | $BIN_TR '\n' ' ') # shellcheck disable=SC2086 TIMESINCERESET=$(echo $CMD2 | $BIN_AWK -F ' ' '{print $1}') From 7bbf69181e0280a9a3cc1d8659a26fa1ca86154d Mon Sep 17 00:00:00 2001 From: Lennart Date: Tue, 30 Dec 2025 00:30:56 +0100 Subject: [PATCH 475/497] Update CloudLinux detection (#591) --- snmp/distro | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/snmp/distro b/snmp/distro index 843eef062..c3b9542e7 100755 --- a/snmp/distro +++ b/snmp/distro @@ -20,6 +20,14 @@ elif [ "${OS}" = "Linux" ] ; then DIST=$(cat /etc/fedora-release | awk '{print $1}') REV=$(cat /etc/fedora-release | sed s/.*release\ // | sed s/\ .*//) + elif [ -f /etc/cloudlinux-release ] ; then + DIST="CloudLinux" + if [ "$(cat /etc/cloudlinux-release)" = "CloudLinux release 10" ]; then + REV=$(cldetect --detect-os | sed 's/CloudLinux //'); + else + REV=$(cat /etc/cloudlinux-release | sed s/.*release\ // | sed s/\ .*//); fi + IGNORE_OS_RELEASE=1 + elif [ -f /etc/redhat-release ] ; then DIST=$(cat /etc/redhat-release | awk '{print $1}') if [ "${DIST}" = "CentOS" ]; then @@ -45,12 +53,12 @@ elif [ "${OS}" = "Linux" ] ; then REV=$(cat /etc/redhat-release | sed s/.*release\ // | sed s/\ .*//) elif [ -f /etc/almalinux-release ] ; then - DIST='AlmaLinux' + DIST="AlmaLinux" #PSEUDONAME=$(cat /etc/almalinux-release | sed s/.*\(// | sed s/\)//) REV=$(cat /etc/almalinux-release | sed s/.*release\ // | sed s/\ .*//) elif [ -f /etc/mandrake-release ] ; then - DIST='Mandrake' + DIST="Mandrake" #PSEUDONAME=$(cat /etc/mandrake-release | sed s/.*\(// | sed s/\)//) REV=$(cat /etc/mandrake-release | sed s/.*release\ // | sed s/\ .*//) From 633b41c0b14b4fa6cef18093275956ab3fe3037d Mon Sep 17 00:00:00 
2001 From: SourceDoctor Date: Thu, 8 Jan 2026 09:50:08 +0100 Subject: [PATCH 476/497] rewrite ntp-server application (#593) * rewrite ntp-server application * . * . --- snmp/ntp-server.py | 96 +++++++++++++++++++++++++++++++++++ snmp/ntp-server.sh | 122 --------------------------------------------- 2 files changed, 96 insertions(+), 122 deletions(-) create mode 100755 snmp/ntp-server.py delete mode 100755 snmp/ntp-server.sh diff --git a/snmp/ntp-server.py b/snmp/ntp-server.py new file mode 100755 index 000000000..f65482aae --- /dev/null +++ b/snmp/ntp-server.py @@ -0,0 +1,96 @@ +#!/usr/bin/env python3 + +import json +import subprocess + + +def run_command(command_list): + + result = subprocess.run( + command_list, stdout=subprocess.PIPE, stderr=subprocess.DEVNULL + ) + + return result.stdout.decode("utf-8") + + +def time_since_reset_to_seconds(part): + # time since reset: 3D 08:06:47 + _part = part.split(":") + k = _part[0] + + # calc seconds + try: + v_h = _part[1] + except IndexError: + v_h = "0" + try: + v_m = _part[2] + except IndexError: + v_m = "0" + try: + v_s = _part[3] + except IndexError: + v_s = "0" + + v = 0 + + if "D" in v_h: + v_h_part = v_h.split() + v += int(v_h_part[0].replace("D", "")) * 86400 + v += int(v_h_part[1]) * 3600 + else: + v += int(v_h) * 3600 + + v += int(v_m) * 60 + + v += int(v_s) + + v = str(v) + + return k, v + + +# -------- first command --------------------------- + +output = run_command(["ntpq", "-c rv"]) + +parts = output.replace("\n", "").split(",") + +data_dict = {} + +for part in parts: + if part.count("=") != 1: + continue + + k, v = part.split("=") + + data_dict[k.strip()] = v.replace('"', "") + +# -------- second command --------------------------- + +output2 = run_command(["ntpq", "-c iostats 127.0.0.1"]) + +parts = output2.split("\n") + +for part in parts: + if part.count(":") < 1: + continue + + if "time since reset" in part: + k, v = time_since_reset_to_seconds(part) + + elif part.count(":") > 1: + continue + + else: 
+ k, v = part.split(":") + + k = k.strip().replace(" ", "_") + + data_dict[k] = v.strip().split()[0] + +# ---------------------------------------------------- + +result_dict = {"error": 0, "errorString": "", "version": 1, "data": data_dict} + +print(json.dumps(result_dict, indent=4, sort_keys=True)) diff --git a/snmp/ntp-server.sh b/snmp/ntp-server.sh deleted file mode 100755 index fe76501a7..000000000 --- a/snmp/ntp-server.sh +++ /dev/null @@ -1,122 +0,0 @@ -#!/bin/sh -# Please make sure the paths below are correct. -# Alternatively you can put them in $0.conf, meaning if you've named -# this script ntp-client.sh then it must go in ntp-client.sh.conf . -# -# NTPQV output version of "ntpq -c rv" -# p1 DD-WRT and some other outdated linux distros -# p11 FreeBSD 11 and any linux distro that is up to date -# -# If you are unsure, which to set, run this script and make sure that -# the JSON output variables match that in "ntpq -c rv". -# - -CONFIGFILE=/etc/snmp/ntp-server.conf - -BIN_ENV='/usr/bin/env' - -if [ -f $CONFIGFILE ] ; then - # shellcheck disable=SC1090 - . $CONFIGFILE -fi - -BIN_NTPD="$BIN_ENV ntpd" -BIN_NTPQ="$BIN_ENV ntpq" -BIN_NTPDC="$BIN_ENV ntpdc" -BIN_GREP="$BIN_ENV grep" -BIN_TR="$BIN_ENV tr" -BIN_CUT="$BIN_ENV cut" -BIN_SED="$BIN_ENV sed" -BIN_AWK="$BIN_ENV awk" - -NTPQV="p11" -################################################################ -# Don't change anything unless you know what are you doing # -################################################################ -CONFIG=$0".conf" -if [ -f "$CONFIG" ]; then - # shellcheck disable=SC1090 - . 
"$CONFIG" -fi -VERSION=1 - -STRATUM=$($BIN_NTPQ -c rv | $BIN_GREP -Eow "stratum=[0-9]+" | $BIN_CUT -d "=" -f 2) - -# parse the ntpq info that requires version specific info -NTPQ_RAW=$($BIN_NTPQ -c rv | $BIN_GREP jitter | $BIN_SED 's/[[:alpha:]=,_]/ /g') -if [ $NTPQV = "p11" ]; then - # shellcheck disable=SC2086 - OFFSET=$(echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $3}') - # shellcheck disable=SC2086 - FREQUENCY=$(echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $4}') - # shellcheck disable=SC2086 - SYS_JITTER=$(echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $5}') - # shellcheck disable=SC2086 - CLK_JITTER=$(echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $6}') - # shellcheck disable=SC2086 - CLK_WANDER=$(echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $7}') -fi -if [ $NTPQV = "p1" ]; then - # shellcheck disable=SC2086 - OFFSET=$(echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $2}') - # shellcheck disable=SC2086 - FREQUENCY=$(echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $3}') - # shellcheck disable=SC2086 - SYS_JITTER=$(echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $4}') - # shellcheck disable=SC2086 - CLK_JITTER=$(echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $5}') - # shellcheck disable=SC2086 - CLK_WANDER=$(echo $NTPQ_RAW | $BIN_AWK -F ' ' '{print $6}') -fi - -VER=$($BIN_NTPD --version 2>&1 | cut -d\ -f 2 | head -n 1) -if [ "$VER" = "4.2.6p5" ]; then - USECMD=$(echo "$BIN_NTPDC" -c iostats 127.0.0.1) -else - USECMD=$(echo "$BIN_NTPQ" -c iostats 127.0.0.1) -fi -CMD2=$($USECMD 2>/dev/null | $BIN_TR -d ' ' | $BIN_CUT -d : -f 2 | $BIN_TR '\n' ' ') - -# shellcheck disable=SC2086 -TIMESINCERESET=$(echo $CMD2 | $BIN_AWK -F ' ' '{print $1}') -# shellcheck disable=SC2086 -RECEIVEDBUFFERS=$(echo $CMD2 | $BIN_AWK -F ' ' '{print $2}') -# shellcheck disable=SC2086 -FREERECEIVEBUFFERS=$(echo $CMD2 | $BIN_AWK -F ' ' '{print $3}') -# shellcheck disable=SC2086 -USEDRECEIVEBUFFERS=$(echo $CMD2 | $BIN_AWK -F ' ' '{print $4}') -# shellcheck disable=SC2086 -LOWWATERREFILLS=$(echo $CMD2 | $BIN_AWK -F ' ' '{print $5}') -# shellcheck 
disable=SC2086 -DROPPEDPACKETS=$(echo $CMD2 | $BIN_AWK -F ' ' '{print $6}') -# shellcheck disable=SC2086 -IGNOREDPACKETS=$(echo $CMD2 | $BIN_AWK -F ' ' '{print $7}') -# shellcheck disable=SC2086 -RECEIVEDPACKETS=$(echo $CMD2 | $BIN_AWK -F ' ' '{print $8}') -# shellcheck disable=SC2086 -PACKETSSENT=$(echo $CMD2 | $BIN_AWK -F ' ' '{print $9}') -# shellcheck disable=SC2086 -PACKETSENDFAILURES=$(echo $CMD2 | $BIN_AWK -F ' ' '{print $10}') -#INPUTWAKEUPS=$(echo $CMD2 | $BIN_AWK -F ' ' '{print $11}') -# shellcheck disable=SC2086 -USEFULINPUTWAKEUPS=$(echo $CMD2 | $BIN_AWK -F ' ' '{print $12}') - -echo '{"data":{"offset":"'"$OFFSET"\ -'","frequency":"'"$FREQUENCY"\ -'","sys_jitter":"'"$SYS_JITTER"\ -'","clk_jitter":"'"$CLK_JITTER"\ -'","clk_wander":"'"$CLK_WANDER"\ -'","stratum":"'"$STRATUM"\ -'","time_since_reset":"'"$TIMESINCERESET"\ -'","receive_buffers":"'"$RECEIVEDBUFFERS"\ -'","free_receive_buffers":"'"$FREERECEIVEBUFFERS"\ -'","used_receive_buffers":"'"$USEDRECEIVEBUFFERS"\ -'","low_water_refills":"'"$LOWWATERREFILLS"\ -'","dropped_packets":"'"$DROPPEDPACKETS"\ -'","ignored_packets":"'"$IGNOREDPACKETS"\ -'","received_packets":"'"$RECEIVEDPACKETS"\ -'","packets_sent":"'"$PACKETSSENT"\ -'","packet_send_failures":"'"$PACKETSENDFAILURES"\ -'","input_wakeups":"'"$PACKETSENDFAILURES"\ -'","useful_input_wakeups":"'"$USEFULINPUTWAKEUPS"\ -'"},"error":"0","errorString":"","version":"'$VERSION'"}' From 83784b2e1c6dc7d23e95ec23f2298464caf6e107 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dr=2E=20Andr=C3=A1s=20Korn?= Date: Sat, 10 Jan 2026 13:58:14 +0100 Subject: [PATCH 477/497] osupdate: fix count of upgradable packages for APT (#580) * osupdate: fix count of upgradable packages for APT Originally, the script used the output of `apt-get upgrade` to obtain a list of upgradable packages, but this can be very off, as upgrade won't install upgrades with new dependencies, so these would not be counted. 
For example, one of my bookworm (oldstable) systems that would now be upgraded to trixie lists an order of magnitude fewer packages than `apt list --upgradable` (23 vs 216). `apt list --upgradable` is the command that actually tells you which packages have newer versions in the configured repositories (with dependencies that are satisfiable). This also speeds up the query considerably: `time apt-get -qq -s upgrade` takes ~8s on one system, while `time apt list --upgradable` takes less than 1s on the same system. `apt list --upgradable` has been available since Debian 8 (codename "jessie"), released in April 2015. * snmp/osupdate: check for existence of apt, not apt-get ... since we're invoking `apt list --upgradeable` to find the number of packages that can be upgraded. --- snmp/osupdate | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/snmp/osupdate b/snmp/osupdate index 9949fba44..11a6d836e 100755 --- a/snmp/osupdate +++ b/snmp/osupdate @@ -22,8 +22,8 @@ BIN_DNF='/usr/bin/env dnf' CMD_DNF='-q check-update' BIN_TDNF='/usr/bin/env tdnf' CMD_TDNF='-q check-update' -BIN_APT='/usr/bin/env apt-get' -CMD_APT='-qq -s upgrade' +BIN_APT='/usr/bin/env apt' +CMD_APT='list --upgradable' BIN_PACMAN='/usr/bin/env pacman' CMD_PACMAN='-Sup' BIN_CHECKUPDATES='/usr/bin/env checkupdates' @@ -92,10 +92,10 @@ elif command -v yum &>/dev/null ; then else echo "0"; fi -elif command -v apt-get &>/dev/null ; then +elif command -v apt &>/dev/null ; then # Debian / Devuan / Ubuntu # shellcheck disable=SC2086 - UPDATES=$($BIN_APT $CMD_APT | $BIN_GREP $CMD_GREP 'Inst') + UPDATES=$($BIN_APT $CMD_APT 2>&1 | $BIN_GREP $CMD_GREP '/') if [ "$UPDATES" -ge 1 ]; then echo "$UPDATES"; else From e44159572ef86f89b6c9091751c83ac7aea4a3ad Mon Sep 17 00:00:00 2001 From: perceival Date: Sun, 15 Feb 2026 19:46:51 +0100 Subject: [PATCH 478/497] Added openwrt temperature monitoring and other minor updates. Coupled with relevant librenms PR. 
--- snmp/Openwrt/LICENSE | 339 +++++++++++++++++++++++++ snmp/Openwrt/README.md | 263 +++++++++++++++++++ snmp/Openwrt/cleanup-and-fix.sh | 31 +++ snmp/Openwrt/distro.sh | 6 + snmp/Openwrt/lm-sensors-pass.sh | 119 +++++++++ snmp/Openwrt/setup-snmpd.sh | 91 +++++++ snmp/Openwrt/snmpd-config-generator.sh | 91 +++++++ snmp/Openwrt/wlClients.sh | 75 +++++- snmp/Openwrt/wlFrequency.sh | 2 +- snmp/Openwrt/wlNoiseFloor.sh | 2 +- 10 files changed, 1010 insertions(+), 9 deletions(-) create mode 100644 snmp/Openwrt/LICENSE create mode 100644 snmp/Openwrt/README.md create mode 100644 snmp/Openwrt/cleanup-and-fix.sh create mode 100644 snmp/Openwrt/distro.sh create mode 100644 snmp/Openwrt/lm-sensors-pass.sh create mode 100644 snmp/Openwrt/setup-snmpd.sh create mode 100644 snmp/Openwrt/snmpd-config-generator.sh diff --git a/snmp/Openwrt/LICENSE b/snmp/Openwrt/LICENSE new file mode 100644 index 000000000..d159169d1 --- /dev/null +++ b/snmp/Openwrt/LICENSE @@ -0,0 +1,339 @@ + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Lesser General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. 
Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. + + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. 
+ + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. 
You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. (Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. 
+ +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. 
However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. + +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. 
+You are not responsible for enforcing compliance by third parties to +this License. + + 7. If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. 
If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 
EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. 
+ + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this +when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) year name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, the commands you use may +be called something other than `show w' and `show c'; they could even be +mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the program + `Gnomovision' (which makes passes at compilers) written by James Hacker. + + , 1 April 1989 + Ty Coon, President of Vice + +This General Public License does not permit incorporating your program into +proprietary programs. If your program is a subroutine library, you may +consider it more useful to permit linking proprietary applications with the +library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. 
diff --git a/snmp/Openwrt/README.md b/snmp/Openwrt/README.md new file mode 100644 index 000000000..70f7c2ce2 --- /dev/null +++ b/snmp/Openwrt/README.md @@ -0,0 +1,263 @@ +# OpenWrt-LibreNMS +SNMPD OpenWrt configuration - integration for OpenWrt devices to be visible with more features in LibreNMS network management platform. Based on https://github.com/librenms/librenms-agent/tree/master/snmp/Openwrt + +This package provides unified, auto-detecting SNMP monitoring for OpenWrt devices with support for: +- Wireless interface metrics (clients, frequency, rate, noise, SNR) +- Thermal sensor monitoring via LM-SENSORS-MIB +- Auto-discovery of wireless interfaces +- Dynamic configuration generation + +## Key Features + +### 1. Auto-Generation of wlInterfaces.txt + +`wlClients.sh` auto-detects all wireless interfaces on first run and generates the file automatically. + +```bash +# First run automatically creates wlInterfaces.txt +/etc/librenms/wlClients.sh +``` + +### 2. Dynamic Config Generator +**Problem**: Different devices need different snmpd configs based on their wireless interfaces (ap1: wl0-ap0, wl1-ap0; ap2: wlan0, wlan02, wlan12, wlan22). + +**Solution**: `snmpd-config-generator.sh` reads wlInterfaces.txt and generates appropriate UCI config entries for all detected interfaces. + +```bash +# Generate config for current device +/etc/librenms/snmpd-config-generator.sh +``` + +### 3. Unified Base Configuration +**Problem**: Repetitive config entries across devices. + +**Solution**: /etc/config/snmpd is updated with dynamic content. + +### 4. Error Handling +All scripts have: +- Error messages +- Argument validation +- Fallback behavior +- Consistent exit codes + +### 5. 
Thermal Sensor Support (LM-SENSORS-MIB) +Uses the `pass` directive to provide proper LM-SENSORS-MIB thermal sensors: +- `.1.3.6.1.4.1.2021.13.16.2.1.1` - lmTempSensorsIndex (INTEGER) +- `.1.3.6.1.4.1.2021.13.16.2.1.2` - lmTempSensorsDevice (STRING) +- `.1.3.6.1.4.1.2021.13.16.2.1.3` - lmTempSensorsValue (Gauge32, millidegrees) + +The `pass` script (`lm-sensors-pass.sh`) provides proper data types and table structure, enabling automatic discovery in LibreNMS. + +## File Structure + +``` +/etc/librenms/ +├── wlInterfaces.txt # Auto-generated interface list +├── wlClients.sh # Count Wi-Fi clients (auto-generates wlInterfaces.txt) +├── wlFrequency.sh # Get operating frequency +├── wlNoiseFloor.sh # Get noise floor +├── wlRate.sh # Get TX/RX rates (min/avg/max) +├── wlSNR.sh # Get SNR (min/avg/max) +├── lm-sensors-pass.sh # LM-SENSORS-MIB pass script for thermal sensors +├── distro.sh # Extract OpenWrt version string +├── cleanup-and-fix.sh # Remove old exec entries +└── snmpd-config-generator.sh # Generate UCI config entries +``` + +## Installation + +### Quick Setup +```bash +# Run setup script +chmod +x setup-snmpd.sh +./setup-snmpd.sh + +### Manual Installation +```bash +# Create directory +mkdir -p /etc/librenms + +# Copy all scripts +cp wl*.sh lm-sensors-pass.sh distro.sh cleanup-and-fix.sh snmpd-config-generator.sh /etc/librenms/ +chmod +x /etc/librenms/*.sh + +# Generate interface list +/etc/librenms/wlClients.sh + +# Generate config +/etc/librenms/snmpd-config-generator.sh +``` + +## Usage + +### Generate wlInterfaces.txt +```bash +# Auto-detect all wireless interfaces +/etc/librenms/wlClients.sh + +# Manually edit if needed +vi /etc/librenms/wlInterfaces.txt +# Format: interface,ssid +# Example: +# wl0-ap0,MySSID +# wlan0,GuestNetwork +``` + +### Generate SNMPD Config +```bash +# Generate all extend entries for detected interfaces +/etc/librenms/snmpd-config-generator.sh + +# Output can be appended to /etc/config/snmpd +/etc/librenms/snmpd-config-generator.sh 
>> /etc/config/snmpd +``` + +### Test Scripts Individually +```bash +# Test client count +/etc/librenms/wlClients.sh wlan0 + +# Test frequency +/etc/librenms/wlFrequency.sh wlan0 + +# Test rate (interface, direction, stat) +/etc/librenms/wlRate.sh wlan0 tx avg + +# Test SNR (interface, stat) +/etc/librenms/wlSNR.sh wlan0 avg + +# Test thermal sensors (pass script) +/etc/librenms/lm-sensors-pass.sh -g .1.3.6.1.4.1.2021.13.16.2.1.3.0 +``` + +### Query SNMP +```bash +# From monitoring server +snmpwalk -v2c -c public localhost .1.3.6.1.4.1.8072.1.3.2 + +# Specific metrics +snmpget -v2c -c public localhost NET-SNMP-EXTEND-MIB::nsExtendOutput1Line.\"clients-wlan0\" +snmpget -v2c -c public localhost NET-SNMP-EXTEND-MIB::nsExtendOutput1Line.\"frequency-wlan0\" + +# Thermal sensors (LM-SENSORS-MIB) +snmpwalk -v2c -c public localhost LM-SENSORS-MIB::lmTempSensorsValue +``` + +## SNMP OID Reference + +### Wireless Metrics (via nsExtend) +Base OID: `.1.3.6.1.4.1.8072.1.3.2` + +Per interface: +- `clients-` - Connected client count +- `frequency-` - Operating frequency (MHz) +- `rate-tx--min/avg/max` - TX bitrate stats +- `rate-rx--min/avg/max` - RX bitrate stats +- `noise-floor-` - Noise floor (dBm) +- `snr--min/avg/max` - Signal-to-Noise Ratio (dB) + +### Thermal Sensors (LM-Sensors MIB) +- `.1.3.6.1.4.1.2021.13.16.2.1.1` - lmSensorsIndex (INTEGER) +- `.1.3.6.1.4.1.2021.13.16.2.1.2` - lmSensorsDevice (STRING) +- `.1.3.6.1.4.1.2021.13.16.2.1.3` - lmSensorsValue (Gauge32, millidegrees) + +## Configuration Examples + +### Example: 4 Interface Device (like native OpenWrt) +wlInterfaces.txt: +``` +wl0-ap0,IoT +wl0-ap1,guest +wl1-ap0,main +wl1-ap1,uplink +``` + +Generated extends: +- clients-wl0-ap0, clients-wl0-ap1, clients-wl1-ap0, clients-wl1-ap1 +- frequency-wl0-ap0, frequency-wl0-ap1, frequency-wl1-ap0, frequency-wl1-ap1 +- rate-tx-wl0-ap0-min/avg/max (and all other interfaces) +- rate-rx-wl0-ap0-min/avg/max (and all other interfaces) +- And so on... 
+ +### Example: Multi-VLAN Device (like gl.Inet flint3) +wlInterfaces.txt: +``` +wlan0,MainNetwork +wlan02,VLAN2 +wlan12,VLAN12 +wlan22,VLAN22 +``` + +## Troubleshooting + +### wlInterfaces.txt not generated +```bash +# Check for wireless interfaces +ls /sys/class/net/*/wireless +ls /sys/class/net/*/phy80211 + +# Manually create the file +cat > /etc/librenms/wlInterfaces.txt << EOF +wlan0,YourSSID +EOF +``` + +### SNMP not returning data +```bash +# Check if snmpd is running +/etc/init.d/snmpd status + +# Check if scripts are executable +ls -la /etc/librenms/*.sh + +# Test script directly +/etc/librenms/wlClients.sh wlan0 + +# Check snmpd logs +logread | grep snmpd +``` + +### Script errors +```bash +# Enable debug output +sh -x /etc/librenms/wlClients.sh wlan0 + +# Check for required commands +which iw iwinfo awk cut grep +``` + +## Comparison: Before vs After + +### Before (Manual, Per-Device) +❌ Required manual creation of wlInterfaces.txt for each device +❌ Different config file for each device type +❌ Repetitive config entries (280+ lines) +❌ Hard to maintain across multiple devices +❌ Interface changes require manual config updates + +### After (Automated, Unified) +✅ Auto-detects wireless interfaces +✅ Single config generator works for all devices +✅ Generates only needed entries +✅ Easy to maintain and replicate +✅ Interface changes auto-detected on script run + +## Benefits + +1. **Zero Manual Configuration**: Just run setup script +2. **Device-Agnostic**: Works on any OpenWrt device +3. **Self-Documenting**: Auto-generated configs show what's monitored +4. **Easy Replication**: Same process for all devices +5. **Future-Proof**: Adding interfaces doesn't require config changes +6. **Reduced Errors**: No manual typing of repetitive entries +7. 
**Consistent**: All devices use same monitoring approach + +## Security Notes + +- Default SNMP community strings should be changed in production +- Restrict SNMP access to monitoring network (192.168.0.0/24 in examples) +- Use SNMPv3 for better security if supported by your NMS + +## License + +These scripts are provided as-is for use with OpenWrt systems. diff --git a/snmp/Openwrt/cleanup-and-fix.sh b/snmp/Openwrt/cleanup-and-fix.sh new file mode 100644 index 000000000..4d4637b8f --- /dev/null +++ b/snmp/Openwrt/cleanup-and-fix.sh @@ -0,0 +1,31 @@ +#!/bin/sh + +# cleanup-and-fix.sh +# Run this on the OpenWrt device to: +# 1. Remove old broken exec entries +# 2. Verify pass script is working + +echo "Step 1: Removing old exec entries for lmSensors..." + +# Remove exec entries with miboid containing 2021.13.16 +uci show snmpd | grep "exec.*=exec" | cut -d'.' -f2 | cut -d'=' -f1 | while read idx; do + miboid=$(uci get snmpd.$idx.miboid 2>/dev/null) + if echo "$miboid" | grep -q "2021\.13\.16"; then + echo " Removing snmpd.$idx (miboid: $miboid)" + uci delete snmpd.$idx + fi +done + +uci commit snmpd + +echo "" +echo "Step 2: Verifying pass configuration..." +uci show snmpd | grep "pass.*lm-sensors" + +echo "" +echo "Step 3: Restarting snmpd..." +/etc/init.d/snmpd restart + +echo "" +echo "Done! 
Now test with:" +echo " snmpwalk -v2c -c public localhost LM-SENSORS-MIB::lmTempSensorsValue" diff --git a/snmp/Openwrt/distro.sh b/snmp/Openwrt/distro.sh new file mode 100644 index 000000000..3af86780f --- /dev/null +++ b/snmp/Openwrt/distro.sh @@ -0,0 +1,6 @@ +#!/bin/sh + +# distro.sh +# Extracts OpenWrt version string from banner (from "OpenWrt" onwards) + +grep OpenWrt /etc/banner | sed 's/.*OpenWrt/OpenWrt/' | head -1 diff --git a/snmp/Openwrt/lm-sensors-pass.sh b/snmp/Openwrt/lm-sensors-pass.sh new file mode 100644 index 000000000..763a2b45b --- /dev/null +++ b/snmp/Openwrt/lm-sensors-pass.sh @@ -0,0 +1,119 @@ +#!/bin/sh + +# lm-sensors-pass.sh +# SNMP pass script for LM-SENSORS-MIB thermal sensors +# Provides proper MIB structure at .1.3.6.1.4.1.2021.13.16.2.1 + +BASE_OID=".1.3.6.1.4.1.2021.13.16.2.1" + +# Function to get all thermal zone data +# Output format: index:name:temp +# Re-indexes zones sequentially starting from 1 +get_zones() { + local idx=0 + for zone in /sys/devices/virtual/thermal/thermal_zone*; do + [ -d "$zone" ] || continue + idx=$((idx + 1)) + zone_type=$(cat "$zone/type" 2>/dev/null || echo "unknown") + zone_temp=$(cat "$zone/temp" 2>/dev/null || echo "0") + echo "$idx:$zone_type:$zone_temp" + done | sort -t':' -k1 -n +} + +case "$1" in + -g) + # GET request - return exact OID match + REQ_OID="$2" + FOUND=0 + + while IFS=':' read idx name temp; do + case "$REQ_OID" in + "$BASE_OID.1.$idx") + echo "$REQ_OID" + echo "integer" + echo "$idx" + FOUND=1 + break + ;; + "$BASE_OID.2.$idx") + echo "$REQ_OID" + echo "string" + echo "$name" + FOUND=1 + break + ;; + "$BASE_OID.3.$idx") + echo "$REQ_OID" + echo "gauge" + echo "$temp" + FOUND=1 + break + ;; + esac + done << EOF +$(get_zones) +EOF + + [ "$FOUND" -eq 0 ] && echo "NONE" + ;; + + -n) + # GETNEXT request - return next OID after requested + REQ_OID="$2" + + # Create temporary file with all OIDs + TMP_FILE="/tmp/snmp_oids.$$" + > "$TMP_FILE" + + get_zones | while IFS=':' read idx name temp; 
do + # Pad index to ensure proper numeric sorting + # Format: column.index where index is zero-padded to 3 digits + printf "%d.%03d|$BASE_OID.1.$idx|integer|$idx\n" 1 $idx >> "$TMP_FILE" + printf "%d.%03d|$BASE_OID.2.$idx|string|$name\n" 2 $idx >> "$TMP_FILE" + printf "%d.%03d|$BASE_OID.3.$idx|gauge|$temp\n" 3 $idx >> "$TMP_FILE" + done + + # Sort by our padded key, then extract and compare OIDs + sort -t'|' -k1 "$TMP_FILE" | cut -d'|' -f2- | while IFS='|' read oid type value; do + # Use awk for proper numeric OID comparison + is_greater=$(awk -v req="$REQ_OID" -v curr="$oid" ' + BEGIN { + # Split OIDs into arrays + split(req, req_parts, "."); + split(curr, curr_parts, "."); + + # Compare each part numerically + for (i = 1; i <= length(curr_parts); i++) { + req_val = (i <= length(req_parts)) ? req_parts[i] : 0; + curr_val = curr_parts[i]; + + if (curr_val > req_val) { + print "1"; + exit; + } else if (curr_val < req_val) { + print "0"; + exit; + } + } + print "0"; + } + ') + + if [ "$is_greater" = "1" ]; then + echo "$oid" + echo "$type" + echo "$value" + rm -f "$TMP_FILE" + exit 0 + fi + done + + rm -f "$TMP_FILE" + echo "NONE" + ;; + + *) + echo "Usage: $0 -g|-n OID" >&2 + exit 1 + ;; +esac diff --git a/snmp/Openwrt/setup-snmpd.sh b/snmp/Openwrt/setup-snmpd.sh new file mode 100644 index 000000000..bbbc12413 --- /dev/null +++ b/snmp/Openwrt/setup-snmpd.sh @@ -0,0 +1,91 @@ +#!/bin/sh + +# setup-snmpd.sh +# Installation and configuration script for OpenWrt SNMP monitoring +# This script sets up all necessary scripts and generates the snmpd config + +SCRIPT_DIR="/etc/librenms" +BACKUP_DIR="/etc/librenms/backup" + +echo "OpenWrt SNMPD Setup Script" +echo "==========================" +echo "" + +# Create directories +echo "Creating directories..." 
+mkdir -p "$SCRIPT_DIR" +mkdir -p "$BACKUP_DIR" + +# Backup existing config if it exists +if [ -f /etc/config/snmpd ]; then + timestamp=$(date +%Y%m%d_%H%M%S) + echo "Backing up existing /etc/config/snmpd to $BACKUP_DIR/snmpd.$timestamp" + cp /etc/config/snmpd "$BACKUP_DIR/snmpd.$timestamp" +fi + +# Copy scripts to /etc/librenms/ +echo "Installing monitoring scripts to $SCRIPT_DIR..." + +scripts="wlClients.sh wlFrequency.sh wlNoiseFloor.sh wlRate.sh wlSNR.sh lm-sensors-pass.sh distro.sh cleanup-and-fix.sh snmpd-config-generator.sh" + +for script in $scripts; do + if [ -f "$script" ]; then + cp "$script" "$SCRIPT_DIR/" + chmod +x "$SCRIPT_DIR/$script" + echo " ✓ Installed $script" + else + echo " ✗ Warning: $script not found in current directory" + fi +done + +# Generate wlInterfaces.txt +echo "" +echo "Generating wlInterfaces.txt..." +"$SCRIPT_DIR/wlClients.sh" > /dev/null 2>&1 +if [ -f "$SCRIPT_DIR/wlInterfaces.txt" ]; then + echo " ✓ Generated $SCRIPT_DIR/wlInterfaces.txt" + cat "$SCRIPT_DIR/wlInterfaces.txt" +else + echo " ✗ Failed to generate wlInterfaces.txt" +fi + +# Generate sample config +echo "" +echo "Generating SNMPD configuration..." +echo "Run the following command to see the generated config:" +echo "" +echo " $SCRIPT_DIR/snmpd-config-generator.sh" +echo "" +echo "To apply the configuration:" +echo " 1. Backup your current config: cp /etc/config/snmpd /etc/config/snmpd.backup" +echo " 2. Edit /etc/config/snmpd and add the generated sections" +echo " 3. Restart snmpd: /etc/init.d/snmpd restart" +echo "" +echo "Setup complete!" + +# Ask for confirmation +printf "Do you want to update the SNMP configuration? [Y/n]: " +read -r answer + +# Convert to lowercase and check (default to 'y' if empty) +answer=$(echo "$answer" | tr '[:upper:]' '[:lower:]') + +if [ -z "$answer" ] || [ "$answer" = "y" ]; then + echo "Updating snmpd configuration..." + + # 1. Backup existing config + cp /etc/config/snmpd /etc/config/snmpd-backup + + # 2. 
Append generated config + # Ensure the generator script is executable + chmod +x "$SCRIPT_DIR/snmpd-config-generator.sh" + "$SCRIPT_DIR/snmpd-config-generator.sh" >> /etc/config/snmpd + + # 3. Restart the service + /etc/init.d/snmpd restart + + echo "Done! Service restarted." +else + echo "Aborted. No changes made." + exit 1 +fi diff --git a/snmp/Openwrt/snmpd-config-generator.sh b/snmp/Openwrt/snmpd-config-generator.sh new file mode 100644 index 000000000..b6e1e38c3 --- /dev/null +++ b/snmp/Openwrt/snmpd-config-generator.sh @@ -0,0 +1,91 @@ +#!/bin/sh + +# snmpd-config-generator.sh +# Generates SNMP extend entries for all wireless interfaces dynamically +# Usage: Run this script to generate UCI config commands for /etc/config/snmpd + +SCRIPT_DIR="/etc/librenms" +INTERFACES_FILE="$SCRIPT_DIR/wlInterfaces.txt" + +# Ensure wlInterfaces.txt exists +if [ ! -f "$INTERFACES_FILE" ]; then + echo "Generating $INTERFACES_FILE..." + $SCRIPT_DIR/wlClients.sh >/dev/null 2>&1 +fi + +# Read interfaces +if [ ! 
-f "$INTERFACES_FILE" ]; then + echo "Error: Could not find or generate $INTERFACES_FILE" + exit 1 +fi + +# Generate config for each interface +cat "$INTERFACES_FILE" | while IFS=',' read -r iface ssid; do + [ -z "$iface" ] && continue + + # Sanitize interface name for use in UCI names (replace - with _) + safe_name=$(echo "$iface" | tr '-' '_') + + echo "" + echo "# Interface: $iface ($ssid)" + echo "" + + # Clients + echo "config extend" + echo " option name 'clients-$iface'" + echo " option prog '$SCRIPT_DIR/wlClients.sh'" + echo " option args '$iface'" + echo "" + + # Frequency + echo "config extend" + echo " option name 'frequency-$iface'" + echo " option prog '$SCRIPT_DIR/wlFrequency.sh'" + echo " option args '$iface'" + echo "" + + # Rate TX (min, avg, max) + for stat in min avg max; do + echo "config extend" + echo " option name 'rate-tx-$iface-$stat'" + echo " option prog '$SCRIPT_DIR/wlRate.sh'" + echo " option args '$iface tx $stat'" + echo "" + done + + # Rate RX (min, avg, max) + for stat in min avg max; do + echo "config extend" + echo " option name 'rate-rx-$iface-$stat'" + echo " option prog '$SCRIPT_DIR/wlRate.sh'" + echo " option args '$iface rx $stat'" + echo "" + done + + # Noise floor + echo "config extend" + echo " option name 'noise-floor-$iface'" + echo " option prog '$SCRIPT_DIR/wlNoiseFloor.sh'" + echo " option args '$iface'" + echo "" + + # SNR (min, avg, max) + for stat in min avg max; do + echo "config extend" + echo " option name 'snr-$iface-$stat'" + echo " option prog '$SCRIPT_DIR/wlSNR.sh'" + echo " option args '$iface $stat'" + echo "" + done +done + +# Generate thermal sensor config using pass (LM-SENSORS-MIB) +echo "" +echo "# Thermal Sensors (LM-SENSORS-MIB via pass)" +echo "" + +echo "config pass" +echo " option name 'lm-sensors'" +echo " option prog '$SCRIPT_DIR/lm-sensors-pass.sh'" +echo " option miboid '.1.3.6.1.4.1.2021.13.16.2.1'" +echo "" diff --git a/snmp/Openwrt/wlClients.sh b/snmp/Openwrt/wlClients.sh index 
72e3694cb..1297bcb59 100755 --- a/snmp/Openwrt/wlClients.sh +++ b/snmp/Openwrt/wlClients.sh @@ -2,23 +2,84 @@ # wlClients.sh # Counts connected (associated) Wi-Fi devices -# Arguments: targed interface. Assumes all interfaces if no argument +# Arguments: target interface. Assumes all interfaces if no argument +# Auto-generates wlInterfaces.txt if it doesn't exist + +# Get path to this script (ash-compatible) +scriptdir="$(cd "$(dirname "$0")" && pwd)" +interfaces_file="$scriptdir/wlInterfaces.txt" + +# Function to auto-detect and generate wlInterfaces.txt +generate_interfaces_file() { + local tmpfile="$interfaces_file.tmp" + + # Find all wireless interfaces that are actually in use + for dev in /sys/class/net/*; do + iface=$(basename "$dev") + + # Skip known non-client interfaces + case "$iface" in + mld*|wifi*|phy*|wlan-*|mon.*) + continue + ;; + esac + + # Check if it's a wireless interface + if [ -d "$dev/wireless" ] || [ -d "$dev/phy80211" ]; then + # Try to get SSID using iw first + ssid=$(/usr/sbin/iw dev "$iface" info 2>/dev/null | /bin/grep ssid | /usr/bin/cut -f 2 -s -d" " | /usr/bin/tr -d '\n') + + # If no SSID from iw, try iwinfo + if [ -z "$ssid" ]; then + ssid=$(/usr/bin/iwinfo "$iface" info 2>/dev/null | /bin/grep "ESSID" | /usr/bin/cut -d'"' -f2) + fi + + # Skip interfaces without SSID (not active AP/client interfaces) + [ -z "$ssid" ] && continue + + # Skip interfaces with "unknown" SSID + [ "$ssid" = "unknown" ] && continue + + # Check if interface is UP + if ! /sbin/ip link show "$iface" 2>/dev/null | /bin/grep -q "UP"; then + continue + fi + + echo "$iface,$ssid" >> "$tmpfile" + fi + done + + # Only replace if we found interfaces + if [ -s "$tmpfile" ]; then + mv "$tmpfile" "$interfaces_file" + return 0 + else + rm -f "$tmpfile" + return 1 + fi +} + +# Check if wlInterfaces.txt exists, generate if not +if [ ! -f "$interfaces_file" ]; then + generate_interfaces_file + if [ $? 
-ne 0 ]; then + /bin/echo "Error: Could not generate $interfaces_file and file does not exist" + exit 1 + fi +fi # Check number of arguments if [ $# -gt 1 ]; then - /bin/echo "Usage: wlClients.sh interface" + /bin/echo "Usage: wlClients.sh [interface]" /bin/echo "Too many command line arguments, exiting." exit 1 fi -# Get path to this script -scriptdir=$(dirname "$(readlink -f -- "$0")") - # Get interface list. Set target, which is name returned for interface if [ "$1" ]; then interfaces=$1 else - interfaces=$(cat "$scriptdir"/wlInterfaces.txt | cut -f 1 -d",") + interfaces=$(cat "$interfaces_file" | cut -f 1 -d",") fi # Count associated devices @@ -26,7 +87,7 @@ count=0 for interface in $interfaces do new=$(/usr/sbin/iw dev "$interface" station dump 2>/dev/null | /bin/grep Station | /usr/bin/cut -f 2 -s -d" " | /usr/bin/wc -l) - count=$(( $count + $new )) + count=$(( count + new )) done # Return snmp result diff --git a/snmp/Openwrt/wlFrequency.sh b/snmp/Openwrt/wlFrequency.sh index 4552cc4b7..e4bcd1cac 100755 --- a/snmp/Openwrt/wlFrequency.sh +++ b/snmp/Openwrt/wlFrequency.sh @@ -2,7 +2,7 @@ # wlFrequency.sh # Returns wlFrequency, in MHz (not channel number) -# Arguments: targed interface +# Arguments: target interface # Check number of arguments if [ $# -ne 1 ]; then diff --git a/snmp/Openwrt/wlNoiseFloor.sh b/snmp/Openwrt/wlNoiseFloor.sh index 102cde435..9f646954c 100755 --- a/snmp/Openwrt/wlNoiseFloor.sh +++ b/snmp/Openwrt/wlNoiseFloor.sh @@ -2,7 +2,7 @@ # wlNoiseFloor.sh # Returns wlNoiseFloor, in dBm -# Arguments: targed interface +# Arguments: target interface # Check number of arguments if [ $# -ne 1 ]; then From 25c4fefa07b41f3632ebb6585ec22f2e0cbd556b Mon Sep 17 00:00:00 2001 From: perceival Date: Sun, 22 Feb 2026 08:26:22 +0100 Subject: [PATCH 479/497] Added openwrt temperature monitoring and other minor updates. Coupled with relevant librenms PR. 
--- snmp/Openwrt/LICENSE | 339 ---------------------------------- snmp/Openwrt/setup-snmpd.sh | 2 + snmp/Openwrt/wlClients.sh | 29 +-- snmp/Openwrt/wlInterfaces.txt | 2 - 4 files changed, 20 insertions(+), 352 deletions(-) delete mode 100644 snmp/Openwrt/LICENSE delete mode 100755 snmp/Openwrt/wlInterfaces.txt diff --git a/snmp/Openwrt/LICENSE b/snmp/Openwrt/LICENSE deleted file mode 100644 index d159169d1..000000000 --- a/snmp/Openwrt/LICENSE +++ /dev/null @@ -1,339 +0,0 @@ - GNU GENERAL PUBLIC LICENSE - Version 2, June 1991 - - Copyright (C) 1989, 1991 Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The licenses for most software are designed to take away your -freedom to share and change it. By contrast, the GNU General Public -License is intended to guarantee your freedom to share and change free -software--to make sure the software is free for all its users. This -General Public License applies to most of the Free Software -Foundation's software and to any other program whose authors commit to -using it. (Some other Free Software Foundation software is covered by -the GNU Lesser General Public License instead.) You can apply it to -your programs, too. - - When we speak of free software, we are referring to freedom, not -price. Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -this service if you wish), that you receive source code or can get it -if you want it, that you can change the software or use pieces of it -in new free programs; and that you know you can do these things. - - To protect your rights, we need to make restrictions that forbid -anyone to deny you these rights or to ask you to surrender the rights. 
-These restrictions translate to certain responsibilities for you if you -distribute copies of the software, or if you modify it. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must give the recipients all the rights that -you have. You must make sure that they, too, receive or can get the -source code. And you must show them these terms so they know their -rights. - - We protect your rights with two steps: (1) copyright the software, and -(2) offer you this license which gives you legal permission to copy, -distribute and/or modify the software. - - Also, for each author's protection and ours, we want to make certain -that everyone understands that there is no warranty for this free -software. If the software is modified by someone else and passed on, we -want its recipients to know that what they have is not the original, so -that any problems introduced by others will not reflect on the original -authors' reputations. - - Finally, any free program is threatened constantly by software -patents. We wish to avoid the danger that redistributors of a free -program will individually obtain patent licenses, in effect making the -program proprietary. To prevent this, we have made it clear that any -patent must be licensed for everyone's free use or not licensed at all. - - The precise terms and conditions for copying, distribution and -modification follow. - - GNU GENERAL PUBLIC LICENSE - TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION - - 0. This License applies to any program or other work which contains -a notice placed by the copyright holder saying it may be distributed -under the terms of this General Public License. 
The "Program", below, -refers to any such program or work, and a "work based on the Program" -means either the Program or any derivative work under copyright law: -that is to say, a work containing the Program or a portion of it, -either verbatim or with modifications and/or translated into another -language. (Hereinafter, translation is included without limitation in -the term "modification".) Each licensee is addressed as "you". - -Activities other than copying, distribution and modification are not -covered by this License; they are outside its scope. The act of -running the Program is not restricted, and the output from the Program -is covered only if its contents constitute a work based on the -Program (independent of having been made by running the Program). -Whether that is true depends on what the Program does. - - 1. You may copy and distribute verbatim copies of the Program's -source code as you receive it, in any medium, provided that you -conspicuously and appropriately publish on each copy an appropriate -copyright notice and disclaimer of warranty; keep intact all the -notices that refer to this License and to the absence of any warranty; -and give any other recipients of the Program a copy of this License -along with the Program. - -You may charge a fee for the physical act of transferring a copy, and -you may at your option offer warranty protection in exchange for a fee. - - 2. You may modify your copy or copies of the Program or any portion -of it, thus forming a work based on the Program, and copy and -distribute such modifications or work under the terms of Section 1 -above, provided that you also meet all of these conditions: - - a) You must cause the modified files to carry prominent notices - stating that you changed the files and the date of any change. 
- - b) You must cause any work that you distribute or publish, that in - whole or in part contains or is derived from the Program or any - part thereof, to be licensed as a whole at no charge to all third - parties under the terms of this License. - - c) If the modified program normally reads commands interactively - when run, you must cause it, when started running for such - interactive use in the most ordinary way, to print or display an - announcement including an appropriate copyright notice and a - notice that there is no warranty (or else, saying that you provide - a warranty) and that users may redistribute the program under - these conditions, and telling the user how to view a copy of this - License. (Exception: if the Program itself is interactive but - does not normally print such an announcement, your work based on - the Program is not required to print an announcement.) - -These requirements apply to the modified work as a whole. If -identifiable sections of that work are not derived from the Program, -and can be reasonably considered independent and separate works in -themselves, then this License, and its terms, do not apply to those -sections when you distribute them as separate works. But when you -distribute the same sections as part of a whole which is a work based -on the Program, the distribution of the whole must be on the terms of -this License, whose permissions for other licensees extend to the -entire whole, and thus to each and every part regardless of who wrote it. - -Thus, it is not the intent of this section to claim rights or contest -your rights to work written entirely by you; rather, the intent is to -exercise the right to control the distribution of derivative or -collective works based on the Program. 
- -In addition, mere aggregation of another work not based on the Program -with the Program (or with a work based on the Program) on a volume of -a storage or distribution medium does not bring the other work under -the scope of this License. - - 3. You may copy and distribute the Program (or a work based on it, -under Section 2) in object code or executable form under the terms of -Sections 1 and 2 above provided that you also do one of the following: - - a) Accompany it with the complete corresponding machine-readable - source code, which must be distributed under the terms of Sections - 1 and 2 above on a medium customarily used for software interchange; or, - - b) Accompany it with a written offer, valid for at least three - years, to give any third party, for a charge no more than your - cost of physically performing source distribution, a complete - machine-readable copy of the corresponding source code, to be - distributed under the terms of Sections 1 and 2 above on a medium - customarily used for software interchange; or, - - c) Accompany it with the information you received as to the offer - to distribute corresponding source code. (This alternative is - allowed only for noncommercial distribution and only if you - received the program in object code or executable form with such - an offer, in accord with Subsection b above.) - -The source code for a work means the preferred form of the work for -making modifications to it. For an executable work, complete source -code means all the source code for all modules it contains, plus any -associated interface definition files, plus the scripts used to -control compilation and installation of the executable. 
However, as a -special exception, the source code distributed need not include -anything that is normally distributed (in either source or binary -form) with the major components (compiler, kernel, and so on) of the -operating system on which the executable runs, unless that component -itself accompanies the executable. - -If distribution of executable or object code is made by offering -access to copy from a designated place, then offering equivalent -access to copy the source code from the same place counts as -distribution of the source code, even though third parties are not -compelled to copy the source along with the object code. - - 4. You may not copy, modify, sublicense, or distribute the Program -except as expressly provided under this License. Any attempt -otherwise to copy, modify, sublicense or distribute the Program is -void, and will automatically terminate your rights under this License. -However, parties who have received copies, or rights, from you under -this License will not have their licenses terminated so long as such -parties remain in full compliance. - - 5. You are not required to accept this License, since you have not -signed it. However, nothing else grants you permission to modify or -distribute the Program or its derivative works. These actions are -prohibited by law if you do not accept this License. Therefore, by -modifying or distributing the Program (or any work based on the -Program), you indicate your acceptance of this License to do so, and -all its terms and conditions for copying, distributing or modifying -the Program or works based on it. - - 6. Each time you redistribute the Program (or any work based on the -Program), the recipient automatically receives a license from the -original licensor to copy, distribute or modify the Program subject to -these terms and conditions. You may not impose any further -restrictions on the recipients' exercise of the rights granted herein. 
-You are not responsible for enforcing compliance by third parties to -this License. - - 7. If, as a consequence of a court judgment or allegation of patent -infringement or for any other reason (not limited to patent issues), -conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot -distribute so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you -may not distribute the Program at all. For example, if a patent -license would not permit royalty-free redistribution of the Program by -all those who receive copies directly or indirectly through you, then -the only way you could satisfy both it and this License would be to -refrain entirely from distribution of the Program. - -If any portion of this section is held invalid or unenforceable under -any particular circumstance, the balance of the section is intended to -apply and the section as a whole is intended to apply in other -circumstances. - -It is not the purpose of this section to induce you to infringe any -patents or other property right claims or to contest validity of any -such claims; this section has the sole purpose of protecting the -integrity of the free software distribution system, which is -implemented by public license practices. Many people have made -generous contributions to the wide range of software distributed -through that system in reliance on consistent application of that -system; it is up to the author/donor to decide if he or she is willing -to distribute software through any other system and a licensee cannot -impose that choice. - -This section is intended to make thoroughly clear what is believed to -be a consequence of the rest of this License. - - 8. 
If the distribution and/or use of the Program is restricted in -certain countries either by patents or by copyrighted interfaces, the -original copyright holder who places the Program under this License -may add an explicit geographical distribution limitation excluding -those countries, so that distribution is permitted only in or among -countries not thus excluded. In such case, this License incorporates -the limitation as if written in the body of this License. - - 9. The Free Software Foundation may publish revised and/or new versions -of the General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - -Each version is given a distinguishing version number. If the Program -specifies a version number of this License which applies to it and "any -later version", you have the option of following the terms and conditions -either of that version or of any later version published by the Free -Software Foundation. If the Program does not specify a version number of -this License, you may choose any version ever published by the Free Software -Foundation. - - 10. If you wish to incorporate parts of the Program into other free -programs whose distribution conditions are different, write to the author -to ask for permission. For software which is copyrighted by the Free -Software Foundation, write to the Free Software Foundation; we sometimes -make exceptions for this. Our decision will be guided by the two goals -of preserving the free status of all derivatives of our free software and -of promoting the sharing and reuse of software generally. - - NO WARRANTY - - 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY -FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 
EXCEPT WHEN -OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES -PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED -OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS -TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE -PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, -REPAIR OR CORRECTION. - - 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR -REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, -INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING -OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED -TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY -YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER -PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE -POSSIBILITY OF SUCH DAMAGES. - - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -convey the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. - - - Copyright (C) - - This program is free software; you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation; either version 2 of the License, or - (at your option) any later version. 
- - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License along - with this program; if not, write to the Free Software Foundation, Inc., - 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. - -Also add information on how to contact you by electronic and paper mail. - -If the program is interactive, make it output a short notice like this -when it starts in an interactive mode: - - Gnomovision version 69, Copyright (C) year name of author - Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. - This is free software, and you are welcome to redistribute it - under certain conditions; type `show c' for details. - -The hypothetical commands `show w' and `show c' should show the appropriate -parts of the General Public License. Of course, the commands you use may -be called something other than `show w' and `show c'; they could even be -mouse-clicks or menu items--whatever suits your program. - -You should also get your employer (if you work as a programmer) or your -school, if any, to sign a "copyright disclaimer" for the program, if -necessary. Here is a sample; alter the names: - - Yoyodyne, Inc., hereby disclaims all copyright interest in the program - `Gnomovision' (which makes passes at compilers) written by James Hacker. - - , 1 April 1989 - Ty Coon, President of Vice - -This General Public License does not permit incorporating your program into -proprietary programs. If your program is a subroutine library, you may -consider it more useful to permit linking proprietary applications with the -library. If this is what you want to do, use the GNU Lesser General -Public License instead of this License. 
diff --git a/snmp/Openwrt/setup-snmpd.sh b/snmp/Openwrt/setup-snmpd.sh index bbbc12413..adda05375 100644 --- a/snmp/Openwrt/setup-snmpd.sh +++ b/snmp/Openwrt/setup-snmpd.sh @@ -41,6 +41,8 @@ done # Generate wlInterfaces.txt echo "" echo "Generating wlInterfaces.txt..." +# Force regeneration by removing old file +rm -f "$SCRIPT_DIR/wlInterfaces.txt" "$SCRIPT_DIR/wlClients.sh" > /dev/null 2>&1 if [ -f "$SCRIPT_DIR/wlInterfaces.txt" ]; then echo " ✓ Generated $SCRIPT_DIR/wlInterfaces.txt" diff --git a/snmp/Openwrt/wlClients.sh b/snmp/Openwrt/wlClients.sh index 1297bcb59..0e9579e82 100755 --- a/snmp/Openwrt/wlClients.sh +++ b/snmp/Openwrt/wlClients.sh @@ -19,32 +19,39 @@ generate_interfaces_file() { # Skip known non-client interfaces case "$iface" in - mld*|wifi*|phy*|wlan-*|mon.*) + mld*|mon.*) #|wifi*|phy*|wlan-* continue ;; esac # Check if it's a wireless interface if [ -d "$dev/wireless" ] || [ -d "$dev/phy80211" ]; then - # Try to get SSID using iw first - ssid=$(/usr/sbin/iw dev "$iface" info 2>/dev/null | /bin/grep ssid | /usr/bin/cut -f 2 -s -d" " | /usr/bin/tr -d '\n') + # Get interface type and SSID using iw first + iw_info=$(/usr/sbin/iw dev "$iface" info 2>/dev/null) + iface_type=$(echo "$iw_info" | /usr/bin/awk '/^[[:space:]]*type / {print $2; exit}') + ssid=$(echo "$iw_info" | /bin/grep ssid | /usr/bin/cut -f 2 -s -d" " | /usr/bin/tr -d '\n') + + # Skip AP/VLAN interfaces which can report "ESSID: unknown" + [ "$iface_type" = "AP/VLAN" ] && continue # If no SSID from iw, try iwinfo if [ -z "$ssid" ]; then - ssid=$(/usr/bin/iwinfo "$iface" info 2>/dev/null | /bin/grep "ESSID" | /usr/bin/cut -d'"' -f2) + ssid=$(/usr/bin/iwinfo "$iface" info 2>/dev/null | /bin/sed -n \ + -e 's/.*ESSID: "\(.*\)".*/\1/p' \ + -e 's/.*ESSID: \(.*\)$/\1/p' | /usr/bin/head -n 1) fi # Skip interfaces without SSID (not active AP/client interfaces) [ -z "$ssid" ] && continue - # Skip interfaces with "unknown" SSID - [ "$ssid" = "unknown" ] && continue - - # Check if interface is UP - 
if ! /sbin/ip link show "$iface" 2>/dev/null | /bin/grep -q "UP"; then - continue - fi + # Skip malformed or unknown SSIDs + case "$ssid" in + unknown|*ESSID:*) + continue + ;; + esac + # Add to list (include even if DOWN, since SSID means it's configured) echo "$iface,$ssid" >> "$tmpfile" fi done diff --git a/snmp/Openwrt/wlInterfaces.txt b/snmp/Openwrt/wlInterfaces.txt deleted file mode 100755 index bfe882e0e..000000000 --- a/snmp/Openwrt/wlInterfaces.txt +++ /dev/null @@ -1,2 +0,0 @@ -wlan0,wl-2.4G -wlan1,wl-5.0G From 1e4acd410229fe3e9941a0d9fb7690379d16bdb5 Mon Sep 17 00:00:00 2001 From: perceival Date: Wed, 11 Mar 2026 05:43:51 +0100 Subject: [PATCH 480/497] OpenWrt agent: replace stale snmpd blocks and drop local README --- snmp/Openwrt/LICENSE | 339 +++++++++++++++++++++++++ snmp/Openwrt/README.md | 263 ------------------- snmp/Openwrt/cleanup-and-fix.sh | 31 --- snmp/Openwrt/distro.sh | 0 snmp/Openwrt/lm-sensors-pass.sh | 0 snmp/Openwrt/setup-snmpd.sh | 63 +++-- snmp/Openwrt/snmpd-base-config | 116 +++++++++ snmp/Openwrt/snmpd-config-generator.sh | 150 +++++------ snmp/Openwrt/wlClients.sh | 227 ++++++++++------- snmp/Openwrt/wlFrequency.sh | 3 +- snmp/Openwrt/wlInterfaces.sh | 206 +++++++++++++++ snmp/Openwrt/wlNoiseFloor.sh | 3 +- snmp/Openwrt/wlRate.sh | 27 +- snmp/Openwrt/wlSNR.sh | 25 +- 14 files changed, 942 insertions(+), 511 deletions(-) create mode 100644 snmp/Openwrt/LICENSE delete mode 100644 snmp/Openwrt/README.md delete mode 100644 snmp/Openwrt/cleanup-and-fix.sh mode change 100644 => 100755 snmp/Openwrt/distro.sh mode change 100644 => 100755 snmp/Openwrt/lm-sensors-pass.sh mode change 100644 => 100755 snmp/Openwrt/setup-snmpd.sh create mode 100644 snmp/Openwrt/snmpd-base-config mode change 100644 => 100755 snmp/Openwrt/snmpd-config-generator.sh create mode 100755 snmp/Openwrt/wlInterfaces.sh diff --git a/snmp/Openwrt/LICENSE b/snmp/Openwrt/LICENSE new file mode 100644 index 000000000..d159169d1 --- /dev/null +++ b/snmp/Openwrt/LICENSE @@ -0,0 
+1,339 @@ + GNU GENERAL PUBLIC LICENSE + Version 2, June 1991 + + Copyright (C) 1989, 1991 Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The licenses for most software are designed to take away your +freedom to share and change it. By contrast, the GNU General Public +License is intended to guarantee your freedom to share and change free +software--to make sure the software is free for all its users. This +General Public License applies to most of the Free Software +Foundation's software and to any other program whose authors commit to +using it. (Some other Free Software Foundation software is covered by +the GNU Lesser General Public License instead.) You can apply it to +your programs, too. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +this service if you wish), that you receive source code or can get it +if you want it, that you can change the software or use pieces of it +in new free programs; and that you know you can do these things. + + To protect your rights, we need to make restrictions that forbid +anyone to deny you these rights or to ask you to surrender the rights. +These restrictions translate to certain responsibilities for you if you +distribute copies of the software, or if you modify it. + + For example, if you distribute copies of such a program, whether +gratis or for a fee, you must give the recipients all the rights that +you have. You must make sure that they, too, receive or can get the +source code. And you must show them these terms so they know their +rights. 
+ + We protect your rights with two steps: (1) copyright the software, and +(2) offer you this license which gives you legal permission to copy, +distribute and/or modify the software. + + Also, for each author's protection and ours, we want to make certain +that everyone understands that there is no warranty for this free +software. If the software is modified by someone else and passed on, we +want its recipients to know that what they have is not the original, so +that any problems introduced by others will not reflect on the original +authors' reputations. + + Finally, any free program is threatened constantly by software +patents. We wish to avoid the danger that redistributors of a free +program will individually obtain patent licenses, in effect making the +program proprietary. To prevent this, we have made it clear that any +patent must be licensed for everyone's free use or not licensed at all. + + The precise terms and conditions for copying, distribution and +modification follow. + + GNU GENERAL PUBLIC LICENSE + TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION + + 0. This License applies to any program or other work which contains +a notice placed by the copyright holder saying it may be distributed +under the terms of this General Public License. The "Program", below, +refers to any such program or work, and a "work based on the Program" +means either the Program or any derivative work under copyright law: +that is to say, a work containing the Program or a portion of it, +either verbatim or with modifications and/or translated into another +language. (Hereinafter, translation is included without limitation in +the term "modification".) Each licensee is addressed as "you". + +Activities other than copying, distribution and modification are not +covered by this License; they are outside its scope. 
The act of +running the Program is not restricted, and the output from the Program +is covered only if its contents constitute a work based on the +Program (independent of having been made by running the Program). +Whether that is true depends on what the Program does. + + 1. You may copy and distribute verbatim copies of the Program's +source code as you receive it, in any medium, provided that you +conspicuously and appropriately publish on each copy an appropriate +copyright notice and disclaimer of warranty; keep intact all the +notices that refer to this License and to the absence of any warranty; +and give any other recipients of the Program a copy of this License +along with the Program. + +You may charge a fee for the physical act of transferring a copy, and +you may at your option offer warranty protection in exchange for a fee. + + 2. You may modify your copy or copies of the Program or any portion +of it, thus forming a work based on the Program, and copy and +distribute such modifications or work under the terms of Section 1 +above, provided that you also meet all of these conditions: + + a) You must cause the modified files to carry prominent notices + stating that you changed the files and the date of any change. + + b) You must cause any work that you distribute or publish, that in + whole or in part contains or is derived from the Program or any + part thereof, to be licensed as a whole at no charge to all third + parties under the terms of this License. + + c) If the modified program normally reads commands interactively + when run, you must cause it, when started running for such + interactive use in the most ordinary way, to print or display an + announcement including an appropriate copyright notice and a + notice that there is no warranty (or else, saying that you provide + a warranty) and that users may redistribute the program under + these conditions, and telling the user how to view a copy of this + License. 
(Exception: if the Program itself is interactive but + does not normally print such an announcement, your work based on + the Program is not required to print an announcement.) + +These requirements apply to the modified work as a whole. If +identifiable sections of that work are not derived from the Program, +and can be reasonably considered independent and separate works in +themselves, then this License, and its terms, do not apply to those +sections when you distribute them as separate works. But when you +distribute the same sections as part of a whole which is a work based +on the Program, the distribution of the whole must be on the terms of +this License, whose permissions for other licensees extend to the +entire whole, and thus to each and every part regardless of who wrote it. + +Thus, it is not the intent of this section to claim rights or contest +your rights to work written entirely by you; rather, the intent is to +exercise the right to control the distribution of derivative or +collective works based on the Program. + +In addition, mere aggregation of another work not based on the Program +with the Program (or with a work based on the Program) on a volume of +a storage or distribution medium does not bring the other work under +the scope of this License. + + 3. 
You may copy and distribute the Program (or a work based on it, +under Section 2) in object code or executable form under the terms of +Sections 1 and 2 above provided that you also do one of the following: + + a) Accompany it with the complete corresponding machine-readable + source code, which must be distributed under the terms of Sections + 1 and 2 above on a medium customarily used for software interchange; or, + + b) Accompany it with a written offer, valid for at least three + years, to give any third party, for a charge no more than your + cost of physically performing source distribution, a complete + machine-readable copy of the corresponding source code, to be + distributed under the terms of Sections 1 and 2 above on a medium + customarily used for software interchange; or, + + c) Accompany it with the information you received as to the offer + to distribute corresponding source code. (This alternative is + allowed only for noncommercial distribution and only if you + received the program in object code or executable form with such + an offer, in accord with Subsection b above.) + +The source code for a work means the preferred form of the work for +making modifications to it. For an executable work, complete source +code means all the source code for all modules it contains, plus any +associated interface definition files, plus the scripts used to +control compilation and installation of the executable. However, as a +special exception, the source code distributed need not include +anything that is normally distributed (in either source or binary +form) with the major components (compiler, kernel, and so on) of the +operating system on which the executable runs, unless that component +itself accompanies the executable. 
+ +If distribution of executable or object code is made by offering +access to copy from a designated place, then offering equivalent +access to copy the source code from the same place counts as +distribution of the source code, even though third parties are not +compelled to copy the source along with the object code. + + 4. You may not copy, modify, sublicense, or distribute the Program +except as expressly provided under this License. Any attempt +otherwise to copy, modify, sublicense or distribute the Program is +void, and will automatically terminate your rights under this License. +However, parties who have received copies, or rights, from you under +this License will not have their licenses terminated so long as such +parties remain in full compliance. + + 5. You are not required to accept this License, since you have not +signed it. However, nothing else grants you permission to modify or +distribute the Program or its derivative works. These actions are +prohibited by law if you do not accept this License. Therefore, by +modifying or distributing the Program (or any work based on the +Program), you indicate your acceptance of this License to do so, and +all its terms and conditions for copying, distributing or modifying +the Program or works based on it. + + 6. Each time you redistribute the Program (or any work based on the +Program), the recipient automatically receives a license from the +original licensor to copy, distribute or modify the Program subject to +these terms and conditions. You may not impose any further +restrictions on the recipients' exercise of the rights granted herein. +You are not responsible for enforcing compliance by third parties to +this License. + + 7. 
If, as a consequence of a court judgment or allegation of patent +infringement or for any other reason (not limited to patent issues), +conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot +distribute so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you +may not distribute the Program at all. For example, if a patent +license would not permit royalty-free redistribution of the Program by +all those who receive copies directly or indirectly through you, then +the only way you could satisfy both it and this License would be to +refrain entirely from distribution of the Program. + +If any portion of this section is held invalid or unenforceable under +any particular circumstance, the balance of the section is intended to +apply and the section as a whole is intended to apply in other +circumstances. + +It is not the purpose of this section to induce you to infringe any +patents or other property right claims or to contest validity of any +such claims; this section has the sole purpose of protecting the +integrity of the free software distribution system, which is +implemented by public license practices. Many people have made +generous contributions to the wide range of software distributed +through that system in reliance on consistent application of that +system; it is up to the author/donor to decide if he or she is willing +to distribute software through any other system and a licensee cannot +impose that choice. + +This section is intended to make thoroughly clear what is believed to +be a consequence of the rest of this License. + + 8. 
If the distribution and/or use of the Program is restricted in +certain countries either by patents or by copyrighted interfaces, the +original copyright holder who places the Program under this License +may add an explicit geographical distribution limitation excluding +those countries, so that distribution is permitted only in or among +countries not thus excluded. In such case, this License incorporates +the limitation as if written in the body of this License. + + 9. The Free Software Foundation may publish revised and/or new versions +of the General Public License from time to time. Such new versions will +be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + +Each version is given a distinguishing version number. If the Program +specifies a version number of this License which applies to it and "any +later version", you have the option of following the terms and conditions +either of that version or of any later version published by the Free +Software Foundation. If the Program does not specify a version number of +this License, you may choose any version ever published by the Free Software +Foundation. + + 10. If you wish to incorporate parts of the Program into other free +programs whose distribution conditions are different, write to the author +to ask for permission. For software which is copyrighted by the Free +Software Foundation, write to the Free Software Foundation; we sometimes +make exceptions for this. Our decision will be guided by the two goals +of preserving the free status of all derivatives of our free software and +of promoting the sharing and reuse of software generally. + + NO WARRANTY + + 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY +FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. 
EXCEPT WHEN +OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES +PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED +OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS +TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE +PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, +REPAIR OR CORRECTION. + + 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR +REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, +INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING +OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED +TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY +YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER +PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE +POSSIBILITY OF SUCH DAMAGES. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +convey the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software; you can redistribute it and/or modify + it under the terms of the GNU General Public License as published by + the Free Software Foundation; either version 2 of the License, or + (at your option) any later version. 
+ + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU General Public License for more details. + + You should have received a copy of the GNU General Public License along + with this program; if not, write to the Free Software Foundation, Inc., + 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +Also add information on how to contact you by electronic and paper mail. + +If the program is interactive, make it output a short notice like this +when it starts in an interactive mode: + + Gnomovision version 69, Copyright (C) year name of author + Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. + This is free software, and you are welcome to redistribute it + under certain conditions; type `show c' for details. + +The hypothetical commands `show w' and `show c' should show the appropriate +parts of the General Public License. Of course, the commands you use may +be called something other than `show w' and `show c'; they could even be +mouse-clicks or menu items--whatever suits your program. + +You should also get your employer (if you work as a programmer) or your +school, if any, to sign a "copyright disclaimer" for the program, if +necessary. Here is a sample; alter the names: + + Yoyodyne, Inc., hereby disclaims all copyright interest in the program + `Gnomovision' (which makes passes at compilers) written by James Hacker. + + , 1 April 1989 + Ty Coon, President of Vice + +This General Public License does not permit incorporating your program into +proprietary programs. If your program is a subroutine library, you may +consider it more useful to permit linking proprietary applications with the +library. If this is what you want to do, use the GNU Lesser General +Public License instead of this License. 
diff --git a/snmp/Openwrt/README.md b/snmp/Openwrt/README.md deleted file mode 100644 index 70f7c2ce2..000000000 --- a/snmp/Openwrt/README.md +++ /dev/null @@ -1,263 +0,0 @@ -# OpenWrt-LibreNMS -SNMPD OpenWrt configuration - integration for OpenWrt devices to be visible wtih more features in LibreNMS network management platform. Based on https://github.com/librenms/librenms-agent/tree/master/snmp/Openwrt - -This package provides unified, auto-detecting SNMP monitoring for OpenWrt devices with support for: -- Wireless interface metrics (clients, frequency, rate, noise, SNR) -- Thermal sensor monitoring via LM-SENSORS-MIB -- Auto-discovery of wireless interfaces -- Dynamic configuration generation - -## Key Features - -### 1. Auto-Generation of wlInterfaces.txt - -`wlClients.sh` auto-detects all wireless interfaces on first run and generates the file automatically. - -```bash -# First run automatically creates wlInterfaces.txt -/etc/librenms/wlClients.sh -``` - -### 2. Dynamic Config Generator -**Problem**: Different devices need different snmpd configs based on their wireless interfaces (ap1: wl0-ap0, wl1-ap0; ap2: wlan0, wlan02, wlan12, wlan22). - -**Solution**: `snmpd-config-generator.sh` reads wlInterfaces.txt and generates appropriate UCI config entries for all detected interfaces. - -```bash -# Generate config for current device -/etc/librenms/snmpd-config-generator.sh -``` - -### 3. Unified Base Configuration -**Problem**: Repetitive config entries across devices. - -**Solution**: /etc/config/snmpd is updated with dynamic content. - -### 4. Error Handling -All scripts have: -- Error messages -- Argument validation -- Fallback behavior -- Consistent exit codes - -### 5. 
Thermal Sensor Support (LM-SENSORS-MIB) -Uses the `pass` directive to provide proper LM-SENSORS-MIB thermal sensors: -- `.1.3.6.1.4.1.2021.13.16.2.1.1` - lmTempSensorsIndex (INTEGER) -- `.1.3.6.1.4.1.2021.13.16.2.1.2` - lmTempSensorsDevice (STRING) -- `.1.3.6.1.4.1.2021.13.16.2.1.3` - lmTempSensorsValue (Gauge32, millidegrees) - -The `pass` script (`lm-sensors-pass.sh`) provides proper data types and table structure, enabling automatic discovery in LibreNMS. - -## File Structure - -``` -/etc/librenms/ -├── wlInterfaces.txt # Auto-generated interface list -├── wlClients.sh # Count Wi-Fi clients (auto-generates wlInterfaces.txt) -├── wlFrequency.sh # Get operating frequency -├── wlNoiseFloor.sh # Get noise floor -├── wlRate.sh # Get TX/RX rates (min/avg/max) -├── wlSNR.sh # Get SNR (min/avg/max) -├── lm-sensors-pass.sh # LM-SENSORS-MIB pass script for thermal sensors -├── distro.sh # Extract OpenWrt version string -├── cleanup-and-fix.sh # Remove old exec entries -└── snmpd-config-generator.sh # Generate UCI config entries -``` - -## Installation - -### Quick Setup -```bash -# Run setup script -chmod +x setup-snmpd.sh -./setup-snmpd.sh - -### Manual Installation -```bash -# Create directory -mkdir -p /etc/librenms - -# Copy all scripts -cp wl*.sh lm-sensors-pass.sh distro.sh cleanup-and-fix.sh snmpd-config-generator.sh /etc/librenms/ -chmod +x /etc/librenms/*.sh - -# Generate interface list -/etc/librenms/wlClients.sh - -# Generate config -/etc/librenms/snmpd-config-generator.sh -``` - -## Usage - -### Generate wlInterfaces.txt -```bash -# Auto-detect all wireless interfaces -/etc/librenms/wlClients.sh - -# Manually edit if needed -vi /etc/librenms/wlInterfaces.txt -# Format: interface,ssid -# Example: -# wl0-ap0,MySSID -# wlan0,GuestNetwork -``` - -### Generate SNMPD Config -```bash -# Generate all extend entries for detected interfaces -/etc/librenms/snmpd-config-generator.sh - -# Output can be appended to /etc/config/snmpd -/etc/librenms/snmpd-config-generator.sh 
>> /etc/config/snmpd -``` - -### Test Scripts Individually -```bash -# Test client count -/etc/librenms/wlClients.sh wlan0 - -# Test frequency -/etc/librenms/wlFrequency.sh wlan0 - -# Test rate (interface, direction, stat) -/etc/librenms/wlRate.sh wlan0 tx avg - -# Test SNR (interface, stat) -/etc/librenms/wlSNR.sh wlan0 avg - -# Test thermal sensors (pass script) -/etc/librenms/lm-sensors-pass.sh -g .1.3.6.1.4.1.2021.13.16.2.1.3.0 -``` - -### Query SNMP -```bash -# From monitoring server -snmpwalk -v2c -c public localhost .1.3.6.1.4.1.8072.1.3.2 - -# Specific metrics -snmpget -v2c -c public localhost NET-SNMP-EXTEND-MIB::nsExtendOutput1Line.\"clients-wlan0\" -snmpget -v2c -c public localhost NET-SNMP-EXTEND-MIB::nsExtendOutput1Line.\"frequency-wlan0\" - -# Thermal sensors (LM-SENSORS-MIB) -snmpwalk -v2c -c public localhost LM-SENSORS-MIB::lmTempSensorsValue -``` - -## SNMP OID Reference - -### Wireless Metrics (via nsExtend) -Base OID: `.1.3.6.1.4.1.8072.1.3.2` - -Per interface: -- `clients-` - Connected client count -- `frequency-` - Operating frequency (MHz) -- `rate-tx--min/avg/max` - TX bitrate stats -- `rate-rx--min/avg/max` - RX bitrate stats -- `noise-floor-` - Noise floor (dBm) -- `snr--min/avg/max` - Signal-to-Noise Ratio (dB) - -### Thermal Sensors (LM-Sensors MIB) -- `.1.3.6.1.4.1.2021.13.16.2.1.1` - lmSensorsIndex (INTEGER) -- `.1.3.6.1.4.1.2021.13.16.2.1.2` - lmSensorsDevice (STRING) -- `.1.3.6.1.4.1.2021.13.16.2.1.3` - lmSensorsValue (Gauge32, millidegrees) - -## Configuration Examples - -### Example: 4 Interface Device (like native OpenWrt) -wlInterfaces.txt: -``` -wl0-ap0,IoT -wl0-ap1,guest -wl1-ap0,main -wl1-ap1,uplink -``` - -Generated extends: -- clients-wl0-ap0, clients-wl0-ap1, clients-wl1-ap0, clients-wl1-ap1 -- frequency-wl0-ap0, frequency-wl0-ap1, frequency-wl1-ap0, frequency-wl1-ap1 -- rate-tx-wl0-ap0-min/avg/max (and all other interfaces) -- rate-rx-wl0-ap0-min/avg/max (and all other interfaces) -- And so on... 
- -### Example: Multi-VLAN Device (like gl.Inet flint3) -wlInterfaces.txt: -``` -wlan0,MainNetwork -wlan02,VLAN2 -wlan12,VLAN12 -wlan22,VLAN22 -``` - -## Troubleshooting - -### wlInterfaces.txt not generated -```bash -# Check for wireless interfaces -ls /sys/class/net/*/wireless -ls /sys/class/net/*/phy80211 - -# Manually create the file -cat > /etc/librenms/wlInterfaces.txt << EOF -wlan0,YourSSID -EOF -``` - -### SNMP not returning data -```bash -# Check if snmpd is running -/etc/init.d/snmpd status - -# Check if scripts are executable -ls -la /etc/librenms/*.sh - -# Test script directly -/etc/librenms/wlClients.sh wlan0 - -# Check snmpd logs -logread | grep snmpd -``` - -### Script errors -```bash -# Enable debug output -sh -x /etc/librenms/wlClients.sh wlan0 - -# Check for required commands -which iw iwinfo awk cut grep -``` - -## Comparison: Before vs After - -### Before (Manual, Per-Device) -❌ Required manual creation of wlInterfaces.txt for each device -❌ Different config file for each device type -❌ Repetitive config entries (280+ lines) -❌ Hard to maintain across multiple devices -❌ Interface changes require manual config updates - -### After (Automated, Unified) -✅ Auto-detects wireless interfaces -✅ Single config generator works for all devices -✅ Generates only needed entries -✅ Easy to maintain and replicate -✅ Interface changes auto-detected on script run - -## Benefits - -1. **Zero Manual Configuration**: Just run setup script -2. **Device-Agnostic**: Works on any OpenWrt device -3. **Self-Documenting**: Auto-generated configs show what's monitored -4. **Easy Replication**: Same process for all devices -5. **Future-Proof**: Adding interfaces doesn't require config changes -6. **Reduced Errors**: No manual typing of repetitive entries -7. 
**Consistent**: All devices use same monitoring approach - -## Security Notes - -- Default SNMP community strings should be changed in production -- Restrict SNMP access to monitoring network (192.168.0.0/24 in examples) -- Use SNMPv3 for better security if supported by your NMS - -## License - -These scripts are provided as-is for use with OpenWrt systems. diff --git a/snmp/Openwrt/cleanup-and-fix.sh b/snmp/Openwrt/cleanup-and-fix.sh deleted file mode 100644 index 4d4637b8f..000000000 --- a/snmp/Openwrt/cleanup-and-fix.sh +++ /dev/null @@ -1,31 +0,0 @@ -#!/bin/sh - -# cleanup-and-fix.sh -# Run this on the OpenWrt device to: -# 1. Remove old broken exec entries -# 2. Verify pass script is working - -echo "Step 1: Removing old exec entries for lmSensors..." - -# Remove exec entries with miboid containing 2021.13.16 -uci show snmpd | grep "exec.*=exec" | cut -d'.' -f2 | cut -d'=' -f1 | while read idx; do - miboid=$(uci get snmpd.$idx.miboid 2>/dev/null) - if echo "$miboid" | grep -q "2021\.13\.16"; then - echo " Removing snmpd.$idx (miboid: $miboid)" - uci delete snmpd.$idx - fi -done - -uci commit snmpd - -echo "" -echo "Step 2: Verifying pass configuration..." -uci show snmpd | grep "pass.*lm-sensors" - -echo "" -echo "Step 3: Restarting snmpd..." -/etc/init.d/snmpd restart - -echo "" -echo "Done! 
Now test with:" -echo " snmpwalk -v2c -c public localhost LM-SENSORS-MIB::lmTempSensorsValue" diff --git a/snmp/Openwrt/distro.sh b/snmp/Openwrt/distro.sh old mode 100644 new mode 100755 diff --git a/snmp/Openwrt/lm-sensors-pass.sh b/snmp/Openwrt/lm-sensors-pass.sh old mode 100644 new mode 100755 diff --git a/snmp/Openwrt/setup-snmpd.sh b/snmp/Openwrt/setup-snmpd.sh old mode 100644 new mode 100755 index adda05375..49ada323d --- a/snmp/Openwrt/setup-snmpd.sh +++ b/snmp/Openwrt/setup-snmpd.sh @@ -11,6 +11,38 @@ echo "OpenWrt SNMPD Setup Script" echo "==========================" echo "" +remove_managed_snmpd_sections() { + tmp_clean=$(mktemp) + awk ' + BEGIN { RS=""; ORS="\n\n" } + { + block = $0 + managed = 0 + + if (block ~ /LIBRENMS_OPENWRT_AUTOGEN_BEGIN/ || block ~ /LIBRENMS_OPENWRT_AUTOGEN_END/) { + managed = 1 + } + if (block ~ /config extend/ && block ~ /option name '\''interfaces'\''/) { + managed = 1 + } + if (block ~ /config extend/ && block ~ /option name '\''clients-wlan'\''/) { + managed = 1 + } + if (block ~ /config extend/ && block ~ /option name '\''(clients|wl-clients|frequency|noise-floor|rate|snr)-[^'\'']+'\''/) { + managed = 1 + } + if (block ~ /config pass/ && (block ~ /option name '\''lm-sensors'\''/ || block ~ /option prog '\''\/etc\/librenms\/lm-sensors-pass.sh'\''/)) { + managed = 1 + } + + if (!managed) { + print block + } + } + ' /etc/config/snmpd > "$tmp_clean" + mv "$tmp_clean" /etc/config/snmpd +} + # Create directories echo "Creating directories..." mkdir -p "$SCRIPT_DIR" @@ -26,7 +58,7 @@ fi # Copy scripts to /etc/librenms/ echo "Installing monitoring scripts to $SCRIPT_DIR..." 
-scripts="wlClients.sh wlFrequency.sh wlNoiseFloor.sh wlRate.sh wlSNR.sh lm-sensors-pass.sh distro.sh cleanup-and-fix.sh snmpd-config-generator.sh" +scripts="wlInterfaces.sh wlClients.sh wlFrequency.sh wlNoiseFloor.sh wlRate.sh wlSNR.sh lm-sensors-pass.sh distro.sh snmpd-config-generator.sh" for script in $scripts; do if [ -f "$script" ]; then @@ -38,19 +70,6 @@ for script in $scripts; do fi done -# Generate wlInterfaces.txt -echo "" -echo "Generating wlInterfaces.txt..." -# Force regeneration by removing old file -rm -f "$SCRIPT_DIR/wlInterfaces.txt" -"$SCRIPT_DIR/wlClients.sh" > /dev/null 2>&1 -if [ -f "$SCRIPT_DIR/wlInterfaces.txt" ]; then - echo " ✓ Generated $SCRIPT_DIR/wlInterfaces.txt" - cat "$SCRIPT_DIR/wlInterfaces.txt" -else - echo " ✗ Failed to generate wlInterfaces.txt" -fi - # Generate sample config echo "" echo "Generating SNMPD configuration..." @@ -60,7 +79,7 @@ echo " $SCRIPT_DIR/snmpd-config-generator.sh" echo "" echo "To apply the configuration:" echo " 1. Backup your current config: cp /etc/config/snmpd /etc/config/snmpd.backup" -echo " 2. Edit /etc/config/snmpd and add the generated sections" +echo " 2. Replace old LibreNMS wireless sections with the generated block" echo " 3. Restart snmpd: /etc/init.d/snmpd restart" echo "" echo "Setup complete!" @@ -77,15 +96,17 @@ if [ -z "$answer" ] || [ "$answer" = "y" ]; then # 1. Backup existing config cp /etc/config/snmpd /etc/config/snmpd-backup - - # 2. Append generated config - # Ensure the generator script is executable + + # 2. Remove previously managed LibreNMS wireless sections + remove_managed_snmpd_sections + + # 3. Append one fresh generated config block chmod +x "$SCRIPT_DIR/snmpd-config-generator.sh" "$SCRIPT_DIR/snmpd-config-generator.sh" >> /etc/config/snmpd - - # 3. Restart the service + + # 4. Restart the service /etc/init.d/snmpd restart - + echo "Done! Service restarted." else echo "Aborted. No changes made." 
diff --git a/snmp/Openwrt/snmpd-base-config b/snmp/Openwrt/snmpd-base-config new file mode 100644 index 000000000..d9a31a61d --- /dev/null +++ b/snmp/Openwrt/snmpd-base-config @@ -0,0 +1,116 @@ + +config agent + option agentaddress 'UDP:161,UDP6:161' + +config agentx + option agentxsocket '/var/run/agentx.sock' + +config com2sec 'public' + option secname 'ro' + option source 'default' + option community 'public' + +config com2sec 'private' + option secname 'rw' + option source 'localhost' + option community 'private' + +config com2sec6 'public6' + option secname 'ro' + option source 'default' + option community 'public' + +config com2sec6 'private6' + option secname 'rw' + option source 'localhost' + option community 'private' + +config group 'public_v1' + option group 'public' + option version 'v1' + option secname 'ro' + +config group 'public_v2c' + option group 'public' + option version 'v2c' + option secname 'ro' + +config group 'public_usm' + option group 'public' + option version 'usm' + option secname 'ro' + +config group 'private_v1' + option group 'private' + option version 'v1' + option secname 'rw' + +config group 'private_v2c' + option group 'private' + option version 'v2c' + option secname 'rw' + +config group 'private_usm' + option group 'private' + option version 'usm' + option secname 'rw' + +config view 'all' + option viewname 'all' + option type 'included' + option oid '.1' + +config access 'public_access' + option group 'public' + option context 'none' + option version 'any' + option level 'noauth' + option prefix 'exact' + option read 'all' + option write 'none' + option notify 'none' + +config access 'private_access' + option group 'private' + option context 'none' + option version 'any' + option level 'noauth' + option prefix 'exact' + option read 'all' + option write 'all' + option notify 'all' + +# System information +config extend + option name 'distro' + option prog '/etc/librenms/distro.sh' + +config extend + option name 'hardware' + 
option prog '/bin/cat' + option args '/sys/firmware/devicetree/base/model' + +config extend + option name 'interfaces' + option prog '/etc/librenms/wlInterfaces.sh' + +config extend + option name 'clients-wlan' + option prog '/etc/librenms/wlClients.sh' + +# Per-interface wireless metrics +# AUTO-GENERATED SECTION - Run /etc/librenms/snmpd-config-generator.sh to update +# ============================================================================ + +# Thermal sensors (LM-SENSORS-MIB via pass) +config pass + option name 'lm-sensors' + option prog '/etc/librenms/lm-sensors-pass.sh' + option miboid '.1.3.6.1.4.1.2021.13.16.2.1' + +config engineid + option engineidtype '3' + option engineidnic 'br-lan' + +config snmpd 'general' + option enabled '1' diff --git a/snmp/Openwrt/snmpd-config-generator.sh b/snmp/Openwrt/snmpd-config-generator.sh old mode 100644 new mode 100755 index b6e1e38c3..fff05f907 --- a/snmp/Openwrt/snmpd-config-generator.sh +++ b/snmp/Openwrt/snmpd-config-generator.sh @@ -1,91 +1,77 @@ #!/bin/sh - -# snmpd-config-generator.sh -# Generates SNMP extend entries for all wireless interfaces dynamically -# Usage: Run this script to generate UCI config commands for /etc/config/snmpd +set -eu +# snmpd-config-generator.sh - LibreNMS OpenWrt wireless SNMP extends (file-less) SCRIPT_DIR="/etc/librenms" -INTERFACES_FILE="$SCRIPT_DIR/wlInterfaces.txt" +printf "\n# Generated %s - Wireless + sensors\n" "$(date)" +echo "# LIBRENMS_OPENWRT_AUTOGEN_BEGIN" + +cat << EOF + +# Interface map for LibreNMS OpenWrt wireless discovery +config extend + option name 'interfaces' + option prog '$SCRIPT_DIR/wlInterfaces.sh' + +# Aggregate client count across active wireless interfaces +config extend + option name 'clients-wlan' + option prog '$SCRIPT_DIR/wlClients.sh' + +EOF -# Ensure wlInterfaces.txt exists -if [ ! -f "$INTERFACES_FILE" ]; then - echo "Generating $INTERFACES_FILE..." 
- $SCRIPT_DIR/wlClients.sh >/dev/null 2>&1 -fi +# Live ubus discovery (ap1 wl*, ap2 wlan*, risc phy*) +ubus list hostapd.* 2>/dev/null | sed 's/^hostapd\.//' | while IFS= read -r iface; do + # Robust ssid (handles JSON variance) + ssid=$(ubus call "hostapd.$iface" get_status 2>/dev/null | \ + sed -n 's/.*"ssid":"\?\([^",]*\)"\?.*/\1/p' | head -1 || echo unknown) + [ -n "$iface" ] || continue -# Read interfaces -if [ ! -f "$INTERFACES_FILE" ]; then - echo "Error: Could not find or generate $INTERFACES_FILE" - exit 1 -fi + [ -n "$ssid" ] || ssid="$iface" -# Generate config for each interface -cat "$INTERFACES_FILE" | while IFS=',' read -r iface ssid; do - [ -z "$iface" ] && continue - - # Sanitize interface name for use in UCI names (replace - with _) - safe_name=$(echo "$iface" | tr '-' '_') - - echo "" - echo "# Interface: $iface ($ssid)" - echo "" - - # Clients - echo "config extend" - echo " option name 'clients-$iface'" - echo " option prog '$SCRIPT_DIR/wlClients.sh'" - echo " option args '$iface'" - echo "" - - # Frequency - echo "config extend" - echo " option name 'frequency-$iface'" - echo " option prog '$SCRIPT_DIR/wlFrequency.sh'" - echo " option args '$iface'" - echo "" - - # Rate TX (min, avg, max) - for stat in min avg max; do - echo "config extend" - echo " option name 'rate-tx-$iface-$stat'" - echo " option prog '$SCRIPT_DIR/wlRate.sh'" - echo " option args '$iface tx $stat'" - echo "" - done - - # Rate RX (min, avg, max) - for stat in min avg max; do - echo "config extend" - echo " option name 'rate-rx-$iface-$stat'" - echo " option prog '$SCRIPT_DIR/wlRate.sh'" - echo " option args '$iface rx $stat'" - echo "" - done - - # Noise floor - echo "config extend" - echo " option name 'noise-floor-$iface'" - echo " option prog '$SCRIPT_DIR/wlNoiseFloor.sh'" - echo " option args '$iface'" - echo "" - - # SNR (min, avg, max) - for stat in min avg max; do - echo "config extend" - echo " option name 'snr-$iface-$stat'" - echo " option prog 
'$SCRIPT_DIR/wlSNR.sh'" - echo " option args '$iface $stat'" - echo "" - done + cat << EOF + +# $ssid ($iface) +config extend + option name 'clients-$iface' + option prog '$SCRIPT_DIR/wlClients.sh' + option args '$iface' + +config extend + option name 'frequency-$iface' + option prog '$SCRIPT_DIR/wlFrequency.sh' + option args '$iface' + +config extend + option name 'noise-floor-$iface' + option prog '$SCRIPT_DIR/wlNoiseFloor.sh' + option args '$iface' + +EOF + # Rates/SNR loops (add your for min/avg/max...) + for dir in tx rx; do for stat in min avg max; do cat << EOF +config extend + option name 'rate-${dir}-$iface-$stat' + option prog '$SCRIPT_DIR/wlRate.sh' + option args '$iface $dir $stat' +EOF + done; done + for stat in min avg max; do cat << EOF +config extend + option name 'snr-$iface-$stat' + option prog '$SCRIPT_DIR/wlSNR.sh' + option args '$iface $stat' +EOF + done done -# Generate thermal sensor config using pass (LM-SENSORS-MIB) -echo "" -echo "# Thermal Sensors (LM-SENSORS-MIB via pass)" -echo "" +cat << EOF + +# Sensors (always) +config pass + option name 'lm-sensors' + option prog '$SCRIPT_DIR/lm-sensors-pass.sh' + option miboid '.1.3.6.1.4.1.2021.13.16.2.1' +EOF -echo "config pass" -echo " option name 'lm-sensors'" -echo " option prog '$SCRIPT_DIR/lm-sensors-pass.sh'" -echo " option miboid '.1.3.6.1.4.1.2021.13.16.2.1'" -echo "" +echo "# LIBRENMS_OPENWRT_AUTOGEN_END" diff --git a/snmp/Openwrt/wlClients.sh b/snmp/Openwrt/wlClients.sh index 0e9579e82..8f072ecce 100755 --- a/snmp/Openwrt/wlClients.sh +++ b/snmp/Openwrt/wlClients.sh @@ -1,101 +1,150 @@ #!/bin/sh +set -e -# wlClients.sh -# Counts connected (associated) Wi-Fi devices -# Arguments: target interface. 
Assumes all interfaces if no argument -# Auto-generates wlInterfaces.txt if it doesn't exist +# wlClients.sh - OpenWrt wireless client count via ubus or fallback +# Usage: wlClients.sh [interface] # outputs integer count, always exit 0 for SNMP -# Get path to this script (ash-compatible) scriptdir="$(cd "$(dirname "$0")" && pwd)" -interfaces_file="$scriptdir/wlInterfaces.txt" - -# Function to auto-detect and generate wlInterfaces.txt -generate_interfaces_file() { - local tmpfile="$interfaces_file.tmp" - - # Find all wireless interfaces that are actually in use - for dev in /sys/class/net/*; do - iface=$(basename "$dev") - - # Skip known non-client interfaces - case "$iface" in - mld*|mon.*) #|wifi*|phy*|wlan-* - continue - ;; - esac - - # Check if it's a wireless interface - if [ -d "$dev/wireless" ] || [ -d "$dev/phy80211" ]; then - # Get interface type and SSID using iw first - iw_info=$(/usr/sbin/iw dev "$iface" info 2>/dev/null) - iface_type=$(echo "$iw_info" | /usr/bin/awk '/^[[:space:]]*type / {print $2; exit}') - ssid=$(echo "$iw_info" | /bin/grep ssid | /usr/bin/cut -f 2 -s -d" " | /usr/bin/tr -d '\n') - - # Skip AP/VLAN interfaces which can report "ESSID: unknown" - [ "$iface_type" = "AP/VLAN" ] && continue - - # If no SSID from iw, try iwinfo - if [ -z "$ssid" ]; then - ssid=$(/usr/bin/iwinfo "$iface" info 2>/dev/null | /bin/sed -n \ - -e 's/.*ESSID: "\(.*\)".*/\1/p' \ - -e 's/.*ESSID: \(.*\)$/\1/p' | /usr/bin/head -n 1) - fi - - # Skip interfaces without SSID (not active AP/client interfaces) - [ -z "$ssid" ] && continue - - # Skip malformed or unknown SSIDs - case "$ssid" in - unknown|*ESSID:*) - continue - ;; - esac - - # Add to list (include even if DOWN, since SSID means it's configured) - echo "$iface,$ssid" >> "$tmpfile" - fi - done - - # Only replace if we found interfaces - if [ -s "$tmpfile" ]; then - mv "$tmpfile" "$interfaces_file" - return 0 - else - rm -f "$tmpfile" - return 1 - fi +interfaces_script="$scriptdir/wlInterfaces.sh" + 
+normalize_iface() { + # Keep only expected interface characters and strip CR/LF noise. + printf '%s' "$1" | tr -d '\r\n' | sed 's/^[[:space:]]*//; s/[[:space:]]*$//' | sed 's/[^A-Za-z0-9._:-]//g' } -# Check if wlInterfaces.txt exists, generate if not -if [ ! -f "$interfaces_file" ]; then - generate_interfaces_file - if [ $? -ne 0 ]; then - /bin/echo "Error: Could not generate $interfaces_file and file does not exist" - exit 1 - fi -fi +count_iface_clients() { + iface="$1" + new=0 -# Check number of arguments -if [ $# -gt 1 ]; then - /bin/echo "Usage: wlClients.sh [interface]" - /bin/echo "Too many command line arguments, exiting." - exit 1 -fi + if ubus call "hostapd.$iface" get_clients >/dev/null 2>&1; then + new=$(list_assoc_macs_ubus "$iface" | awk 'NF' | sort -u | wc -l | awk '{print $1}') + else + # Fallback: iwinfo for generic OpenWrt + new=$(list_assoc_macs_iwinfo "$iface" | awk 'NF' | sort -u | wc -l | awk '{print $1}') + fi + + # Normalize to a single integer token to avoid arithmetic parse errors. 
+ new=$(printf '%s\n' "$new" | awk 'NF {print $1; exit}') + case "$new" in + ''|*[!0-9]*) new=0 ;; + esac + + printf '%s\n' "$new" +} + +list_assoc_macs_ubus() { + iface="$1" + ubus call "hostapd.$iface" get_clients 2>/dev/null | awk ' + BEGIN { IGNORECASE=1; mac=""; mld="" } + /^[[:space:]]*"([0-9a-f]{2}:){5}[0-9a-f]{2}"[[:space:]]*:[[:space:]]*\{/ { + if (mac != "") { + if (mld != "" && mld != "00:00:00:00:00:00") { + print mld + } else { + print mac + } + } + if (match($0, /"([0-9a-f]{2}:){5}[0-9a-f]{2}"/)) { + mac=tolower(substr($0, RSTART+1, RLENGTH-2)) + mld="" + } + } + /"mld_addr"[[:space:]]*:[[:space:]]*"([0-9a-f]{2}:){5}[0-9a-f]{2}"/ { + if (match($0, /"([0-9a-f]{2}:){5}[0-9a-f]{2}"[[:space:]]*$/)) { + val=tolower(substr($0, RSTART+1, RLENGTH-2)) + if (val != "00:00:00:00:00:00") { + mld=val + } + } else if (match($0, /"([0-9a-f]{2}:){5}[0-9a-f]{2}"/)) { + val=tolower(substr($0, RSTART+1, RLENGTH-2)) + if (val != "00:00:00:00:00:00") { + mld=val + } + } + } + END { + if (mac != "") { + if (mld != "" && mld != "00:00:00:00:00:00") { + print mld + } else { + print mac + } + } + } + ' +} + +list_assoc_macs_iwinfo() { + iface="$1" + iwinfo "$iface" assoclist 2>/dev/null | awk ' + BEGIN { IGNORECASE=1 } + /^[[:space:]]*([0-9a-f]{2}:){5}[0-9a-f]{2}[[:space:]]/ { + print tolower($1) + } + ' +} + +count_aggregate_unique_clients() { + interfaces="$1" + + if [ -z "$interfaces" ]; then + echo 0 + return + fi -# Get interface list. 
Set target, which is name returned for interface -if [ "$1" ]; then - interfaces=$1 + tmp="/tmp/wlClients.macs.$$" + trap 'rm -f "$tmp"' EXIT + : > "$tmp" + + for iface in $interfaces; do + iface=$(normalize_iface "$iface") + [ -n "$iface" ] || continue + + if ubus call "hostapd.$iface" get_clients >/dev/null 2>&1; then + list_assoc_macs_ubus "$iface" >> "$tmp" || true + else + list_assoc_macs_iwinfo "$iface" >> "$tmp" || true + fi + done + + awk 'NF' "$tmp" | sort -u | wc -l | awk '{print $1}' +} + +get_live_interfaces() { + # Reuse wlInterfaces.sh so we only count interfaces currently exported to LibreNMS. + if [ -x "$interfaces_script" ]; then + "$interfaces_script" 2>/dev/null | while IFS= read -r line; do + iface=$(printf '%s' "$line" | cut -d',' -f1) + iface=$(normalize_iface "$iface") + [ -n "$iface" ] && printf '%s\n' "$iface" + done | awk '!seen[$0]++' + fi +} + +# Args: single iface or all +if [ "${1:-}" ]; then + interfaces="$(normalize_iface "$1")" else - interfaces=$(cat "$interfaces_file" | cut -f 1 -d",") + interfaces=$(get_live_interfaces) fi -# Count associated devices count=0 -for interface in $interfaces -do - new=$(/usr/sbin/iw dev "$interface" station dump 2>/dev/null | /bin/grep Station | /usr/bin/cut -f 2 -s -d" " | /usr/bin/wc -l) - count=$(( count + new )) -done - -# Return snmp result -/bin/echo $count + +if [ "${1:-}" ]; then + # Per-interface mode (used by clients-) + for iface in $interfaces; do + iface=$(normalize_iface "$iface") + [ -n "$iface" ] || continue + new=$(count_iface_clients "$iface") + count=$((count + new)) + done +else + # Aggregate mode (clients-wlan): dedupe MACs to avoid MLO/MLD double-counting. + count=$(count_aggregate_unique_clients "$interfaces") +fi + +# Output count first line for nsExtendOutput1Line queries. 
+echo "$count" +echo "# wlClients for $interfaces" +exit 0 + diff --git a/snmp/Openwrt/wlFrequency.sh b/snmp/Openwrt/wlFrequency.sh index e4bcd1cac..67133e1ff 100755 --- a/snmp/Openwrt/wlFrequency.sh +++ b/snmp/Openwrt/wlFrequency.sh @@ -14,5 +14,6 @@ fi # Extract frequency frequency=$(/usr/sbin/iw dev "$1" info 2>/dev/null | /bin/grep channel | /usr/bin/cut -f 2- -s -d" " | /usr/bin/cut -f 2- -s -d"(" | /usr/bin/cut -f 1 -s -d" ") -# Return snmp result +# Return snmp result (multi-line for nsExtendOutputFull) /bin/echo "$frequency" +/bin/echo "# wlFrequency for $1" diff --git a/snmp/Openwrt/wlInterfaces.sh b/snmp/Openwrt/wlInterfaces.sh new file mode 100755 index 000000000..cd90f80bc --- /dev/null +++ b/snmp/Openwrt/wlInterfaces.sh @@ -0,0 +1,206 @@ +#!/bin/sh +set -eu + +# wlInterfaces.sh - Emit OpenWrt wireless interfaces for LibreNMS. +# Output format: , +# +# Display name behavior: +# - Prefer SSID (ex: bmg) +# - If multiple interfaces share SSID, append band suffix (ex: moodsy24, moodsy5, moodsy6) + +tmp="/tmp/wlInterfaces.$$.tmp" +trap 'rm -f "$tmp"' EXIT + +get_json_value() { + key="$1" + if command -v jsonfilter >/dev/null 2>&1; then + jsonfilter -e "@.$key" 2>/dev/null || true + else + sed -n "s/.*\"$key\"[[:space:]]*:[[:space:]]*\"\([^\"]*\)\".*/\1/p" | head -1 + fi +} + +get_ssid_from_hostapd() { + iface="$1" + hapd="hostapd.$iface" + + status_json=$(ubus call "$hapd" get_status 2>/dev/null || true) + ssid=$(printf '%s' "$status_json" | get_json_value ssid) + if [ -n "$ssid" ]; then + printf '%s' "$ssid" + return + fi + + config_json=$(ubus call "$hapd" get_config 2>/dev/null || true) + ssid=$(printf '%s' "$config_json" | get_json_value ssid) + if [ -n "$ssid" ]; then + printf '%s' "$ssid" + fi +} + +is_iface_active_hostapd() { + iface="$1" + hapd="hostapd.$iface" + status_json=$(ubus call "$hapd" get_status 2>/dev/null || true) + + # Accept common active states across OpenWrt variants. 
+ up=$(printf '%s' "$status_json" | get_json_value up) + state=$(printf '%s' "$status_json" | get_json_value state) + status=$(printf '%s' "$status_json" | get_json_value status) + + case "$up" in + 1|true|TRUE|True) return 0 ;; + esac + case "$state" in + ENABLED|enabled|RUNNING|running) return 0 ;; + esac + case "$status" in + ENABLED|enabled|RUNNING|running) return 0 ;; + esac + + return 1 +} + +get_ssid_from_iwinfo() { + iface="$1" + iwinfo "$iface" info 2>/dev/null | sed -n 's/.*ESSID:[[:space:]]*"\([^"]*\)".*/\1/p' | head -1 +} + +get_freq_mhz_from_hostapd() { + iface="$1" + hapd="hostapd.$iface" + status_json=$(ubus call "$hapd" get_status 2>/dev/null || true) + printf '%s' "$status_json" | get_json_value freq +} + +get_band_suffix() { + iface="$1" + freq_mhz_hint="${2:-}" + + # If we already have a hostapd freq (MHz), use it directly. + if [ -n "$freq_mhz_hint" ]; then + case "$freq_mhz_hint" in + *[!0-9]*|'') ;; + *) + if [ "$freq_mhz_hint" -ge 5925 ]; then + printf '6' + return + elif [ "$freq_mhz_hint" -ge 4900 ]; then + printf '5' + return + elif [ "$freq_mhz_hint" -ge 2300 ]; then + printf '24' + return + fi + ;; + esac + fi + + info=$(iwinfo "$iface" info 2>/dev/null || true) + + # Prefer explicit frequency in GHz if present. + freq=$(printf '%s\n' "$info" | sed -n 's/.*Frequency:[[:space:]]*\([0-9.]\+\).*/\1/p' | head -1) + freq_mhz='' + if [ -n "$freq" ]; then + freq_mhz=$(printf '%s\n' "$freq" | awk '{printf "%d", ($1 * 1000)}') + fi + # Fall back to channel mapping if frequency wasn't parsed. 
+ channel=$(printf '%s\n' "$info" | sed -n 's/.*Channel:[[:space:]]*\([0-9]\+\).*/\1/p' | head -1) + + if [ -n "$freq_mhz" ]; then + if [ "$freq_mhz" -ge 5925 ]; then + printf '6' + return + elif [ "$freq_mhz" -ge 4900 ]; then + printf '5' + return + elif [ "$freq_mhz" -ge 2300 ]; then + printf '24' + return + fi + fi + + case "$freq" in + 2.*) printf '24' ;; + 5.*) printf '5' ;; + 6.*) printf '6' ;; + *) + case "$channel" in + '' ) printf '' ;; + [1-9]|1[0-4]) printf '24' ;; + 3[0-9]|4[0-9]|5[0-9]|6[0-9]|7[0-9]|8[0-9]|9[0-9]|1[0-7][0-9]) printf '5' ;; + 2[0-9][0-9]|3[0-9][0-9]) printf '6' ;; + *) printf '' ;; + esac + ;; + esac +} + +emit_iface_records() { + if ubus list hostapd.* >/dev/null 2>&1; then + ubus list hostapd.* 2>/dev/null | sed 's/^hostapd\.//' | while IFS= read -r iface; do + [ -n "$iface" ] || continue + is_iface_active_hostapd "$iface" || continue + ssid=$(get_ssid_from_hostapd "$iface") + if [ -z "$ssid" ] && command -v iwinfo >/dev/null 2>&1; then + ssid=$(get_ssid_from_iwinfo "$iface") + fi + band='' + if command -v iwinfo >/dev/null 2>&1; then + freq_mhz=$(get_freq_mhz_from_hostapd "$iface") + band=$(get_band_suffix "$iface" "$freq_mhz") + fi + [ -n "$ssid" ] || ssid="$iface" + printf '%s\t%s\t%s\n' "$iface" "$ssid" "$band" + done + return + fi + + if command -v iw >/dev/null 2>&1; then + iw dev 2>/dev/null | awk '/Interface /{print $2}' | while IFS= read -r iface; do + [ -n "$iface" ] || continue + ssid="$iface" + band='' + if command -v iwinfo >/dev/null 2>&1; then + found_ssid=$(get_ssid_from_iwinfo "$iface") + [ -n "$found_ssid" ] && ssid="$found_ssid" + band=$(get_band_suffix "$iface") + fi + printf '%s\t%s\t%s\n' "$iface" "$ssid" "$band" + done + return + fi + + if command -v iwinfo >/dev/null 2>&1; then + iwinfo 2>/dev/null | awk '{print $1}' | while IFS= read -r iface; do + [ -n "$iface" ] || continue + ssid=$(get_ssid_from_iwinfo "$iface") + [ -n "$ssid" ] || ssid="$iface" + band=$(get_band_suffix "$iface") + printf '%s\t%s\t%s\n' 
"$iface" "$ssid" "$band" + done + fi +} + +emit_iface_records > "$tmp" + +awk -F '\t' ' + { rows[NR]=$0; ssid=$2; if (ssid != "") count[ssid]++ } + END { + for (i=1; i<=NR; i++) { + split(rows[i], f, "\t") + iface=f[1]; ssid=f[2]; band=f[3] + label=ssid + if (count[ssid] > 1 && band != "" && ssid != iface) { + label=ssid band + } + if (label == "" || label == iface) { + printf "%s,%s\n", iface, iface + } else { + printf "%s,%s (%s)\n", iface, iface, label + } + } + } +' "$tmp" + +exit 0 diff --git a/snmp/Openwrt/wlNoiseFloor.sh b/snmp/Openwrt/wlNoiseFloor.sh index 9f646954c..e4eaf9174 100755 --- a/snmp/Openwrt/wlNoiseFloor.sh +++ b/snmp/Openwrt/wlNoiseFloor.sh @@ -15,5 +15,6 @@ fi # Use tail, not head (i.e. last line, not first), as head exits immediately, breaks the pipe to cut! noise=$(/usr/bin/iwinfo "$1" assoclist 2>/dev/null | grep -v "^$" | /usr/bin/cut -s -d "/" -f 2 | /usr/bin/cut -s -d "(" -f 1 | /usr/bin/cut -s -d " " -f 2 | /usr/bin/tail -1) -# Return snmp result +# Return snmp result (multi-line for nsExtendOutputFull) /bin/echo "$noise" +/bin/echo "# wlNoiseFloor for $1" diff --git a/snmp/Openwrt/wlRate.sh b/snmp/Openwrt/wlRate.sh index d53068560..fc512cbda 100755 --- a/snmp/Openwrt/wlRate.sh +++ b/snmp/Openwrt/wlRate.sh @@ -17,16 +17,19 @@ fi # Calculate result. Sum just for debug, and have to return integer # => If not integer (e.g. 2.67e+07), LibreNMS will drop the exponent (result, 2.67 bits/sec!) 
ratelist=$(/usr/sbin/iw dev "$1" station dump 2>/dev/null | /bin/grep "$2 bitrate" | /usr/bin/cut -f 2 -s -d" ") -result=0 -if [ "$3" = "sum" ]; then - result=$(/bin/echo "$ratelist" | /usr/bin/awk -F ':' '{sum += $2} END {printf "%d\n", 1000000*sum}') -elif [ "$3" = "avg" ]; then - result=$(/bin/echo "$ratelist" | /usr/bin/awk -F ':' '{sum += $2} END {printf "%d\n", 1000000*sum/NR}') -elif [ "$3" = "min" ]; then - result=$(/bin/echo "$ratelist" | /usr/bin/awk -F ':' 'NR == 1 || $2 < min {min = $2} END {printf "%d\n", 1000000*min}') -elif [ "$3" = "max" ]; then - result=$(/bin/echo "$ratelist" | /usr/bin/awk -F ':' 'NR == 1 || $2 > max {max = $2} END {printf "%d\n", 1000000*max}') -fi -# Return snmp result -echo "$result" +# Calculate min/avg/max rates +min_rate=$(/bin/echo "$ratelist" | awk 'NR==1{min=$1} $1<min{min=$1} END{printf "%d\n", (min=="" ? 0 : min)}') +avg_rate=$(/bin/echo "$ratelist" | awk '{sum+=$1; n++} END{printf "%d\n", (n>0 ? sum/n : 0)}') +max_rate=$(/bin/echo "$ratelist" | awk '$1>max{max=$1} END{printf "%d\n", (max=="" ? 0 : max)}') + +case "$3" in + min) echo "$min_rate" ;; + avg) echo "$avg_rate" ;; + max) echo "$max_rate" ;; + *) echo "0" ;; +esac + +# Second line for nsExtendOutputFull compatibility +echo "# wlRate $1 $2 $3" +exit 0 diff --git a/snmp/Openwrt/wlSNR.sh b/snmp/Openwrt/wlSNR.sh index 006cae071..907033827 100755 --- a/snmp/Openwrt/wlSNR.sh +++ b/snmp/Openwrt/wlSNR.sh @@ -15,15 +15,18 @@ fi # Calculate result. 
Sum just for debug, and return integer (safest / easiest) snrlist=$(/usr/bin/iwinfo "$1" assoclist 2>/dev/null | /usr/bin/cut -s -d "/" -f 2 | /usr/bin/cut -s -d "(" -f 2 | /usr/bin/cut -s -d " " -f 2 | /usr/bin/cut -s -d ")" -f 1) -if [ "$2" = "sum" ]; then - result=$(/bin/echo "$snrlist" | /usr/bin/awk -F ':' '{sum += $1} END {printf "%d\n", sum}') -elif [ "$2" = "avg" ]; then - result=$(/bin/echo "$snrlist" | /usr/bin/awk -F ':' '{sum += $1} END {printf "%d\n", sum/NR}') -elif [ "$2" = "min" ]; then - result=$(/bin/echo "$snrlist" | /usr/bin/awk -F ':' 'NR == 1 || $1 < min {min = $1} END {printf "%d\n", min}') -elif [ "$2" = "max" ]; then - result=$(/bin/echo "$snrlist" | /usr/bin/awk -F ':' 'NR == 1 || $1 > max {max = $1} END {printf "%d\n", max}') -fi -# Return snmp result -echo "$result" +min_snr=$(/bin/echo "$snrlist" | awk 'NR==1{min=$1} $1<min{min=$1} END{printf "%d\n", (min=="" ? 0 : min)}') +avg_snr=$(/bin/echo "$snrlist" | awk '{sum+=$1; n++} END{printf "%d\n", (n>0 ? sum/n : 0)}') +max_snr=$(/bin/echo "$snrlist" | awk '$1>max{max=$1} END{printf "%d\n", (max=="" ? 0 : max)}') + +case "$2" in + min) echo "$min_snr" ;; + avg) echo "$avg_snr" ;; + max) echo "$max_snr" ;; + *) echo "0" ;; +esac + +# Second line for nsExtendOutputFull compatibility +echo "# wlSNR $1 $2" +exit 0 From 39c6d2169fbe1b29e430ebc51993284712fd2b78 Mon Sep 17 00:00:00 2001 From: perceival Date: Wed, 11 Mar 2026 05:56:23 +0100 Subject: [PATCH 481/497] OpenWrt lm-sensors-pass: fix shellcheck and getnext flow --- snmp/Openwrt/lm-sensors-pass.sh | 47 ++++++++++++++++++++------------- 1 file changed, 28 insertions(+), 19 deletions(-) diff --git a/snmp/Openwrt/lm-sensors-pass.sh b/snmp/Openwrt/lm-sensors-pass.sh index 763a2b45b..1b052b74e 100755 --- a/snmp/Openwrt/lm-sensors-pass.sh +++ b/snmp/Openwrt/lm-sensors-pass.sh @@ -10,7 +10,7 @@ BASE_OID=".1.3.6.1.4.1.2021.13.16.2.1" # Output format: index:name:temp # Re-indexes zones sequentially starting from 1 get_zones() { - local idx=0 + idx=0 for zone in /sys/devices/virtual/thermal/thermal_zone*; do [ -d "$zone" ] || continue idx=$((idx + 1)) @@ -26,7 +26,7 @@ case 
"$1" in REQ_OID="$2" FOUND=0 - while IFS=':' read idx name temp; do + while IFS=':' read -r idx name temp; do case "$REQ_OID" in "$BASE_OID.1.$idx") echo "$REQ_OID" @@ -62,30 +62,38 @@ EOF REQ_OID="$2" # Create temporary file with all OIDs - TMP_FILE="/tmp/snmp_oids.$$" - > "$TMP_FILE" + TMP_FILE="$(mktemp /tmp/snmp_oids.XXXXXX 2>/dev/null || echo "/tmp/snmp_oids.$$")" + TMP_SORTED="${TMP_FILE}.sorted" + TMP_LIST="${TMP_FILE}.list" + : > "$TMP_FILE" + trap 'rm -f "$TMP_FILE" "$TMP_SORTED" "$TMP_LIST"' EXIT INT TERM - get_zones | while IFS=':' read idx name temp; do + get_zones | while IFS=':' read -r idx name temp; do # Pad index to ensure proper numeric sorting # Format: column.index where index is zero-padded to 3 digits - printf "%d.%03d|$BASE_OID.1.$idx|integer|$idx\n" 1 $idx >> "$TMP_FILE" - printf "%d.%03d|$BASE_OID.2.$idx|string|$name\n" 2 $idx >> "$TMP_FILE" - printf "%d.%03d|$BASE_OID.3.$idx|gauge|$temp\n" 3 $idx >> "$TMP_FILE" + printf '%d.%03d|%s.1.%s|integer|%s\n' 1 "$idx" "$BASE_OID" "$idx" "$idx" >> "$TMP_FILE" + printf '%d.%03d|%s.2.%s|string|%s\n' 2 "$idx" "$BASE_OID" "$idx" "$name" >> "$TMP_FILE" + printf '%d.%03d|%s.3.%s|gauge|%s\n' 3 "$idx" "$BASE_OID" "$idx" "$temp" >> "$TMP_FILE" done # Sort by our padded key, then extract and compare OIDs - sort -t'|' -k1 "$TMP_FILE" | cut -d'|' -f2- | while IFS='|' read oid type value; do + sort -t'|' -k1 "$TMP_FILE" > "$TMP_SORTED" + cut -d'|' -f2- "$TMP_SORTED" > "$TMP_LIST" + + FOUND=0 + while IFS='|' read -r oid type value; do # Use awk for proper numeric OID comparison is_greater=$(awk -v req="$REQ_OID" -v curr="$oid" ' BEGIN { # Split OIDs into arrays - split(req, req_parts, "."); - split(curr, curr_parts, "."); + req_len = split(req, req_parts, "."); + curr_len = split(curr, curr_parts, "."); + max_len = (curr_len > req_len) ? curr_len : req_len; # Compare each part numerically - for (i = 1; i <= length(curr_parts); i++) { - req_val = (i <= length(req_parts)) ? 
req_parts[i] : 0; - curr_val = curr_parts[i]; + for (i = 1; i <= max_len; i++) { + req_val = (i <= req_len) ? req_parts[i] : 0; + curr_val = (i <= curr_len) ? curr_parts[i] : 0; if (curr_val > req_val) { print "1"; @@ -103,13 +111,14 @@ EOF echo "$oid" echo "$type" echo "$value" - rm -f "$TMP_FILE" - exit 0 + FOUND=1 + break fi - done + done < "$TMP_LIST" - rm -f "$TMP_FILE" - echo "NONE" + if [ "$FOUND" -eq 0 ]; then + echo "NONE" + fi ;; *) From 091ca011cf6b1f081adae451eafb0f484081a9b9 Mon Sep 17 00:00:00 2001 From: perceival Date: Wed, 11 Mar 2026 06:09:33 +0100 Subject: [PATCH 482/497] OpenWrt lm-sensors-pass: address ShellCheck SC2129 --- snmp/Openwrt/lm-sensors-pass.sh | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/snmp/Openwrt/lm-sensors-pass.sh b/snmp/Openwrt/lm-sensors-pass.sh index 1b052b74e..79cce7d8a 100755 --- a/snmp/Openwrt/lm-sensors-pass.sh +++ b/snmp/Openwrt/lm-sensors-pass.sh @@ -71,9 +71,11 @@ EOF get_zones | while IFS=':' read -r idx name temp; do # Pad index to ensure proper numeric sorting # Format: column.index where index is zero-padded to 3 digits - printf '%d.%03d|%s.1.%s|integer|%s\n' 1 "$idx" "$BASE_OID" "$idx" "$idx" >> "$TMP_FILE" - printf '%d.%03d|%s.2.%s|string|%s\n' 2 "$idx" "$BASE_OID" "$idx" "$name" >> "$TMP_FILE" - printf '%d.%03d|%s.3.%s|gauge|%s\n' 3 "$idx" "$BASE_OID" "$idx" "$temp" >> "$TMP_FILE" + { + printf '%d.%03d|%s.1.%s|integer|%s\n' 1 "$idx" "$BASE_OID" "$idx" "$idx" + printf '%d.%03d|%s.2.%s|string|%s\n' 2 "$idx" "$BASE_OID" "$idx" "$name" + printf '%d.%03d|%s.3.%s|gauge|%s\n' 3 "$idx" "$BASE_OID" "$idx" "$temp" + } >> "$TMP_FILE" done # Sort by our padded key, then extract and compare OIDs From c8d77bced01f7fc29c71b9a08cab0500ca9515e9 Mon Sep 17 00:00:00 2001 From: perceival Date: Wed, 11 Mar 2026 06:32:37 +0100 Subject: [PATCH 483/497] OpenWrt wlRate: parse numeric bitrate field correctly --- snmp/Openwrt/wlRate.sh | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff 
--git a/snmp/Openwrt/wlRate.sh b/snmp/Openwrt/wlRate.sh index fc512cbda..a2ca2a0df 100755 --- a/snmp/Openwrt/wlRate.sh +++ b/snmp/Openwrt/wlRate.sh @@ -14,9 +14,19 @@ if [ $# -ne 3 ]; then exit 1 fi -# Calculate result. Sum just for debug, and have to return integer -# => If not integer (e.g. 2.67e+07), LibreNMS will drop the exponent (result, 2.67 bits/sec!) -ratelist=$(/usr/sbin/iw dev "$1" station dump 2>/dev/null | /bin/grep "$2 bitrate" | /usr/bin/cut -f 2 -s -d" ") +# Extract numeric bitrate values from "tx bitrate:" / "rx bitrate:" lines. +# Example input line: +# tx bitrate: 1201.0 MBit/s HE-MCS 11 HE-NSS 2 HE-GI 0 HE-DCM 0 +ratelist=$(/usr/sbin/iw dev "$1" station dump 2>/dev/null | awk -v dir="$2" ' + tolower($1) == dir && $2 == "bitrate:" { + for (i = 3; i <= NF; i++) { + if ($i ~ /^[0-9]+(\.[0-9]+)?$/) { + print $i; + break; + } + } + } +') # Calculate min/avg/max rates min_rate=$(/bin/echo "$ratelist" | awk 'NR==1{min=$1} $1 Date: Wed, 11 Mar 2026 06:39:27 +0100 Subject: [PATCH 484/497] OpenWrt wlRate: add iwinfo fallback for Flint variants --- snmp/Openwrt/wlRate.sh | 50 +++++++++++++++++++++++++++++++++++------- 1 file changed, 42 insertions(+), 8 deletions(-) diff --git a/snmp/Openwrt/wlRate.sh b/snmp/Openwrt/wlRate.sh index a2ca2a0df..e7a48a604 100755 --- a/snmp/Openwrt/wlRate.sh +++ b/snmp/Openwrt/wlRate.sh @@ -14,19 +14,53 @@ if [ $# -ne 3 ]; then exit 1 fi +iface="$1" +dir="$2" + +IW_BIN=$(command -v iw 2>/dev/null || true) +IWINFO_BIN=$(command -v iwinfo 2>/dev/null || true) + # Extract numeric bitrate values from "tx bitrate:" / "rx bitrate:" lines. 
# Example input line: # tx bitrate: 1201.0 MBit/s HE-MCS 11 HE-NSS 2 HE-GI 0 HE-DCM 0 -ratelist=$(/usr/sbin/iw dev "$1" station dump 2>/dev/null | awk -v dir="$2" ' - tolower($1) == dir && $2 == "bitrate:" { - for (i = 3; i <= NF; i++) { - if ($i ~ /^[0-9]+(\.[0-9]+)?$/) { - print $i; - break; +ratelist="" +if [ -n "$IW_BIN" ]; then + ratelist=$($IW_BIN dev "$iface" station dump 2>/dev/null | awk -v d="$dir" ' + { + key1 = tolower($1) + key2 = tolower($2) + if (key1 == d && key2 == "bitrate:") { + for (i = 3; i <= NF; i++) { + if ($i ~ /^[0-9]+(\.[0-9]+)?$/) { + print $i + break + } + } } } - } -') + ') +fi + +# Fallback for devices where iw station dump is unavailable or differently formatted. +if [ -z "$ratelist" ] && [ -n "$IWINFO_BIN" ]; then + if [ "$dir" = "tx" ]; then + ratelist=$($IWINFO_BIN "$iface" assoclist 2>/dev/null | awk ' + /TX:[[:space:]]*[0-9]+(\.[0-9]+)?[[:space:]]*MBit\/s/ { + if (match($0, /TX:[[:space:]]*([0-9]+(\.[0-9]+)?)[[:space:]]*MBit\/s/, m)) { + print m[1] + } + } + ') + else + ratelist=$($IWINFO_BIN "$iface" assoclist 2>/dev/null | awk ' + /RX:[[:space:]]*[0-9]+(\.[0-9]+)?[[:space:]]*MBit\/s/ { + if (match($0, /RX:[[:space:]]*([0-9]+(\.[0-9]+)?)[[:space:]]*MBit\/s/, m)) { + print m[1] + } + } + ') + fi +fi # Calculate min/avg/max rates min_rate=$(/bin/echo "$ratelist" | awk 'NR==1{min=$1} $1 Date: Wed, 11 Mar 2026 06:42:09 +0100 Subject: [PATCH 485/497] OpenWrt wlRate: use BusyBox-safe iwinfo parser --- snmp/Openwrt/wlRate.sh | 25 +++++++++---------------- 1 file changed, 9 insertions(+), 16 deletions(-) diff --git a/snmp/Openwrt/wlRate.sh b/snmp/Openwrt/wlRate.sh index e7a48a604..be2d76f17 100755 --- a/snmp/Openwrt/wlRate.sh +++ b/snmp/Openwrt/wlRate.sh @@ -43,23 +43,16 @@ fi # Fallback for devices where iw station dump is unavailable or differently formatted. 
if [ -z "$ratelist" ] && [ -n "$IWINFO_BIN" ]; then - if [ "$dir" = "tx" ]; then - ratelist=$($IWINFO_BIN "$iface" assoclist 2>/dev/null | awk ' - /TX:[[:space:]]*[0-9]+(\.[0-9]+)?[[:space:]]*MBit\/s/ { - if (match($0, /TX:[[:space:]]*([0-9]+(\.[0-9]+)?)[[:space:]]*MBit\/s/, m)) { - print m[1] - } - } - ') - else - ratelist=$($IWINFO_BIN "$iface" assoclist 2>/dev/null | awk ' - /RX:[[:space:]]*[0-9]+(\.[0-9]+)?[[:space:]]*MBit\/s/ { - if (match($0, /RX:[[:space:]]*([0-9]+(\.[0-9]+)?)[[:space:]]*MBit\/s/, m)) { - print m[1] - } + ratelist=$($IWINFO_BIN "$iface" assoclist 2>/dev/null | awk -v d="$dir" ' + { + key = tolower($1) + sub(/:$/, "", key) + + if (key == d && $2 ~ /^[0-9]+(\.[0-9]+)?$/) { + print $2 } - ') - fi + } + ') fi # Calculate min/avg/max rates From 83bc30f8877cf9cb827a2096a352f7cc85b81ceb Mon Sep 17 00:00:00 2001 From: perceival Date: Wed, 11 Mar 2026 07:05:17 +0100 Subject: [PATCH 486/497] OpenWrt wireless discovery: include client uplinks and skip inactive VAPs --- snmp/Openwrt/snmpd-config-generator.sh | 10 +-- snmp/Openwrt/wlInterfaces.sh | 101 ++++++++++++++++++++----- snmp/Openwrt/wlNoiseFloor.sh | 14 ++++ snmp/Openwrt/wlRate.sh | 10 +++ snmp/Openwrt/wlSNR.sh | 22 ++++++ 5 files changed, 132 insertions(+), 25 deletions(-) diff --git a/snmp/Openwrt/snmpd-config-generator.sh b/snmp/Openwrt/snmpd-config-generator.sh index fff05f907..27e8b21ef 100755 --- a/snmp/Openwrt/snmpd-config-generator.sh +++ b/snmp/Openwrt/snmpd-config-generator.sh @@ -20,14 +20,12 @@ config extend EOF -# Live ubus discovery (ap1 wl*, ap2 wlan*, risc phy*) -ubus list hostapd.* 2>/dev/null | sed 's/^hostapd\.//' | while IFS= read -r iface; do - # Robust ssid (handles JSON variance) - ssid=$(ubus call "hostapd.$iface" get_status 2>/dev/null | \ - sed -n 's/.*"ssid":"\?\([^",]*\)"\?.*/\1/p' | head -1 || echo unknown) +# Use the same interface inventory that LibreNMS discovery consumes. 
+"$SCRIPT_DIR/wlInterfaces.sh" 2>/dev/null | while IFS=',' read -r iface label; do [ -n "$iface" ] || continue - [ -n "$ssid" ] || ssid="$iface" + ssid=$(printf '%s' "$label" | sed 's/^[[:space:]]*//; s/[[:space:]]*$//') + [ -n "$ssid" ] || ssid="$iface" cat << EOF diff --git a/snmp/Openwrt/wlInterfaces.sh b/snmp/Openwrt/wlInterfaces.sh index cd90f80bc..d3a9e5b62 100755 --- a/snmp/Openwrt/wlInterfaces.sh +++ b/snmp/Openwrt/wlInterfaces.sh @@ -66,6 +66,73 @@ get_ssid_from_iwinfo() { iwinfo "$iface" info 2>/dev/null | sed -n 's/.*ESSID:[[:space:]]*"\([^"]*\)".*/\1/p' | head -1 } +get_iwinfo_info() { + iface="$1" + iwinfo "$iface" info 2>/dev/null || true +} + +get_mode_from_iwinfo() { + printf '%s\n' "$1" | sed -n 's/.*Mode:[[:space:]]*\([^ ]\+\).*/\1/p' | head -1 +} + +get_signal_from_iwinfo() { + printf '%s\n' "$1" | sed -n 's/.*Signal:[[:space:]]*\([^ ]\+\)[[:space:]]*dBm.*/\1/p' | head -1 +} + +get_bitrate_from_iwinfo() { + printf '%s\n' "$1" | sed -n 's/.*Bit Rate:[[:space:]]*\([^ ]\+\).*/\1/p' | head -1 +} + +get_access_point_from_iwinfo() { + printf '%s\n' "$1" | sed -n 's/.*Access Point:[[:space:]]*\([^ ]\+\).*/\1/p' | head -1 +} + +should_include_iwinfo_iface() { + iface="$1" + info=$(get_iwinfo_info "$iface") + [ -n "$info" ] || return 1 + + ssid=$(printf '%s\n' "$info" | sed -n 's/.*ESSID:[[:space:]]*"\([^"]*\)".*/\1/p' | head -1) + case "$ssid" in + ''|unknown) return 1 ;; + esac + + # Skip synthetic VLAN helper interfaces. 
+ printf '%s\n' "$info" | grep -q '(VLAN)' && return 1 + + mode=$(get_mode_from_iwinfo "$info") + signal=$(get_signal_from_iwinfo "$info") + bitrate=$(get_bitrate_from_iwinfo "$info") + access_point=$(get_access_point_from_iwinfo "$info") + + case "$mode" in + Client) + case "$access_point" in + ''|Not-Associated|00:00:00:00:00:00) return 1 ;; + esac + return 0 + ;; + Master) + case "$signal" in + ''|unknown|0) signal_inactive=1 ;; + *) signal_inactive=0 ;; + esac + case "$bitrate" in + ''|unknown) bitrate_inactive=1 ;; + *) bitrate_inactive=0 ;; + esac + + # Ignore clearly inactive VAPs that report no real link metrics. + if [ "$signal_inactive" -eq 1 ] && [ "$bitrate_inactive" -eq 1 ]; then + return 1 + fi + return 0 + ;; + esac + + return 1 +} + get_freq_mhz_from_hostapd() { iface="$1" hapd="hostapd.$iface" @@ -141,6 +208,9 @@ emit_iface_records() { ubus list hostapd.* 2>/dev/null | sed 's/^hostapd\.//' | while IFS= read -r iface; do [ -n "$iface" ] || continue is_iface_active_hostapd "$iface" || continue + if command -v iwinfo >/dev/null 2>&1; then + should_include_iwinfo_iface "$iface" || continue + fi ssid=$(get_ssid_from_hostapd "$iface") if [ -z "$ssid" ] && command -v iwinfo >/dev/null 2>&1; then ssid=$(get_ssid_from_iwinfo "$iface") @@ -153,27 +223,12 @@ emit_iface_records() { [ -n "$ssid" ] || ssid="$iface" printf '%s\t%s\t%s\n' "$iface" "$ssid" "$band" done - return - fi - - if command -v iw >/dev/null 2>&1; then - iw dev 2>/dev/null | awk '/Interface /{print $2}' | while IFS= read -r iface; do - [ -n "$iface" ] || continue - ssid="$iface" - band='' - if command -v iwinfo >/dev/null 2>&1; then - found_ssid=$(get_ssid_from_iwinfo "$iface") - [ -n "$found_ssid" ] && ssid="$found_ssid" - band=$(get_band_suffix "$iface") - fi - printf '%s\t%s\t%s\n' "$iface" "$ssid" "$band" - done - return fi if command -v iwinfo >/dev/null 2>&1; then - iwinfo 2>/dev/null | awk '{print $1}' | while IFS= read -r iface; do + iwinfo 2>/dev/null | awk 
'/^[^[:space:]].*ESSID:/{print $1}' | while IFS= read -r iface; do [ -n "$iface" ] || continue + should_include_iwinfo_iface "$iface" || continue ssid=$(get_ssid_from_iwinfo "$iface") [ -n "$ssid" ] || ssid="$iface" band=$(get_band_suffix "$iface") @@ -185,9 +240,17 @@ emit_iface_records() { emit_iface_records > "$tmp" awk -F '\t' ' - { rows[NR]=$0; ssid=$2; if (ssid != "") count[ssid]++ } + { + iface=$1 + if (!(iface in seen)) { + seen[iface]=1 + rows[++row_count]=$0 + ssid=$2 + if (ssid != "") count[ssid]++ + } + } END { - for (i=1; i<=NR; i++) { + for (i=1; i<=row_count; i++) { split(rows[i], f, "\t") iface=f[1]; ssid=f[2]; band=f[3] label=ssid diff --git a/snmp/Openwrt/wlNoiseFloor.sh b/snmp/Openwrt/wlNoiseFloor.sh index e4eaf9174..97a574af0 100755 --- a/snmp/Openwrt/wlNoiseFloor.sh +++ b/snmp/Openwrt/wlNoiseFloor.sh @@ -15,6 +15,20 @@ fi # Use tail, not head (i.e. last line, not first), as head exits immediately, breaks the pipe to cut! noise=$(/usr/bin/iwinfo "$1" assoclist 2>/dev/null | grep -v "^$" | /usr/bin/cut -s -d "/" -f 2 | /usr/bin/cut -s -d "(" -f 1 | /usr/bin/cut -s -d " " -f 2 | /usr/bin/tail -1) +# Fallback for client-mode interfaces where assoclist is empty. +if [ -z "$noise" ]; then + noise=$(/usr/bin/iwinfo "$1" info 2>/dev/null | awk ' + /Noise:[[:space:]]*-?[0-9]+[[:space:]]*dBm/ { + for (i = 1; i <= NF; i++) { + if ($i == "Noise:") { + print $(i+1) + exit + } + } + } + ') +fi + # Return snmp result (multi-line for nsExtendOutputFull) /bin/echo "$noise" /bin/echo "# wlNoiseFloor for $1" diff --git a/snmp/Openwrt/wlRate.sh b/snmp/Openwrt/wlRate.sh index be2d76f17..21cac457c 100755 --- a/snmp/Openwrt/wlRate.sh +++ b/snmp/Openwrt/wlRate.sh @@ -55,6 +55,16 @@ if [ -z "$ratelist" ] && [ -n "$IWINFO_BIN" ]; then ') fi +# Final fallback for client-mode interfaces where iwinfo info exposes a single current rate. 
+if [ -z "$ratelist" ] && [ -n "$IWINFO_BIN" ]; then + ratelist=$($IWINFO_BIN "$iface" info 2>/dev/null | awk ' + /Bit Rate:[[:space:]]*[0-9]+(\.[0-9]+)?[[:space:]]*MBit\/s/ { + print $3 + exit + } + ') +fi + # Calculate min/avg/max rates min_rate=$(/bin/echo "$ratelist" | awk 'NR==1{min=$1} $1<min{min=$1} END{printf "%d\n", (min=="" ? 0 : min)}') avg_rate=$(/bin/echo "$ratelist" | awk '{sum+=$1; n++} END{printf "%d\n", (n>0 ? sum/n : 0)}') diff --git a/snmp/Openwrt/wlSNR.sh b/snmp/Openwrt/wlSNR.sh index 907033827..9980db9b8 100755 --- a/snmp/Openwrt/wlSNR.sh +++ b/snmp/Openwrt/wlSNR.sh @@ -16,6 +16,28 @@ fi # Calculate result. Sum just for debug, and return integer (safest / easiest) snrlist=$(/usr/bin/iwinfo "$1" assoclist 2>/dev/null | /usr/bin/cut -s -d "/" -f 2 | /usr/bin/cut -s -d "(" -f 2 | /usr/bin/cut -s -d " " -f 2 | /usr/bin/cut -s -d ")" -f 1) +# Fallback for client-mode interfaces where assoclist is empty. +if [ -z "$snrlist" ]; then + snrlist=$(/usr/bin/iwinfo "$1" info 2>/dev/null | awk ' + /Signal:[[:space:]]*-?[0-9]+[[:space:]]*dBm/ && /Noise:[[:space:]]*-?[0-9]+[[:space:]]*dBm/ { + signal = "" + noise = "" + for (i = 1; i <= NF; i++) { + if ($i == "Signal:") { + signal = $(i+1) + } + if ($i == "Noise:") { + noise = $(i+1) + } + } + if (signal != "" && noise != "") { + print signal - noise + } + exit + } + ') +fi + min_snr=$(/bin/echo "$snrlist" | awk 'NR==1{min=$1} $1<min{min=$1} END{printf "%d\n", (min=="" ? 0 : min)}') avg_snr=$(/bin/echo "$snrlist" | awk '{sum+=$1; n++} END{printf "%d\n", (n>0 ? sum/n : 0)}') max_snr=$(/bin/echo "$snrlist" | awk '$1>max{max=$1} END{printf "%d\n", (max=="" ? 0 : max)}') From 6b88fb418c006ea1d1db45e5a06573566306e70e Mon Sep 17 00:00:00 2001 From: perceival Date: Wed, 11 Mar 2026 07:36:43 +0100 Subject: [PATCH 487/497] OpenWrt wlInterfaces: fix Master-mode filter and interface enumeration The signal/bitrate check at the AP interface level is unreliable: when no clients are associated the AP reports Signal=0 and Bit Rate=unknown, which previously caused the interface to be excluded from discovery entirely. Simplify should_include_iwinfo_iface for Master mode: include any AP with a valid ESSID. 
Also replace the fragile 'iwinfo (no args)' enumeration with iw-dev + sysfs fallback, which works on drivers (e.g. ath11k/ath12k on GL.iNet Flint 3) that do not populate iwinfo's interface listing. Fixes: wireless sensors disappearing when device is idle or when ubus hostapd.* is unavailable (e.g. GL.iNet BE9300). --- snmp/Openwrt/wlInterfaces.sh | 41 +++++++++++++++--------------------- 1 file changed, 17 insertions(+), 24 deletions(-) diff --git a/snmp/Openwrt/wlInterfaces.sh b/snmp/Openwrt/wlInterfaces.sh index d3a9e5b62..1423d6914 100755 --- a/snmp/Openwrt/wlInterfaces.sh +++ b/snmp/Openwrt/wlInterfaces.sh @@ -75,14 +75,6 @@ get_mode_from_iwinfo() { printf '%s\n' "$1" | sed -n 's/.*Mode:[[:space:]]*\([^ ]\+\).*/\1/p' | head -1 } -get_signal_from_iwinfo() { - printf '%s\n' "$1" | sed -n 's/.*Signal:[[:space:]]*\([^ ]\+\)[[:space:]]*dBm.*/\1/p' | head -1 -} - -get_bitrate_from_iwinfo() { - printf '%s\n' "$1" | sed -n 's/.*Bit Rate:[[:space:]]*\([^ ]\+\).*/\1/p' | head -1 -} - get_access_point_from_iwinfo() { printf '%s\n' "$1" | sed -n 's/.*Access Point:[[:space:]]*\([^ ]\+\).*/\1/p' | head -1 } @@ -101,8 +93,6 @@ should_include_iwinfo_iface() { printf '%s\n' "$info" | grep -q '(VLAN)' && return 1 mode=$(get_mode_from_iwinfo "$info") - signal=$(get_signal_from_iwinfo "$info") - bitrate=$(get_bitrate_from_iwinfo "$info") access_point=$(get_access_point_from_iwinfo "$info") case "$mode" in @@ -113,19 +103,9 @@ should_include_iwinfo_iface() { return 0 ;; Master) - case "$signal" in - ''|unknown|0) signal_inactive=1 ;; - *) signal_inactive=0 ;; - esac - case "$bitrate" in - ''|unknown) bitrate_inactive=1 ;; - *) bitrate_inactive=0 ;; - esac - - # Ignore clearly inactive VAPs that report no real link metrics. - if [ "$signal_inactive" -eq 1 ] && [ "$bitrate_inactive" -eq 1 ]; then - return 1 - fi + # Master-mode AP: include if ESSID is valid (checked above). 
+ # Signal/bitrate at the AP interface level report 0/unknown when + # no clients are associated, so they cannot be used as filters. return 0 ;; esac @@ -133,6 +113,19 @@ should_include_iwinfo_iface() { return 1 } +# Enumerate wireless interface names using iw (nl80211) or sysfs. +# More reliable than parsing `iwinfo` without arguments. +enum_wireless_ifaces() { + if command -v iw >/dev/null 2>&1; then + iw dev 2>/dev/null | awk '/[[:space:]]Interface[[:space:]]/{print $2}' + return + fi + for p in /sys/class/net/*/phy80211 /sys/class/net/*/wireless; do + [ -d "$p" ] || continue + basename "$(dirname "$p")" + done | sort -u +} + get_freq_mhz_from_hostapd() { iface="$1" hapd="hostapd.$iface" @@ -226,7 +219,7 @@ emit_iface_records() { fi if command -v iwinfo >/dev/null 2>&1; then - iwinfo 2>/dev/null | awk '/^[^[:space:]].*ESSID:/{print $1}' | while IFS= read -r iface; do + enum_wireless_ifaces | while IFS= read -r iface; do [ -n "$iface" ] || continue should_include_iwinfo_iface "$iface" || continue ssid=$(get_ssid_from_iwinfo "$iface") From 6914d095e1507fc5715e65f74b8a9b00007ab035 Mon Sep 17 00:00:00 2001 From: perceival Date: Wed, 11 Mar 2026 07:43:12 +0100 Subject: [PATCH 488/497] OpenWrt wlInterfaces: never apply iwinfo filter to hostapd-managed interfaces The should_include_iwinfo_iface check was incorrectly applied to interfaces that are already validated by is_iface_active_hostapd. On MLO/Wi-Fi 7 routers (GL.iNet BE9300, ath12k), iwinfo returns ESSID:unknown for wlanXX interfaces even when they are fully active, causing all hostapd-managed interfaces to be filtered out and leaving wlInterfaces.sh with empty output. The iwinfo-based filter is only meaningful for the enum_wireless_ifaces fallback path, which handles interfaces not managed by hostapd (e.g. Client-mode uplink stations like phy1-sta0 on risc-ap). 
--- snmp/Openwrt/wlInterfaces.sh | 3 --- 1 file changed, 3 deletions(-) diff --git a/snmp/Openwrt/wlInterfaces.sh b/snmp/Openwrt/wlInterfaces.sh index 1423d6914..a53a2c94d 100755 --- a/snmp/Openwrt/wlInterfaces.sh +++ b/snmp/Openwrt/wlInterfaces.sh @@ -201,9 +201,6 @@ emit_iface_records() { ubus list hostapd.* 2>/dev/null | sed 's/^hostapd\.//' | while IFS= read -r iface; do [ -n "$iface" ] || continue is_iface_active_hostapd "$iface" || continue - if command -v iwinfo >/dev/null 2>&1; then - should_include_iwinfo_iface "$iface" || continue - fi ssid=$(get_ssid_from_hostapd "$iface") if [ -z "$ssid" ] && command -v iwinfo >/dev/null 2>&1; then ssid=$(get_ssid_from_iwinfo "$iface") From d12ef8a043ecb10355da3ef0665f2fa1d204ce96 Mon Sep 17 00:00:00 2001 From: perceival Date: Wed, 11 Mar 2026 08:00:51 +0100 Subject: [PATCH 489/497] OpenWrt wlInterfaces: fix Client mode parsing and avoid fallback re-adding hostapd ifaces - Parse Mode from the dedicated line so Client interfaces are not mis-read as HT Mode (e.g. VHT80), which dropped phy1-sta0 uplink sensors. - Keep a narrow hostapd-side filter for clearly inactive Master VAPs (Signal=0 and Bit Rate=unknown). - Exclude hostapd-managed interfaces from fallback enumeration so filtered AP VAPs are not reintroduced by iw/iwinfo scan. 
--- snmp/Openwrt/wlInterfaces.sh | 37 +++++++++++++++++++++++++++++++++++- 1 file changed, 36 insertions(+), 1 deletion(-) diff --git a/snmp/Openwrt/wlInterfaces.sh b/snmp/Openwrt/wlInterfaces.sh index a53a2c94d..6970e9a45 100755 --- a/snmp/Openwrt/wlInterfaces.sh +++ b/snmp/Openwrt/wlInterfaces.sh @@ -72,7 +72,15 @@ get_iwinfo_info() { } get_mode_from_iwinfo() { - printf '%s\n' "$1" | sed -n 's/.*Mode:[[:space:]]*\([^ ]\+\).*/\1/p' | head -1 + printf '%s\n' "$1" | awk '/^[[:space:]]*Mode:[[:space:]]/ {print $2; exit}' +} + +get_signal_from_iwinfo() { + printf '%s\n' "$1" | sed -n 's/.*Signal:[[:space:]]*\([^ ]\+\)[[:space:]]*dBm.*/\1/p' | head -1 +} + +get_bitrate_from_iwinfo() { + printf '%s\n' "$1" | sed -n 's/.*Bit Rate:[[:space:]]*\([^ ]\+\).*/\1/p' | head -1 } get_access_point_from_iwinfo() { @@ -113,6 +121,29 @@ should_include_iwinfo_iface() { return 1 } +is_clearly_inactive_master_iface() { + iface="$1" + info=$(get_iwinfo_info "$iface") + [ -n "$info" ] || return 1 + + mode=$(get_mode_from_iwinfo "$info") + [ "$mode" = "Master" ] || return 1 + + signal=$(get_signal_from_iwinfo "$info") + bitrate=$(get_bitrate_from_iwinfo "$info") + + # Some idle/placeholder VAPs report Signal=0 and unknown bitrate. + # Treat only this exact combination as inactive. + [ "$signal" = "0" ] || return 1 + [ "$bitrate" = "unknown" ] || return 1 + return 0 +} + +is_hostapd_managed_iface() { + iface="$1" + ubus list "hostapd.$iface" >/dev/null 2>&1 +} + # Enumerate wireless interface names using iw (nl80211) or sysfs. # More reliable than parsing `iwinfo` without arguments. 
enum_wireless_ifaces() { @@ -201,6 +232,9 @@ emit_iface_records() { ubus list hostapd.* 2>/dev/null | sed 's/^hostapd\.//' | while IFS= read -r iface; do [ -n "$iface" ] || continue is_iface_active_hostapd "$iface" || continue + if command -v iwinfo >/dev/null 2>&1; then + is_clearly_inactive_master_iface "$iface" && continue + fi ssid=$(get_ssid_from_hostapd "$iface") if [ -z "$ssid" ] && command -v iwinfo >/dev/null 2>&1; then ssid=$(get_ssid_from_iwinfo "$iface") @@ -218,6 +252,7 @@ emit_iface_records() { if command -v iwinfo >/dev/null 2>&1; then enum_wireless_ifaces | while IFS= read -r iface; do [ -n "$iface" ] || continue + is_hostapd_managed_iface "$iface" && continue should_include_iwinfo_iface "$iface" || continue ssid=$(get_ssid_from_iwinfo "$iface") [ -n "$ssid" ] || ssid="$iface" From e53b2c099c7fac74e3a552298cbeecbac48c7aba Mon Sep 17 00:00:00 2001 From: perceival Date: Wed, 11 Mar 2026 08:17:30 +0100 Subject: [PATCH 490/497] OpenWrt wlInterfaces: keep active uplink VAPs using hostapd airtime signal Refine the inactive Master filter to check hostapd airtime utilization before classifying Signal=0 / Bit Rate=unknown as inactive. This keeps real active uplink VAPs (e.g. ap1 wl1-ap1 uplink_ssid) while still dropping placeholder idle VAPs (e.g. uplink_ssid24). --- snmp/Openwrt/wlInterfaces.sh | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/snmp/Openwrt/wlInterfaces.sh b/snmp/Openwrt/wlInterfaces.sh index 6970e9a45..245c36a61 100755 --- a/snmp/Openwrt/wlInterfaces.sh +++ b/snmp/Openwrt/wlInterfaces.sh @@ -132,6 +132,18 @@ is_clearly_inactive_master_iface() { signal=$(get_signal_from_iwinfo "$info") bitrate=$(get_bitrate_from_iwinfo "$info") + # If hostapd reports airtime usage, keep this interface. + # This preserves active VAPs that still show Signal=0/Bit Rate=unknown in iwinfo. 
+ air_util=$(get_hostapd_airtime_utilization "$iface") + case "$air_util" in + ''|unknown) ;; + *) + if [ "$air_util" -gt 0 ] 2>/dev/null; then + return 1 + fi + ;; + esac + # Some idle/placeholder VAPs report Signal=0 and unknown bitrate. # Treat only this exact combination as inactive. [ "$signal" = "0" ] || return 1 @@ -164,6 +176,15 @@ get_freq_mhz_from_hostapd() { printf '%s' "$status_json" | get_json_value freq } +get_hostapd_airtime_utilization() { + iface="$1" + hapd="hostapd.$iface" + status_json=$(ubus call "$hapd" get_status 2>/dev/null || true) + if command -v jsonfilter >/dev/null 2>&1; then + printf '%s' "$status_json" | jsonfilter -e '@.airtime.utilization' 2>/dev/null || true + fi +} + get_band_suffix() { iface="$1" freq_mhz_hint="${2:-}" From 8b11c499c0418a963a334b762cdd01459b5548b7 Mon Sep 17 00:00:00 2001 From: perceival Date: Wed, 11 Mar 2026 16:40:54 +0100 Subject: [PATCH 491/497] OpenWrt setup-snmpd: make installs path-safe and support non-interactive apply - Install scripts from setup-snmpd.sh directory instead of current cwd - Add --yes and --no-restart flags for automation - Ensure /etc/config/snmpd exists and keep managed-block refresh idempotent - Keep backups before modifying snmpd config --- snmp/Openwrt/setup-snmpd.sh | 87 ++++++++++++++++++++++++++++--------- 1 file changed, 66 insertions(+), 21 deletions(-) diff --git a/snmp/Openwrt/setup-snmpd.sh b/snmp/Openwrt/setup-snmpd.sh index 49ada323d..d882d01aa 100755 --- a/snmp/Openwrt/setup-snmpd.sh +++ b/snmp/Openwrt/setup-snmpd.sh @@ -1,11 +1,37 @@ #!/bin/sh +set -eu # setup-snmpd.sh -# Installation and configuration script for OpenWrt SNMP monitoring -# This script sets up all necessary scripts and generates the snmpd config +# Install OpenWrt LibreNMS helper scripts and optionally apply generated +# SNMP extend configuration. 
SCRIPT_DIR="/etc/librenms" -BACKUP_DIR="/etc/librenms/backup" +BACKUP_DIR="$SCRIPT_DIR/backup" +SOURCE_DIR=$(cd -- "$(dirname -- "$0")" && pwd) + +AUTO_YES=0 +NO_RESTART=0 + +while [ "$#" -gt 0 ]; do + case "$1" in + -y|--yes) AUTO_YES=1 ;; + --no-restart) NO_RESTART=1 ;; + -h|--help) + cat <<'EOF' +Usage: setup-snmpd.sh [--yes|-y] [--no-restart] + + -y, --yes Apply generated snmpd config without prompt + --no-restart Do not restart snmpd after applying config +EOF + exit 0 + ;; + *) + echo "Unknown option: $1" >&2 + exit 2 + ;; + esac + shift +done echo "OpenWrt SNMPD Setup Script" echo "==========================" @@ -43,6 +69,21 @@ remove_managed_snmpd_sections() { mv "$tmp_clean" /etc/config/snmpd } +apply_generated_snmpd_block() { + tmp_block=$(mktemp) + tmp_new=$(mktemp) + + "$SCRIPT_DIR/snmpd-config-generator.sh" > "$tmp_block" + + # Remove previously managed sections first (legacy and marker-based). + remove_managed_snmpd_sections + + # Append exactly one fresh generated block. + cat /etc/config/snmpd "$tmp_block" > "$tmp_new" + mv "$tmp_new" /etc/config/snmpd + rm -f "$tmp_block" +} + # Create directories echo "Creating directories..." mkdir -p "$SCRIPT_DIR" @@ -53,6 +94,8 @@ if [ -f /etc/config/snmpd ]; then timestamp=$(date +%Y%m%d_%H%M%S) echo "Backing up existing /etc/config/snmpd to $BACKUP_DIR/snmpd.$timestamp" cp /etc/config/snmpd "$BACKUP_DIR/snmpd.$timestamp" +else + touch /etc/config/snmpd fi # Copy scripts to /etc/librenms/ @@ -61,12 +104,13 @@ echo "Installing monitoring scripts to $SCRIPT_DIR..." 
scripts="wlInterfaces.sh wlClients.sh wlFrequency.sh wlNoiseFloor.sh wlRate.sh wlSNR.sh lm-sensors-pass.sh distro.sh snmpd-config-generator.sh" for script in $scripts; do - if [ -f "$script" ]; then - cp "$script" "$SCRIPT_DIR/" + src="$SOURCE_DIR/$script" + if [ -f "$src" ]; then + cp "$src" "$SCRIPT_DIR/" chmod +x "$SCRIPT_DIR/$script" echo " ✓ Installed $script" else - echo " ✗ Warning: $script not found in current directory" + echo " ✗ Warning: $script not found in $SOURCE_DIR" fi done @@ -84,28 +128,29 @@ echo " 3. Restart snmpd: /etc/init.d/snmpd restart" echo "" echo "Setup complete!" -# Ask for confirmation -printf "Do you want to update the SNMP configuration? [Y/n]: " -read -r answer - -# Convert to lowercase and check (default to 'y' if empty) -answer=$(echo "$answer" | tr '[:upper:]' '[:lower:]') +if [ "$AUTO_YES" -eq 1 ]; then + answer="y" +else + printf "Do you want to update the SNMP configuration? [Y/n]: " + read -r answer + answer=$(echo "$answer" | tr '[:upper:]' '[:lower:]') +fi if [ -z "$answer" ] || [ "$answer" = "y" ]; then echo "Updating snmpd configuration..." - # 1. Backup existing config - cp /etc/config/snmpd /etc/config/snmpd-backup - - # 2. Remove previously managed LibreNMS wireless sections - remove_managed_snmpd_sections + # Extra one-shot backup before write + cp /etc/config/snmpd /etc/config/snmpd-backup - # 3. Append one fresh generated config block + # Write exactly one fresh generated LibreNMS block. chmod +x "$SCRIPT_DIR/snmpd-config-generator.sh" - "$SCRIPT_DIR/snmpd-config-generator.sh" >> /etc/config/snmpd + apply_generated_snmpd_block - # 4. Restart the service - /etc/init.d/snmpd restart + if [ "$NO_RESTART" -eq 0 ]; then + /etc/init.d/snmpd restart + else + echo "Skipped snmpd restart (--no-restart)." + fi echo "Done! Service restarted." 
else From a554addf6892953b84a2037280ac6ff1f8d3dd65 Mon Sep 17 00:00:00 2001 From: perceival Date: Wed, 11 Mar 2026 16:51:02 +0100 Subject: [PATCH 492/497] OpenWrt setup-snmpd: skip self-copy when source equals destination --- snmp/Openwrt/setup-snmpd.sh | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/snmp/Openwrt/setup-snmpd.sh b/snmp/Openwrt/setup-snmpd.sh index d882d01aa..9651b2952 100755 --- a/snmp/Openwrt/setup-snmpd.sh +++ b/snmp/Openwrt/setup-snmpd.sh @@ -105,10 +105,16 @@ scripts="wlInterfaces.sh wlClients.sh wlFrequency.sh wlNoiseFloor.sh wlRate.sh w for script in $scripts; do src="$SOURCE_DIR/$script" + dst="$SCRIPT_DIR/$script" if [ -f "$src" ]; then - cp "$src" "$SCRIPT_DIR/" - chmod +x "$SCRIPT_DIR/$script" - echo " ✓ Installed $script" + if [ "$src" = "$dst" ]; then + chmod +x "$dst" + echo " ✓ Using existing $script" + else + cp "$src" "$SCRIPT_DIR/" + chmod +x "$dst" + echo " ✓ Installed $script" + fi else echo " ✗ Warning: $script not found in $SOURCE_DIR" fi From ed1e42f0bc0a3efdbcd003c9aa2d1f4b29dd1c9b Mon Sep 17 00:00:00 2001 From: perceival Date: Wed, 11 Mar 2026 16:51:40 +0100 Subject: [PATCH 493/497] OpenWrt setup-snmpd: fix completion message for --no-restart --- snmp/Openwrt/setup-snmpd.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/snmp/Openwrt/setup-snmpd.sh b/snmp/Openwrt/setup-snmpd.sh index 9651b2952..eced51b2b 100755 --- a/snmp/Openwrt/setup-snmpd.sh +++ b/snmp/Openwrt/setup-snmpd.sh @@ -154,11 +154,11 @@ if [ -z "$answer" ] || [ "$answer" = "y" ]; then if [ "$NO_RESTART" -eq 0 ]; then /etc/init.d/snmpd restart + echo "Done! Service restarted." else echo "Skipped snmpd restart (--no-restart)." + echo "Done! Configuration updated." fi - - echo "Done! Service restarted." else echo "Aborted. No changes made." 
exit 1 From 2d23ad9776ef70771905d870b0389df276d7e57d Mon Sep 17 00:00:00 2001 From: perceival Date: Wed, 11 Mar 2026 16:53:24 +0100 Subject: [PATCH 494/497] OpenWrt setup-snmpd: ensure distro and hardware extends exist --- snmp/Openwrt/setup-snmpd.sh | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/snmp/Openwrt/setup-snmpd.sh b/snmp/Openwrt/setup-snmpd.sh index eced51b2b..2d46457d3 100755 --- a/snmp/Openwrt/setup-snmpd.sh +++ b/snmp/Openwrt/setup-snmpd.sh @@ -69,6 +69,34 @@ remove_managed_snmpd_sections() { mv "$tmp_clean" /etc/config/snmpd } +has_extend_name() { + name="$1" + grep -Eq "^[[:space:]]*option[[:space:]]+name[[:space:]]+'$name'[[:space:]]*$" /etc/config/snmpd +} + +ensure_base_os_extends() { + if ! has_extend_name distro; then + cat >> /etc/config/snmpd <<'EOF' + +config extend + option name 'distro' + option prog '/etc/librenms/distro.sh' +EOF + echo " + Added missing extend: distro" + fi + + if ! has_extend_name hardware; then + cat >> /etc/config/snmpd <<'EOF' + +config extend + option name 'hardware' + option prog '/bin/cat' + option args '/sys/firmware/devicetree/base/model' +EOF + echo " + Added missing extend: hardware" + fi +} + apply_generated_snmpd_block() { tmp_block=$(mktemp) tmp_new=$(mktemp) @@ -150,6 +178,7 @@ if [ -z "$answer" ] || [ "$answer" = "y" ]; then # Write exactly one fresh generated LibreNMS block. 
chmod +x "$SCRIPT_DIR/snmpd-config-generator.sh" + ensure_base_os_extends apply_generated_snmpd_block if [ "$NO_RESTART" -eq 0 ]; then From 98cc6d004bd48910a08400f12ac9254cc3454377 Mon Sep 17 00:00:00 2001 From: perceival Date: Wed, 11 Mar 2026 16:54:13 +0100 Subject: [PATCH 495/497] OpenWrt setup-snmpd: ensure SNMP system section exists --- snmp/Openwrt/setup-snmpd.sh | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/snmp/Openwrt/setup-snmpd.sh b/snmp/Openwrt/setup-snmpd.sh index 2d46457d3..71e11df18 100755 --- a/snmp/Openwrt/setup-snmpd.sh +++ b/snmp/Openwrt/setup-snmpd.sh @@ -97,6 +97,21 @@ EOF fi } +ensure_system_section() { + if ! grep -Eq "^[[:space:]]*config[[:space:]]+system[[:space:]]+'system'[[:space:]]*$" /etc/config/snmpd; then + hostname=$(uname -n 2>/dev/null || echo openwrt) + cat >> /etc/config/snmpd < Date: Fri, 13 Mar 2026 07:35:48 +0100 Subject: [PATCH 496/497] OpenWrt distro: inline grep/sed pipeline, remove distro.sh Replace the distro.sh helper script with an inline shell pipeline in both snmpd-base-config and setup-snmpd.sh: /bin/grep OpenWrt /etc/banner | /bin/sed s/.*OpenWrt/OpenWrt/ | /usr/bin/head -1 - Delete snmp/Openwrt/distro.sh (no longer needed) - Update the 'distro' extend in snmpd-base-config to use /bin/sh with the pipeline as option args - Update the fallback ensure_base_os_extends() block in setup-snmpd.sh identically - Remove distro.sh from the setup-snmpd.sh install scripts list --- snmp/Openwrt/distro.sh | 6 ------ snmp/Openwrt/setup-snmpd.sh | 5 +++-- snmp/Openwrt/snmpd-base-config | 3 ++- 3 files changed, 5 insertions(+), 9 deletions(-) delete mode 100755 snmp/Openwrt/distro.sh diff --git a/snmp/Openwrt/distro.sh b/snmp/Openwrt/distro.sh deleted file mode 100755 index 3af86780f..000000000 --- a/snmp/Openwrt/distro.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/sh - -# distro.sh -# Extracts OpenWrt version string from banner (from "OpenWrt" onwards) - -grep OpenWrt /etc/banner | sed 's/.*OpenWrt/OpenWrt/' | head 
-1 diff --git a/snmp/Openwrt/setup-snmpd.sh b/snmp/Openwrt/setup-snmpd.sh index 71e11df18..784c8bb19 100755 --- a/snmp/Openwrt/setup-snmpd.sh +++ b/snmp/Openwrt/setup-snmpd.sh @@ -80,7 +80,8 @@ ensure_base_os_extends() { config extend option name 'distro' - option prog '/etc/librenms/distro.sh' + option prog '/bin/sh' + option args '-c "/bin/grep OpenWrt /etc/banner | /bin/sed s/.*OpenWrt/OpenWrt/ | /usr/bin/head -1"' EOF echo " + Added missing extend: distro" fi @@ -144,7 +145,7 @@ fi # Copy scripts to /etc/librenms/ echo "Installing monitoring scripts to $SCRIPT_DIR..." -scripts="wlInterfaces.sh wlClients.sh wlFrequency.sh wlNoiseFloor.sh wlRate.sh wlSNR.sh lm-sensors-pass.sh distro.sh snmpd-config-generator.sh" +scripts="wlInterfaces.sh wlClients.sh wlFrequency.sh wlNoiseFloor.sh wlRate.sh wlSNR.sh lm-sensors-pass.sh snmpd-config-generator.sh" for script in $scripts; do src="$SOURCE_DIR/$script" diff --git a/snmp/Openwrt/snmpd-base-config b/snmp/Openwrt/snmpd-base-config index d9a31a61d..c6e603701 100644 --- a/snmp/Openwrt/snmpd-base-config +++ b/snmp/Openwrt/snmpd-base-config @@ -83,7 +83,8 @@ config access 'private_access' # System information config extend option name 'distro' - option prog '/etc/librenms/distro.sh' + option prog '/bin/sh' + option args '-c "/bin/grep OpenWrt /etc/banner | /bin/sed s/.*OpenWrt/OpenWrt/ | /usr/bin/head -1"' config extend option name 'hardware' From 75e18846014b75844bc008d253edbb7714dd8f70 Mon Sep 17 00:00:00 2001 From: perceival Date: Fri, 13 Mar 2026 07:41:37 +0100 Subject: [PATCH 497/497] OpenWrt snmpd-base-config: rebase on upstream OpenWrt defaults Replace the previous hand-crafted config (which contained example community strings and a hard-coded source CIDR) with the standard OpenWrt snmpd default layout: - Use 'public'/'private' community names and 'default' source (matches stock OpenWrt /etc/config/snmpd defaults) - Add config system section with placeholder sysLocation/Contact/Name - Add config exec for 
filedescriptors (standard OpenWrt entry) - Retain commented trap/trapsess stanzas for reference - Keep br-lan as engineidnic (correct for OpenWrt LAN bridge) - Append LibreNMS extends (distro, hardware, interfaces, clients-wlan, lm-sensors) after the standard sections --- snmp/Openwrt/snmpd-base-config | 216 ++++++++++++++++++++------------- 1 file changed, 129 insertions(+), 87 deletions(-) diff --git a/snmp/Openwrt/snmpd-base-config b/snmp/Openwrt/snmpd-base-config index c6e603701..b799b4034 100644 --- a/snmp/Openwrt/snmpd-base-config +++ b/snmp/Openwrt/snmpd-base-config @@ -1,86 +1,135 @@ - config agent - option agentaddress 'UDP:161,UDP6:161' + option agentaddress UDP:161,UDP6:161 config agentx - option agentxsocket '/var/run/agentx.sock' - -config com2sec 'public' - option secname 'ro' - option source 'default' - option community 'public' - -config com2sec 'private' - option secname 'rw' - option source 'localhost' - option community 'private' - -config com2sec6 'public6' - option secname 'ro' - option source 'default' - option community 'public' - -config com2sec6 'private6' - option secname 'rw' - option source 'localhost' - option community 'private' - -config group 'public_v1' - option group 'public' - option version 'v1' - option secname 'ro' - -config group 'public_v2c' - option group 'public' - option version 'v2c' - option secname 'ro' - -config group 'public_usm' - option group 'public' - option version 'usm' - option secname 'ro' - -config group 'private_v1' - option group 'private' - option version 'v1' - option secname 'rw' - -config group 'private_v2c' - option group 'private' - option version 'v2c' - option secname 'rw' - -config group 'private_usm' - option group 'private' - option version 'usm' - option secname 'rw' - -config view 'all' - option viewname 'all' - option type 'included' - option oid '.1' - -config access 'public_access' - option group 'public' - option context 'none' - option version 'any' - option level 'noauth' - option prefix 
'exact' - option read 'all' - option write 'none' - option notify 'none' - -config access 'private_access' - option group 'private' - option context 'none' - option version 'any' - option level 'noauth' - option prefix 'exact' - option read 'all' - option write 'all' - option notify 'all' - -# System information + option agentxsocket /var/run/agentx.sock + +config com2sec public + option secname ro + option source default + option community public + +config com2sec private + option secname rw + option source localhost + option community private + +config com2sec6 public6 + option secname ro + option source default + option community public + +config com2sec6 private6 + option secname rw + option source localhost + option community private + +config group public_v1 + option group public + option version v1 + option secname ro + +config group public_v2c + option group public + option version v2c + option secname ro + +config group public_usm + option group public + option version usm + option secname ro + +config group private_v1 + option group private + option version v1 + option secname rw + +config group private_v2c + option group private + option version v2c + option secname rw + +config group private_usm + option group private + option version usm + option secname rw + +config view all + option viewname all + option type included + option oid .1 + +config access public_access + option group public + option context none + option version any + option level noauth + option prefix exact + option read all + option write none + option notify none + +config access private_access + option group private + option context none + option version any + option level noauth + option prefix exact + option read all + option write all + option notify all + +config system + option sysLocation'office' + option sysContact'bofh@example.com' + option sysName'HeartOfGold' +#option sysServices72 +#option sysDescragent-local/'adult playground' +#option sysObjectID'1.2.3.4' + +config exec 
+ option namefiledescriptors + option prog/bin/cat + option args/proc/sys/fs/file-nr +#option miboid1.2.3.4 + +config engineid +#option engineid 'LEDE' + option engineidtype '3' + option engineidnic 'br-lan' + +#config trapcommunity 'trapcommunity' +#option community 'public' + +#config trapsink +#option host 'nms.system.com' +#option community 'public' +#option port '162' + +#config trap2sink +#option host 'nms.system.com' +#option community 'secret' +#option port '162' + +#config informsink +#option host 'nms.sytem.com' +#option community 'public' +#option port '162' + +#config authtrapenable 'authtrapenable' +#option enable '1' + +#config v1trapaddress 'v1trapaddress' +#option host '1.2.3.4' + +#config trapsess 'trapsess' +#option trapsess'-v 3 -e 0x80001f88808c18d3f7b0000 -u trapuser -a MD5 -A administrator -l authPriv -x DES -X rootpasswd udp:127.0.0.1:162' + +config snmpd general + option enabled '1' +#list network 'wan' + +# LibreNMS extends config extend option name 'distro' option prog '/bin/sh' @@ -108,10 +157,3 @@ config pass option name 'lm-sensors' option prog '/etc/librenms/lm-sensors-pass.sh' option miboid '.1.3.6.1.4.1.2021.13.16.2.1' - -config engineid - option engineidtype '3' - option engineidnic 'br-lan' - -config snmpd 'general' - option enabled '1'