Posted on

dockerfile for elasticsearch

Here’s a Dockerfile to install Elasticsearch. Change the version number to match the tarball you want to use.
# Elasticsearch Dockerfile
#
# Simple example for running ES 2.4.4
#

# Pull base image.
# NOTE(review): the official java images are deprecated; consider
# openjdk:8-jre at the next rebuild.
FROM java:8-jre

# Version of the tarball shipped in ./config -- change to match yours.
ENV ES_PKG_NAME elasticsearch-2.4.4

# Install Elasticsearch: unpack the tarball, create the standard
# log/data/run/config directories, and add a dedicated uid/gid 1000 user.
COPY config/$ES_PKG_NAME.tar.gz /tmp/
RUN tar -C /usr/share/ -xvzf /tmp/$ES_PKG_NAME.tar.gz && \
mkdir /var/log/elasticsearch && \
mkdir /var/lib/elasticsearch && \
mkdir /var/run/elasticsearch && \
mkdir /etc/elasticsearch && \
mv /usr/share/$ES_PKG_NAME /usr/share/elasticsearch && \
mkdir /usr/share/elasticsearch/config/scripts && \
groupadd -g 1000 elasticsearch && useradd elasticsearch -u 1000 -g 1000

# Configuration files (taken from the elasticsearch package).
COPY config/elasticsearch.yml.staging /etc/elasticsearch/elasticsearch.yml
COPY config/elasticsearch /etc/init.d/elasticsearch
COPY config/log4j2.properties /etc/elasticsearch/
COPY config/jvm.options /etc/elasticsearch/
COPY config/logging.yml /etc/elasticsearch/

# Hand everything to the elasticsearch user.
# (The original chown'ed /var/lib/elasticsearch twice and used the
# deprecated 'user.group' form; 'user:group' is the portable spelling.)
RUN chown -R elasticsearch:elasticsearch /usr/share/elasticsearch && \
chown -R elasticsearch:elasticsearch /var/log/elasticsearch && \
chown -R elasticsearch:elasticsearch /var/lib/elasticsearch && \
chown -R elasticsearch:elasticsearch /var/run/elasticsearch && \
chown -R elasticsearch:elasticsearch /etc/elasticsearch && \
echo End of elasticsearch base install

# Define mountable directories.
VOLUME /data
# Define working directory.
#WORKDIR /data/elasticsearch/

COPY ./docker-entrypoint.sh /
ENTRYPOINT ["/docker-entrypoint.sh"]
CMD ["elasticsearch"]
# Expose ports.
# - 9200: HTTP
# - 9300: transport
EXPOSE 9200
EXPOSE 9300


This is the docker-entrypoint.sh:
#!/bin/bash
# Entrypoint: run Elasticsearch in the foreground as PID 1, or exec any
# other command the user supplies (e.g. `bash` to explore the image).
if [ "$1" = 'elasticsearch' ]; then
    # Run ES in the foreground under the elasticsearch user. Using exec
    # keeps the server as PID 1 so the container stays alive and receives
    # signals; the original start-stop-daemon call backgrounded the
    # process, so PID 1 exited and the container stopped immediately.
    exec su -s /bin/bash elasticsearch -c "/usr/share/elasticsearch/bin/elasticsearch \
        -p /var/run/elasticsearch/elasticsearch.pid \
        --default.path.home=/usr/share/elasticsearch \
        --default.path.logs=/var/log/elasticsearch/ \
        --default.path.data=/var/lib/elasticsearch \
        --default.path.conf=/etc/elasticsearch/"
fi

# Anything else: assume the user wants to run their own process,
# for example a `bash` shell to explore this image.
exec "$@"

and these files are in config and are taken from the elasticsearch package:
config/
elasticsearch
elasticsearch-2.4.4.tar.gz
elasticsearch.yml.staging
jvm.options
log4j2.properties
logging.yml

Posted on

using jq to parse json

Here’s a quickly written script to simply query my json file.
The json file was created from an elasticsearch index using estab from https://github.com/miku/estab to dump the contents to a file.

estab -indices=myindex -raw > /tmp/myindex.json

I thought I would try using jq to query the resulting large json file. I found it a bit awkward to get the syntax correct and decided to write a simple frontend to allow me or others to query the json file more easily. This was a first attempt ….
<!DOCTYPE html>
<html>
<HEAD>
<?php
// ##  Set default values and GET / POST values
// Front-end for querying a large JSON dump (from estab) with jq.

$scriptName = $_SERVER['SCRIPT_NAME'];
$datafile = "/var/www/html/easisearch/ela1.json";
// NOTE(review): exec()'s 2nd parameter collects output lines, not the
// exit status; the name $retval is misleading but the returned last
// line (the binary's path) is what is actually used.
$jq = exec('which jq', $retval);
$jq = $jq . " -r ";
$grepcmd = " | " . exec('which egrep', $retval);
$outputfile = "/var/www/html/tmp/jqoutput.csv";
$weboutputfile = "/tmp/jqoutput.csv";
$statusfile = "/tmp/status.txt";
$displaysize = 1000;
// Default to empty strings so the first page load (no query params)
// does not raise undefined-index notices.
$grepargs = isset($_REQUEST['grepargs']) ? $_REQUEST['grepargs'] : '';
$jqargs = isset($_REQUEST['jqargs']) ? $_REQUEST['jqargs'] : '';

// this builds the contents of the select drop-down
// and is based on the structure of the json file 
$field[".aetopiaMetadata |.aetopiaId"] = "aetopiaId";
$field[".aetopiaMetadata |.metadata | .createDate"] = "aetopia-createDate";
$field[".aetopiaMetadata |.metadata | .id"] = "aetopia-id";
$field[".aetopiaMetadata |.metadata | .lastModifiedDate"] = "aetopia-lastModifiedDate";
$field[".aetopiaMetadata |.metadata | .metadata | .DOC_TEXT"] = "aetopia-DOC_TEXT";
$field[".aetopiaMetadata |.metadata | .originalFileName"] = "aetopia-origFileName";
$field[".synopsis"] = "synopsis";
$field[".title"] = "title";
$field[".usageTrafficLight"] = "usageTrafficLight";
$field[".tvFormat"] = "tvFormat";
$field[".transmissionMonth"] = "transmissionMonth";
$field[".transmissionDay"] = "transmissionDay";
$field[".transmissionDate"] = "transmissionDate";
$field[".transcription | .version"] = "transcription version";
$field[".transcription | .updated"] = "transcription updated";
$field[".transcription | .text"] = "transcription text";
$field[".thumbnailUrl"] = "thumbnailUrl";
$field[".sync | .requiresSearchSync"] = "requiresSearchSync";
$field[".sync | .lastTopicsUpdate"] = "lastTopicsUpdate";
$field[".sync | .lastSourceSync"] = "lastSourceSync";
$field[".sync | .lastSearchSync"] = "lastSearchSync";
$field[".sync | .lastSaved"] = "lastSaved";
$field[".sync | .enrichmentStatus"] = "enrichmentStatus";
$field[".sourceSystem"] = "sourceSystem";
$field[".sourceId"] = "sourceId";
$field[".owner"] = "owner";
$field[".modifiedDate"] = "modifiedDate";
$field[".clipName"] = "clipName";
$field[".clipDuration"] = "clipDuration";
$field[".assetType"] = "assetType";
$field[".archiveTapeIdItemNumber"] = "archiveTapeIdItemNumber";
$field[".archiveTapeId"] = "archiveTapeId";
$field[".createdDate"] = "createdDate";
$field[".deleted"] = "deleted";
$field[".deletedAtSource"] = "deletedAtSource";
$field[".files | .web | .url"] = "web url";
$field[".files | .web | .fileName"] = "web fileName";
$field[".files | .orig | .fileName"] = "orig fileName";
$field[".files | .orig | .url"] = "orig url";
$field[".id"] = "id";
$field[".lastModifiedBy"] = "lastModifiedBy";

// sort the array by value
asort($field);
// kill the job if Kill Job button is clicked 
if (isset($_REQUEST['killjob']))
   {
        exec("pkill jq");
   }
 // The json file can take a long time to parse
// so provide some way of indicating progress
$chkprogress = 0;   // initialise so later "== 1" tests never see an undefined variable
if (isset($_REQUEST['chkprogress']))
     {
        $chkprogress = 1;
    }

// The title was previously printed as bare text inside <head>;
// wrap it in a <title> element so the HTML is well-formed.
print "<title>easiSearch</title></HEAD><body>";
?>
<br/>
<!-- Search form: which field to search, the search term, and which
     fields to include in the CSV output. -->
<form method="GET" action="<?php
echo htmlspecialchars($_SERVER['PHP_SELF']); ?>">
<table>
<tr><td valign="top" align="center">(SEARCH THIS)</td><td valign="top" align="center">  (FOR) </td><td align="center"  valign="top"> (OUTPUT THESE) </td> </tr>
<tr><td align="center"  valign="top">
<select  size=1 name="jqargsarray[]">
<?php
// build the dropdown list for which fields to search from
foreach($field as $x => $x_value)
        {
        print "<option value=\"$x\">$x_value</option>";
        }

?>
<option value="">All</option>
</select>
</td><td align="center"  valign="top">
   <!-- XSS fix: the previous search term is user input, so it must be
        HTML-escaped before being echoed back into the value attribute. -->
   <input type="text" id="grepargs" name="grepargs"  value="<?php
echo htmlspecialchars($grepargs); ?>">
</td><td  valign="top" align="center">
<select multiple="multiple"  size=8 name="outputfieldsarray[]">
<?php
 // build the dropdown for which fields to output
foreach($field as $x => $x_value)
        {
        print "<option value=\"$x\">$x_value</option>";
        }

?>
  <option value="">All</option>
</select>
</td></tr>
</table><br/>
  <input type="submit" value="Search"  name="Search">
</form>
<?php
// Build the jq command-line fragments from the submitted form values.
// Two modes:
//   - "All" fields selected for search: dump every field as CSV and
//     filter with egrep afterwards ($pipetogrep).
//   - specific field(s): build jq select(...) expressions instead.
// SECURITY NOTE(review): $grepargs and the array values below come from
// the request and are later interpolated into a shell command without
// escapeshellarg() -- this is command-injection-prone; confirm this page
// is only reachable by trusted users.
$jqargsarray = $_REQUEST['jqargsarray'];
// build the syntax for the jq command based on
// which fields have been selected
// if 'all' was selected, then just use grep
if ($jqargsarray[0] == "")
        {
        foreach($field as $x => $x_value)
                {
                // NOTE(review): $i is not initialised before this loop;
                // PHP's ++ on an unset variable yields 1, so the comma
                // logic below works, but only by accident.
                $i++;
                if ($x != "")
                        {
                        $jqargs.= "($x)";
                        }

                // comma-separate every jq expression except the last
                if ($i < (count($field)))
                        {
                        $jqargs.= ",";
                        }
                }

        // wrap as: '._source | [expr,expr,...] | @csv'
        $jqargs = "'._source | [$jqargs] | @csv'";
        $pipetogrep = 1;
        }
  else
        {
        // specific fields: one case-insensitive select() per chosen field
        for ($i = 0; $i < count($jqargsarray); $i++)
                {
                $jqselect.= "select($jqargsarray[$i]|tostring|test(\"$grepargs\";\"i\")) |";
                $jqargs = "'._source | $jqselect ";
                }
        }

// no search term: drop the egrep stage entirely
if ($grepargs == "")
        {
        $grepcmd = "";
        }

$outputfieldsarray = $_REQUEST['outputfieldsarray'];

// Build the list of fields to appear in the CSV output.
if ($outputfieldsarray[0] == "")
        {
        // "All" output fields
        foreach($field as $x => $x_value)
                {
                $outputfields.= "($x), ";
                }

        // strip the trailing ", "
        $outputfields = substr($outputfields, 0, -2);
        $outputfields = "[ $outputfields ]";
        $perlfilter = "";
        }
  else
        {
        if ($pipetogrep)
                {

                // need to parse out the fields using perl
  // if only some fields were selected
// this handles the condition where you are
// searching all fields for a value but only
// want to output selected fields
                $perlfilter = "| perl -MText::CSV -le '\$csv = Text::CSV->new({binary=>1}); while (\$row = \$csv->getline(STDIN)){print \"";
                foreach($outputfieldsarray as $sel)
                        {
                        // find the positional index of the selected field
                        // within the (sorted) $field array; that index is
                        // the CSV column to emit
                        $i = 0;
                        foreach($field as $x => $x_value)
                                {
                                if ($sel == $x)
                                        {
                                        $perlfilter.= "\$row->[$i],";
                                        break;
                                        }

                                $i++;
                                }
                        }

                $perlfilter.= "\"}'";
                }
          else
                {
                // searching specific fields: output list goes straight to jq
                for ($i = 0; $i < count($outputfieldsarray); $i++)
                        {
                        $outputfields.= "($outputfieldsarray[$i])";
                        if ($i < (count($outputfieldsarray) - 1))
                                {
                                $outputfields.= ",";
                                }
                        }

                $outputfields = "[ $outputfields ]";
                }
        }
 
// Assemble and launch the shell pipeline, then render progress controls.
// NOTE(review): the command is built twice -- this first block's result
// is recomputed identically below after the optional getStatus() call;
// the duplication looks redundant but is kept as-is.
if (isset($_REQUEST['Search']))
        {
// kill any unfinished query if a new search is started
        exec("pkill jq");
        if ($pipetogrep)
                {
                $cmd = "cat $datafile | $jq $jqargs $grepcmd $grepargs $perlfilter";
                }
          else
                {
                $cmd = "cat $datafile | $jq $jqargs $outputfields | @csv'";
                }
        }

// "Check progress" click: reload the last command/args from the status file
if ($chkprogress == 1)
        {
        getStatus();
        }

if (isset($_REQUEST['Search']))
        {
        exec("pkill jq");
        if ($pipetogrep)
                {
                $cmd = "cat $datafile | $jq $jqargs $grepcmd $grepargs $perlfilter";
                }
          else
                {
                $cmd = "cat $datafile | $jq $jqargs $outputfields | @csv'";
                }

        // persist the query so a later "Check progress" request can show it
        $handle = fopen("$statusfile", "w") or die("Unable to open file!");
        fwrite($handle, "$jqargs\n");
        fwrite($handle, "$cmd\n");
        fclose($handle);
        }

if ((isset($_REQUEST['Search'])) or ($chkprogress == 1))
        {
        print "<br/>executing:<br/> $cmd <br/><br/>\n";
        print "redirecting to: <a href=$weboutputfile> $outputfile</a><br/><br/>";
        $cmd.= " > $outputfile ";
        // run in the background so the page returns immediately;
        // SECURITY NOTE(review): $cmd contains unescaped request input.
        if (isset($_REQUEST['Search']))
                {
                $last_line = shell_exec("$cmd &");
                }

        print "<form method=\"POST\" action=\"";
        print htmlspecialchars($_SERVER['PHP_SELF']) . '">';
        print "<input type=\"submit\" value=\"Check progress\"  name=\"chkprogress\">";
        print "<input type=\"submit\" value=\"Kill job\"  name=\"killjob\">";
        print "</form>";
        }

if ($chkprogress)
        {
        showfile($outputfile);
        }
  else
        {
        if (!(isset($_REQUEST['Search'])))
                {

                // keep link to last job
                 getStatus();
                print "<br/>last executed:<br/> $cmd <br/><br/>\n";
                $filesize = filesize($outputfile);
                print "with $filesize bytes output to: <a href=$weboutputfile> $outputfile</a><br/><br/>";
                }
        }

// end of main program
 
// end of main program
 
function test_input($data)
        {
        // Normalise user-supplied text: trim surrounding whitespace,
        // undo magic-quote style backslashes, then HTML-encode so the
        // value is safe to echo back into a page.
        $cleaned = trim($data);
        $cleaned = stripslashes($cleaned);
        return htmlspecialchars($cleaned);
        }
 
function showfile($filename)
        {
        // Report progress of the background jq job: whether a jq process
        // is still running, the current size of $filename, and (when the
        // $preview flag is enabled) the first $displaysize characters.
        exec("pgrep jq", $output, $return);
        if ($return == 0)
                {
                print "<br/>Still running!\n<br/><br/>";
                }
          else
                {
                // pgrep found nothing -- assume the pipeline finished
                print "<br/>No jq process running - assumed complete!\n<br/><br/>";
                }

        $filesize = filesize($filename);
        print "$filename file size: $filesize \n <br/>";
        // Preview of the output is disabled; flip to 1 to re-enable.
        $preview = 0;
        if ($preview)
                {
                if (file_exists($filename))
                        {
                        global $displaysize;
                        $displayed = 0;
                        $handle = fopen($filename, "r") or die("Unable to open file!");

                        //    {

                        if ($filesize > $displaysize)
                                {
                                print "<br/>Showing first $displaysize characters of $filesize\n<br/> ";
                                }

                        // print "<a href=$filename>$filename</a><br/><br/>\n";

                        // Print whole lines until the character budget is spent.
                        while (!feof($handle))
                                {
                                $line = fgets($handle);
                                $displayed = strlen($line) + $displayed;
                                if ($displayed < $displaysize)
                                        {
                                        print "$line<br />";
                                        }
                                  else
                                        {
                                        break;
                                        }
                                }

                        fclose($handle);
                        }
                }
        }
 
function getStatus()
        {
        // Load the last-run query details from the status file into the
        // $jqargs and $cmd globals (one line each, written by the Search
        // handler). Returns 0 when the file was read, 1 when it does not
        // exist. The original returned an undefined $status (null) in
        // the success case.
        global $statusfile;
        global $jqargs;
        global $cmd;
        $status = 0;
        if (file_exists($statusfile))
                {
                $handle = fopen("$statusfile", "r");
                $jqargs = fgets($handle);
                $cmd = fgets($handle);
                fclose($handle);
                }
          else
                {
                $status = 1;
                }

        return $status;
        }
?>
</body>
</html>

 

Posted on

nrpe or nsca alternative for nagios checks

Don’t like running nrpe or nsca but do like Ansible’s approach?
Don’t like receiving around 20 Nagios alerts from every server when there is an issue such as a network outage?

Take the commands from nrpe.cfg that form each Nagios check and put them in to a shell script:
Put them in order of the most important or most useful to know first.
eg

# Each check writes a result line to the monitoring server in the format
#   exitcode|interval-minutes|checkname|checkdetail
# (the redirection is inside the quotes, so it runs on the remote host).
CHECK="My process";
/usr/local/nagios/libexec/check_process -n myprocess
if [[ $? -ne 0 ]]; then
# report CRITICAL (2) with a 10-minute expected interval, then stop early
ssh mynagiosserver "echo -e 2\|10\|$HOSTNAME \|myprocess is unavailable, this is what needs to be done >/usr/local/nagios/var/remote-checks/$HOSTNAME"
exit
fi

# ... add all your other checks here
# Check disk space on sda1
CHECK="$CHECK Disk space ";
/usr/local/nagios/libexec/check_disk -w 10% -c 5% -p /dev/sda1
if [[ $? -ne 0 ]]; then
ssh mynagiosserver "echo -e 2\|10\|$HOSTNAME disk space alert\|Free disk space on sda1 >/usr/local/nagios/var/remote-checks/$HOSTNAME"
exit
fi
## Else all is OK
# Every check passed: report OK (0) listing what was checked.
ssh mynagiosserver "echo -e 0\|10\|OK\|Checks completed OK: $CHECK>/usr/local/nagios/var/remote-checks/$HOSTNAME"

If you prefer, you can build up a string of all the failed checks, rather than stop checking after the first failure:
eg

if [[ $? -ne 0 ]]; then
CHECK="$CHECK Disk space alert"
fi

and at the end of the checks


# Report once at the end if any check appended to $CHECK.
# -ne is an *arithmetic* comparison and misbehaves on strings;
# use -n (non-empty string) instead.
if [[ -n "$CHECK" ]]; then
ssh mynagiosserver "echo -e 2\|10\|$HOSTNAME $CHECK\|Free disk space on sda1 >/usr/local/nagios/var/remote-checks/$HOSTNAME"
fi

For the ssh to work, you will need to set up ssh keys. There are plenty of guides, just look for ssh-keygen and ssh-copy-id.

You will want the checks to execute regularly.
I put this in the crontab for the nagios user.


# m h dom mon dow command
*/10 * * * * /usr/local/nagios/checks/10minutechecks.sh

You now have a single file per server on your monitoring server

Alternatives to ssh
- using a shared mount point from nfs, samba, etc and copying the check result file there instead of using ssh

Since we already use Nagios, I set up a Nagios check on the server to read the check file.


# Active service that reads this host's result file every 10 minutes,
# matching the 10-minute cron schedule on the client.
define service{
use generic-service
host_name myhost
service_description passive checks
check_interval 10
check_command check_passive!myhost
notifications_enabled 1
}

# check_passive.pl takes the host's result-file name as its only argument.
define command {
command_name check_passive
command_line $USER1$/check_passive.pl $ARG1$
}

and create /usr/local/nagios/libexec/check_passive.pl

#!/usr/bin/perl
# check_passive.pl -- Nagios plugin that reads a per-host check-result
# file (written remotely by the 10-minute check script) and converts it
# into an active check result.
#
# File format (single line): exitcode|interval-minutes|checkname|checkdetail
# Exit codes follow Nagios conventions: 0 OK, 1 WARNING, 2 CRITICAL.
use strict;
use warnings;
use Time::Local;

my $num_args = $#ARGV + 1;
if ($num_args != 1) {
    print "Specify name of file to check.\n";
    exit;
}

my $file    = $ARGV[0];
my $datadir = "/usr/local/nagios/var/remote-checks/";

# A missing/unreadable file means the host has never reported -> CRITICAL.
open(my $fh, "<", "$datadir$file")
    or do { print "ERROR: cannot open $datadir$file: $!\n"; exit(2); };

# The file's mtime tells us whether the remote host is still reporting.
my $epoch_timestamp = (stat($fh))[9];
my $timestamp       = localtime($epoch_timestamp);
my $fileage         = (-M "$datadir$file") * 24 * 60;    # age in minutes
my @log             = <$fh>;
close $fh;    # was: "close LOG;" -- LOG was never opened

my ($exitcode, $interval, $checkname, $checkdetail) = split /\|/, $log[0];

# No fresh report within the expected interval -> CRITICAL.
if ($fileage > $interval) {
    print "ERROR: No check report received since $timestamp for $file\n";
    exit(2);
}
if ($exitcode != 0) {
    print "ERROR: $file check $checkname failed, $checkdetail\n";
    exit($exitcode);
}
print "$file $checkname - $checkdetail ";
exit(0);

If this approach works well in a trial, you can modify your configuration management tools to automatically create these scripts instead of creating nrpe.cfg.

Posted on

from filebeat to logstash

In your Logstash pipeline configuration (a file under /etc/logstash/conf.d/, not logstash.yml) you can parse the data coming from the earlier filebeat example with a ‘filter’.

A ‘kv’ filter is for splitting data in key-value pairs and the default is to expect a comma as a separator.

The values are converted to strings unless otherwise specified. The ‘mutate’ section allows them to be converted to integers. Finally, the timestamp of the event needs to be when it happened and not when logstash received the data. Therefore, the ‘date’ filter is used to convert the value in logdate and use that as the timestamp.

# Parse events shipped by filebeat with document_type "mylog".
filter {
if [type] == "mylog" {

# Split each line into key=value fields; pairs are separated by commas
# (field_split); '=' is the default key/value separator.
kv {
field_split => ","

}
# kv produces strings; convert the numeric fields to integers.
mutate {
convert => { "date" => "integer" }
convert => { "month" => "integer" }
convert => { "year" => "integer" }
convert => { "hour" => "integer" }
convert => { "minute" => "integer" }
convert => { "second" => "integer" }
}
# Use the event's own logdate (yyyyMMddHHmm) as @timestamp instead of
# the time logstash received the event.
date {
locale => "en"
match => [ "logdate" , "yyyyMMddHHmm" ]
target => "@timestamp"
}
}

}

Posted on

using filebeat to watch a log

In filebeat.yml,  add the file you are watching to the ‘paths’ section as follows:

paths:
- /var/log/my.log
### Logstash as output
logstash:
# The Logstash hosts
hosts: ["192.168.0.1:5044"]

This will forward the contents of my.log to logstash.

If, like me, you like to keep traffic and  load to a minimum,  then you can add this to filebeat.yml:

scan_frequency: 180s

And if, in your logstash config, you want to be able to know which input you are handling, then define the ‘type’ as follows:

document_type:  mylog

 

Posted on

from script to service

This is a handy wrapper to make that quickly written script behave like a ‘service’.  This is reprinted here and was originally written by Tomas Nevar.

It has been amended to run watchlog.pl.

#!/bin/sh
#############################################
# AUTHOR:   Tomas Nevar (tomas@lisenet.com)
# NAME:     init-service-template
# VERSION:  1.0
# DATE:     01/02/2016 (dd/mm/yy)
# LICENCE:  Copyleft free software
#############################################
#
# init-service-template:  Starts an init daemon
#
# chkconfig: 2345 99 01
# description: This is a template daemon.
#
# processname: $PROCESS_NAME
# pidfile: /var/run/$PROCESS_NAME.pid
#

### BEGIN INIT INFO
# Provides: $PROCESS_NAME
# Required-Start: $network
# Required-Stop: $network
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Starts the $PROCESS_NAME daemon
# Description: This is a template daemon.
### END INIT INFO

#############################################
# Revision History
#
# Version 1.0 - Tomas Nevar
# Change notes: Initial release, service calls a wrapper script.
#
#############################################

#
### These variables need setting
#
PROCESS_NAME="watchlog.pl";
PROCESS_USER="root";
PROCESS_PID_PATH="/var/run";
PROCESS_LOG_PATH="/var/log/$PROCESS_NAME";
PROCESS_SCRIPT="/usr/local/watchlog.pl";

# These are generated using the values above
# (simplified from the original nested ""$VAR"" quoting, which worked
# only because the unquoted fragments contained no whitespace).
PROCESS_PID="$PROCESS_PID_PATH/$PROCESS_NAME.pid";
PROCESS_LOG="$PROCESS_LOG_PATH/$PROCESS_NAME.log";

# Do not change this
RETVAL="0";
CLR_RED=$(tput setaf 1);
CLR_GREEN=$(tput setaf 2);
CLR_RESET=$(tput sgr0);

#############################################
#
### Sanity checks
#

# Ensure the $PROCESS_USER exists on the system
id -u "$PROCESS_USER" >/dev/null 2>&1;
RETVAL=$?;
if [ ! "$RETVAL" -eq 0 ]; then
echo "ERROR: $PROCESS_USER user does not exist on the system. Try running:
sudo useradd -m -s $SHELL $PROCESS_USER";
exit 1;
fi

# Ensure the log folder $PROCESS_LOG_PATH exists
# and is owned by the $PROCESS_USER user.
if [ ! -d "$PROCESS_LOG_PATH" ]; then
mkdir -p "$PROCESS_LOG_PATH";
fi
chown -R "$PROCESS_USER":root "$PROCESS_LOG_PATH";

# Ensure the wrapper script $PROCESS_SCRIPT
# exists and is executable.
if [ ! -x "$PROCESS_SCRIPT" ]; then
# (typo fix: was "does not exits")
echo "ERROR: Script $PROCESS_SCRIPT does not exist or is not executable.";
exit 1;
fi
#############################################
status() {
# Report whether $PROCESS_NAME is running.
# Side effect: sets RETVAL (0 = running, 1 = error state, 999 = cleanly
# stopped); start() relies on this, so call status in the current shell.
# Fix: the script runs under #!/bin/sh, where test(1) uses '=' -- the
# original '==' is a bashism and is not POSIX.
if [ -f "$PROCESS_PID" ]; then
RUNNING_PID=$(cat $PROCESS_PID);
# The pid from the pid file must match a live instance of the script.
IS_RUNNING=$(ps -ef|grep "$RUNNING_PID"|grep "$PROCESS_SCRIPT"|wc -l);
if [ "$IS_RUNNING" -gt 0 ]; then
echo ""$PROCESS_NAME" (pid "$RUNNING_PID") is running...";
else
echo "ERROR: "$PROCESS_NAME" has died but pid "$RUNNING_PID" exists. Try running:";
echo " sudo rm -f "$PROCESS_PID"";
RETVAL=1;
exit 1;
fi
else
# No pid file: look for a stray process running without one.
IS_RUNNING=$(ps -ef|grep "$PROCESS_SCRIPT"|grep -v grep|wc -l);
if [ "$IS_RUNNING" -gt 0 ]; then
echo "ERROR: "$PROCESS_NAME" is running without a pid file";
RETVAL=1;
exit 1;
else
if [ "$1" = "start" ]; then
echo "ERROR: "$PROCESS_NAME" has failed to "$1"";
RETVAL=1;
exit 1;
fi
if [ "$1" = "restart" ]; then
echo "ERROR: "$PROCESS_NAME" has failed to "$1"";
RETVAL=1;
exit 1;
fi
if [ "$1" = "stop" ]; then
echo ""$PROCESS_NAME" is stopped";
RETVAL=999;
else
echo ""$PROCESS_NAME" is stopped";
RETVAL=999;
fi
fi
fi
}

start() {
# Only start the service if it is stopped.
# RETVAL is set to '999' if the service is stopped.
# status runs in the current shell (only output is redirected), so its
# RETVAL assignment is visible here.
status >/dev/null 2>/dev/null;
if [ "$RETVAL" -eq 1 ]; then
echo "WARN: Something went wrong...";
exit 1;
fi
# NOTE(review): when the service is already running (RETVAL stays 0)
# this prints "Starting ..." and exits 0 without launching a second
# copy -- the message is misleading; confirm whether "already running"
# wording is wanted.
if [ ! "$RETVAL" -eq 999 ]; then
echo "Starting "$PROCESS_NAME": ";
exit 0;
fi

echo -n $"Starting "$PROCESS_NAME": "
# Launch the wrapper in the background as $PROCESS_USER; the echoed $!
# (the wrapper's pid) is captured into the pid file via the redirect.
MYCMD=""$PROCESS_SCRIPT" > "$PROCESS_LOG" 2>&1 & echo \$!";
#echo "$MYCMD";
su - "$PROCESS_USER" -c "$MYCMD" > "$PROCESS_PID";
RETVAL=$?;
if [ "$RETVAL" -eq 0 ]; then
echo -e "\t\t\t\t [$CLR_GREEN OK $CLR_RESET]";
fi
}

stop() {
# Stop the service: kill the process group recorded in the pid file,
# then remove the pid file. Reports FAILED if no pid file exists.
echo -n $"Stopping "$PROCESS_NAME": "
if [ -f "$PROCESS_PID" ]; then
RUNNING_PID=$(cat $PROCESS_PID);
# Only kill if the pid really belongs to a live instance of the script.
IS_RUNNING=$(ps -ef|grep "$RUNNING_PID"|grep "$PROCESS_SCRIPT"|wc -l);
if [ "$IS_RUNNING" -gt 0 ]; then
# Kill by program group id rather than PID
# to be sure all child processes are killed.
PGID=$(ps -o pgid= "$RUNNING_PID"|tr -d ' ');
kill -9 -"$PGID";
RETVAL=$?;
fi
rm -f "$PROCESS_PID";
# NOTE(review): if the process was already dead, RETVAL keeps its prior
# value (0 from the top of the file), so OK is still printed.
if [ "$RETVAL" -eq 0 ]; then
echo -e "\t\t\t\t [$CLR_GREEN OK $CLR_RESET]";
fi
else
# No pid file: distinguish "running unmanaged" from "not running".
IS_RUNNING=$(ps -ef|grep "$PROCESS_SCRIPT"|grep -v grep|wc -l);
if [ "$IS_RUNNING" -gt 0 ]; then
echo -e "\t\t\t\t [$CLR_RED FAILED $CLR_RESET]";
echo "No pid file and "$PROCESS_NAME" is running. Try running:";
echo " sudo ps -ef|grep "$PROCESS_NAME"";
else
echo -e "\t\t\t\t [$CLR_RED FAILED $CLR_RESET]";
fi
fi
}

# See how we were called.
# Dispatch on the init action; restart is stop + start + status.
case "$1" in
start)
start
;;
stop)
stop
;;
status)
status
;;
restart)
stop
start
status
;;
*)
echo $"Usage: $0 {start|stop|status|restart}"
exit 1;
esac

 

Posted on

watching logs with perl

When you have a log file with lots of unpredictably formatted entries, it can be difficult to come up with a nice grok filter to parse it. This is frustrating if you only want a small amount of data from a very big log.
Below is a quickly written Perl script to watch a log file and print out a summary of the number of times a GET request, indicated with ‘q=’, was made for each 10 minute interval.
This gave me a nice summary.log which looked like this:

hits-per-10m=35,server=myservername,logdate=201701221032,year=2017,month=01,date=22,hour=10,minute=32,second=45


#!/usr/bin/perl
# Watch a log and write key-value pairs to a file.
#
# Tails /var/log/myfile.log and, via printCount, appends one summary
# line per 10-minute interval to /var/log/summary.log.

use File::Tail;
use URI::Escape ;

$file = File::Tail->new("/var/log/myfile.log");
%months = qw( Jan 1 Feb 2 Mar 3 Apr 4 May 5 Jun 6 Jul 7 Aug 8 Sep 9 Oct 10 Nov 11 Dec 12);
($sec,$min,$hour,$mday,$mon,$year,$wday,$yday,$isdst) = localtime();
$year=$year+1900;
# $hold marks the next 10-minute bucket still owed a summary line.
$hold = int($min / 10);
$count=0;
$lasthour = 99;     # sentinel: forces a $hold reset on the first line

while (defined(my $line= $file->read)) {
    $line=~s/\s+/ /g;
    @parts= split /[ ]/,$line;
    # Field layout of this particular log: month day hh:mm:ss server ... ip:port
    $server=$parts[3];
    $month=$months{$parts[0]};
    $date=$parts[1];
    ($hour,$min,$sec) = split /\:/, $parts[2];
    ($ipaddr,$port) = split /\:/,$parts[5];

    # The log may skip minutes entirely, so rather than a timer we flush
    # a summary the first time a line lands in a later 10-minute bucket
    # than $hold. If the hour changed since the last entry, re-seed $hold.
    if ($lasthour != $hour) {
        $hold = int($min / 10);
    }
    $lasthour = $hour;

    # The original spelled this out as six copy-pasted if-blocks (one per
    # bucket); this is the identical rule in one step: flush when the
    # current bucket has caught up with $hold, then arm the next bucket.
    my $bucket = int($min / 10);
    if ($hold <= $bucket) {
        &printCount;
        $hold = $bucket + 1;
    }

    # NOTE(review): $count increments for every log line, not only lines
    # matching the 'q=' search below -- confirm that is intended.
    $count++;
    $lastmin = $min;

    # check if the log entry has the value you are counting
    # in this case I was looking for the search term which followed 'q='
    if ($line =~ /GET/) {
        ($junk,$q) = split /[[\&\?]q=/, $line;
        ($q, $junk) = split /[\& ]/, $q;
        $q = uri_unescape($q);
        # remove any commas because I will be using them as separators
        $q =~s/\,/ /g;
    }
} # end while

sub printCount {
    # Append one summary line for the elapsed 10-minute period.
    # logdate must be yyyyMMddHHmm (it is parsed by the logstash 'date'
    # filter and shown zero-padded in the sample output), so the month,
    # day, hour and minute are padded with sprintf; the original
    # interpolated them unpadded, producing e.g. "2017122..." for January
    # and breaking the fixed-width date format.
    my $logdate = sprintf("%04d%02d%02d%02d%02d", $year, $month, $date, $hour, $min);
    open(OUT, ">>/var/log/summary.log") or return;
    printf OUT "hits-per-10m=%d,server=%s,logdate=%s,year=%04d,month=%02d,date=%02d,hour=%02d,minute=%02d,second=%02d\n",
        $count, $server, $logdate, $year, $month, $date, $hour, $min, $sec;
    close OUT;
    $count = 0;
}
 

Posted on

dynamically creating thumbnails using PHP and ffmpeg

You can create a thumbnail for a video file using mod_rewrite and php.

Let’s start with a URL to your server that is expecting an image to be returned.

http://www.myserver.com:8000/Image.svc/filename.png?file=videofile.mp4&frame=7500&width=360&height=160

If the image already exists, return it; if not, then create it from the video file frame using the height and width specified.
Add a section to Apache’s config file that is like this:

Listen 192.168.101.1:8000

<VirtualHost 192.168.101.1:8000>
 DocumentRoot "/var/www/html/thumbnails"
 # LogLevel alert rewrite:trace3
 <Directory "/var/www/html/thumbnails/Image.svc">
 RewriteEngine on
 RewriteCond %{REQUEST_FILENAME} !-f
 RewriteRule ^(.+)\.png$ gen-thumbnail.php [L,QSA]

This says to call gen-thumbnail.php if the requested file does not exist.

<?php
// This script is called by Apache httpd
// using the mod_rewrite engine
// for the condition 'file not found'
// It creates an image file (one video frame, scaled) and passes it back
// to the browser.

// Get the full url
// If this fails, check that the server has the php module enabled

if (!isset($_SERVER['REQUEST_URI']))
{
       $_SERVER['REQUEST_URI'] = substr($_SERVER['PHP_SELF'],1 );
       if (isset($_SERVER['QUERY_STRING'])) { $_SERVER['REQUEST_URI'].='?'.$_SERVER['QUERY_STRING']; }
}

// Parse the url
$url = $_SERVER['REQUEST_URI'];
$path=(parse_url($url, PHP_URL_PATH));
$src = "http://$_SERVER[HTTP_HOST]$_SERVER[REQUEST_URI]";
$query=(parse_url($url, PHP_URL_QUERY));
$query =  urldecode($query);
$query = str_replace('&amp;','&',$query);

// Check that there is a file name
if(isset($_GET['file'])) {
  $videofile = $_GET['file'];
} else {
  print "No source video file, exiting.";
  exit;
}

// Set some defaults for width and height if not specified.
// Cast to int: these values end up in a shell command, so they must not
// be allowed to carry arbitrary strings (command injection).
if(isset($_GET['width'])) {
  $width = (int)$_GET['width'];
} else {
  $width = 350;
}
if(isset($_GET['height'])) {
  $height = (int)$_GET['height'];
} else {
  $height = 350;
}

// The thumbnail is generated from this frame, or a default if not specified
if(isset($_GET['frame'])) {
  $frame = (int)$_GET['frame'];
} else {
  $frame = 3350;
}

// The original called parse_str($query) with no result array, which
// re-extracted raw query values over the sanitised variables above (and
// is a fatal error from PHP 8.0). Parse into an array instead.
parse_str($query, $queryParams);
$thumbSize=$width.'x'.$height;

// Build the ffmpeg command to generate video thumbnail
$ffmpeg_installation_path = "/bin/ffmpeg";
$videofile =  urldecode($videofile);
// Amend $videofile here if the videos are stored in a different directory
// to the thumbnails.
$videofile = escapeshellarg($videofile);
// The output path comes from the requested URL, so escape it too.
$thumbnail = "/var/www/html/thumbnails".$path;
$thumbnailArg = escapeshellarg($thumbnail);
// select frames whose index n is >= the requested frame; -vframes 1
// keeps only the first match
$frameexp = "'select=gte(n\,".$frame.")'";

$cmd = "{$ffmpeg_installation_path}  -i {$videofile}  -an -vf {$frameexp}  -vframes 1  -s {$thumbSize} -r 1 -y -vcodec mjpeg -f image2 {$thumbnailArg} 2>&1";
//echo $cmd;

// Execute the ffmpeg command (but never when the rewrite target itself
// was requested directly)
if ($path != '/Image.svc/gen-thumbnail.php') {
   exec($cmd, $output, $retval);
}

// Check if a file has been created
if (file_exists($thumbnail)) {
  if (filesize ($thumbnail)>0)
    {
     $fp = fopen($thumbnail, 'rb');
    // send the right headers
    // NOTE(review): ffmpeg is invoked with -vcodec mjpeg, so the payload
    // is JPEG data served with a .png name -- browsers sniff it, but
    // confirm whether image/jpeg would be more correct here.
     header("Content-Type: image/png");
     header("Content-Length: " . filesize($thumbnail));
    // dump the picture and stop the script
    fpassthru($fp);
    exit;
    }
}

// End of script

 

Posted on

Send Nagios alerts to Slack

A simple Perl script to send a Slack message summarising the over-night Nagios alerts.

## Nagios to Slack reporter
#
# Read the Nagios log and send a summary of recent alerts to Slack.
# Run from crontab, e.g.:
#   30 08 * * 1,2,3,4,5 /usr/local/nagios/libexec/nagios-slack.pl
# to send a message to Slack showing the overnight issues.
# Note: Nagios logs usually roll at midnight, so running this at 8:30am
# sends a listing of issues from midnight to 8:30am to Slack.
#
# Martin Jones
# Version 1.0.0
#
# (The published listing was mangled by a reformatter: '=~' had been
# split into '= ~' -- which is assignment of bitwise-complement, not a
# regex bind -- and '<LOG>' had spaces inside the readline brackets.
# Both are repaired below.)

$ENV{'PERL_LWP_SSL_VERIFY_HOSTNAME'} = 0;

use Net::SSL();
use Date::Manip;
use Time::Local;
use LWP::UserAgent;

# Put your own Slack api token here
my $slackurl = 'https://slack.com/api/chat.postMessage?token=xoxp-ZZZZZZZZZZ-YYYYYYYY-XXXXXXXXXXX-5dc5ac753b&channel=operations&username=nagios&parse=full&text=';

# use current time as the cut-off: only entries stamped before now count
$epoc = time();

# or choose a set time today
# ($sec, $min, $hour, $day, $month, $year, $wday, $yday, $isdst) = localtime();
# $hour = 10;
# $min = 30;
# Calculate 10:30 am today as an epoch value
# $epoc = timelocal($sec, $min, $hour, $day, $month - 1, $year);

# Read the log looking for alerts
open(LOG, "/usr/local/nagios/var/nagios.log");
while ($line = <LOG>) {
    @values = split / /, $line;
    # Nagios log lines start with "[epoch]" -- strip the brackets.
    if (substr($values[0], 0, 1) eq "[") {
        $values[0] =~ s/[\[\]]//g;
    }
    if ($values[0] < $epoc) {
        if ($line =~ /SERVICE ALERT/) {
            if (($line =~ /WARNING/) || ($line =~ /CRITICAL/)) {
                # HARD state changes only: ignore SOFT retries.
                if ($line =~ /HARD/) {
                    $realTime = localtime($values[0]);
                    @parts = split /\s+/, $realTime;
                    # keep hh:mm of the alert time (drop the :ss)
                    push @alertTime, substr($parts[3], 0, -3);
                    push @latest, $line;
                }
            }
        }
    }
} # end while
close LOG;

# Display a maximum of 10 alerts (in case it's been a bad day)
if ($#latest > 9) {
    $size = 9;
} else {
    $size = $#latest;
}

$body = "Recent alerts: ";
for ($i = 0; $i <= $size; $i++) {
    $line = pop @latest;
    $alertTime = pop @alertTime;
    @values = split /;/, $line;
    ($spare, $host) = split /:/, $values[0];
    $body .= $host . " " . $alertTime . " " . $values[1] . "; ";
}
# The original compared against "'Recent alerts: " (stray leading quote),
# so the no-alerts branch could never fire; both strings are fixed.
if ($body eq "Recent alerts: ") {
    $body = "No new alerts.";
}
# NOTE(review): $body is appended to the URL without URI-escaping;
# parse=full makes Slack tolerant of most text, but uri_escape would be
# safer -- confirm against the Slack API.
$slackurl .= $body;
$slackresponse = http_request($slackurl);

sub http_request {
    # Perform an HTTP GET on the supplied URL and return the decoded
    # body on success, or 0 on any failure (missing URL or HTTP error).
    my ($target_url) = @_;

    if (!defined($target_url) || $target_url eq "") {
        warn "No URL defined for http_request\n";
        return 0;
    }

    my $agent = LWP::UserAgent->new(
        ssl_opts => { verify_hostname => 0 },
        timeout  => 180,
    );
    $agent->env_proxy;
    $agent->agent('Mozilla/5.0');

    my $response = $agent->request(HTTP::Request->new(GET => $target_url));

    return $response->decoded_content if $response->is_success;

    print "Error: " . $response->status_line . "\n";
    return 0;
}