2018-10-06 20:17:46 -05:00
|
|
|
# -*- coding: utf-8; -*-
|
2016-12-05 19:06:34 -06:00
|
|
|
################################################################################
|
|
|
|
#
|
|
|
|
# Rattail -- Retail Software Framework
|
2018-10-06 20:17:46 -05:00
|
|
|
# Copyright © 2010-2018 Lance Edgar
|
2016-12-05 19:06:34 -06:00
|
|
|
#
|
|
|
|
# This file is part of Rattail.
|
|
|
|
#
|
|
|
|
# Rattail is free software: you can redistribute it and/or modify it under the
|
2017-07-06 23:38:50 -05:00
|
|
|
# terms of the GNU General Public License as published by the Free Software
|
|
|
|
# Foundation, either version 3 of the License, or (at your option) any later
|
|
|
|
# version.
|
2016-12-05 19:06:34 -06:00
|
|
|
#
|
|
|
|
# Rattail is distributed in the hope that it will be useful, but WITHOUT ANY
|
|
|
|
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
|
2017-07-06 23:38:50 -05:00
|
|
|
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
|
|
|
|
# details.
|
2016-12-05 19:06:34 -06:00
|
|
|
#
|
2017-07-06 23:38:50 -05:00
|
|
|
# You should have received a copy of the GNU General Public License along with
|
|
|
|
# Rattail. If not, see <http://www.gnu.org/licenses/>.
|
2016-12-05 19:06:34 -06:00
|
|
|
#
|
|
|
|
################################################################################
|
|
|
|
"""
|
|
|
|
Tempmon server daemon
|
|
|
|
"""
|
|
|
|
|
|
|
|
from __future__ import unicode_literals, absolute_import
|
|
|
|
|
|
|
|
import time
|
|
|
|
import datetime
|
|
|
|
import logging
|
|
|
|
|
2018-10-07 18:16:18 -05:00
|
|
|
import six
|
2018-10-06 20:17:46 -05:00
|
|
|
import humanize
|
2018-10-08 00:52:16 -05:00
|
|
|
from sqlalchemy import orm
|
2018-10-07 18:16:18 -05:00
|
|
|
from sqlalchemy.exc import OperationalError
|
2018-10-06 20:17:46 -05:00
|
|
|
|
2016-12-05 19:06:34 -06:00
|
|
|
from rattail.db import Session, api
|
2016-12-05 20:59:05 -06:00
|
|
|
from rattail_tempmon.db import Session as TempmonSession, model as tempmon
|
2016-12-05 19:06:34 -06:00
|
|
|
from rattail.daemon import Daemon
|
|
|
|
from rattail.time import localtime, make_utc
|
|
|
|
from rattail.mail import send_email
|
|
|
|
|
|
|
|
|
|
|
|
log = logging.getLogger(__name__)
|
|
|
|
|
|
|
|
|
|
|
|
class TempmonServerDaemon(Daemon):
    """
    Linux daemon implementation of tempmon server.

    Periodically polls the tempmon database for recent probe readings,
    updates probe/client status accordingly, and sends alert emails when
    temperatures fall outside their configured ranges (or when readings
    stop arriving altogether).
    """
    # strftime format used when rendering timestamps
    timefmt = '%Y-%m-%d %H:%M:%S'

    def run(self):
        """
        Keeps an eye on tempmon readings and sends alerts as needed.

        Loops forever, checking all enabled clients' readings once per
        configured delay interval (default 60 seconds).
        """
        # extra email "types" to send in addition to the status email; see update_status()
        self.extra_emails = self.config.getlist('rattail.tempmon', 'extra_emails', default=[])
        delay = self.config.getint('rattail.tempmon', 'server.delay', default=60)
        # counts consecutive failed check passes; used to suppress noisy
        # logging across a simple database restart
        self.failed_checks = 0

        while True:
            self.check_readings()
            time.sleep(delay)

    def check_readings(self):
        """
        Perform one full pass over all enabled, non-archived clients,
        checking their probes' recent readings.  Commits on success;
        rolls back (and maybe logs) on error, but never raises, so the
        daemon's main loop keeps running.
        """
        # log.debug("checking readings")
        self.now = make_utc()
        session = TempmonSession()

        try:
            clients = session.query(tempmon.Client)\
                             .filter(tempmon.Client.enabled == True)\
                             .filter(tempmon.Client.archived == False)
            for client in clients:
                self.check_readings_for_client(session, client)
            session.flush()

        except Exception as error:
            log_error = True
            self.failed_checks += 1
            session.rollback()

            # our goal here is to suppress logging when we see connection
            # errors which are due to a simple postgres restart.  but if they
            # keep coming then we'll go ahead and log them (sending email)
            if isinstance(error, OperationalError):

                # this first test works upon first DB restart, as well as the
                # first time after DB stop.  but in the case of DB stop,
                # subsequent errors will instead match the second test
                if error.connection_invalidated or (
                        'could not connect to server: Connection refused' in six.text_type(error)):

                    # only suppress logging for 3 failures, after that we let them go
                    # TODO: should make the max attempts configurable
                    if self.failed_checks < 4:
                        log_error = False
                        log.debug("database connection failure #%s: %s",
                                  self.failed_checks,
                                  six.text_type(error))

            # send error email unless we're suppressing it for now
            if log_error:
                log.exception("Failed to check client probe readings (but will keep trying)")

        else: # checks were successful
            self.failed_checks = 0
            session.commit()

        finally:
            session.close()

    def check_readings_for_client(self, session, client):
        """
        Check readings for all (enabled) probes for the given client.

        If the client was previously online but no probe has a recent
        reading, and enough time has passed (3 missed reading cycles plus
        a minute), mark the client offline and send a notification email.
        """
        # cutoff is calculated as the client delay (i.e. how often it takes
        # readings) plus one minute.  we "should" have a reading for each probe
        # within that time window.  if no readings are found we will consider
        # the client to be (possibly) offline.
        delay = client.delay or 60
        cutoff = self.now - datetime.timedelta(seconds=delay + 60)
        online = False
        for probe in client.enabled_probes():
            if self.check_readings_for_probe(session, probe, cutoff):
                online = True

        # if client was previously marked online, but we have no "new"
        # readings, then let's look closer to see if it's been long enough to
        # mark it offline
        if client.online and not online:

            # we consider client offline if it has failed to take readings for
            # 3 times in a row.  allow a one minute buffer for good measure.
            cutoff = self.now - datetime.timedelta(seconds=(delay * 3) + 60)
            reading = session.query(tempmon.Reading)\
                             .filter(tempmon.Reading.client == client)\
                             .filter(tempmon.Reading.taken >= cutoff)\
                             .first()
            if not reading:
                log.info("marking client as OFFLINE: %s", client)
                client.online = False
                send_email(self.config, 'tempmon_client_offline', {
                    'client': client,
                    'now': localtime(self.config, self.now, from_utc=True),
                })

    def check_readings_for_probe(self, session, probe, cutoff):
        """
        Check readings for the given probe, within the time window defined by
        the given cutoff.

        Updates the probe status according to where the latest reading
        falls relative to the probe's configured temperature ranges, or
        flags an error status if no recent reading exists.

        :returns: ``True`` if a recent reading was found (i.e. probe's
           client appears online), ``False`` otherwise.
        """
        # we really only care about the latest reading
        reading = session.query(tempmon.Reading)\
                         .filter(tempmon.Reading.probe == probe)\
                         .filter(tempmon.Reading.taken >= cutoff)\
                         .order_by(tempmon.Reading.taken.desc())\
                         .first()
        if reading:

            # is reading above critical max?
            if reading.degrees_f >= probe.critical_temp_max:
                self.update_status(probe, self.enum.TEMPMON_PROBE_STATUS_CRITICAL_HIGH_TEMP, reading)

            # is reading above good max?
            elif reading.degrees_f >= probe.good_temp_max:
                self.update_status(probe, self.enum.TEMPMON_PROBE_STATUS_HIGH_TEMP, reading)

            # is reading below critical min?  note, this must be tested
            # *before* the "good min" check below, since any critically-low
            # reading would also satisfy that weaker condition, and the probe
            # would (wrongly) never be given critical-low status
            elif reading.degrees_f <= probe.critical_temp_min:
                self.update_status(probe, self.enum.TEMPMON_PROBE_STATUS_CRITICAL_LOW_TEMP, reading)

            # is reading below good min?
            elif reading.degrees_f <= probe.good_temp_min:
                self.update_status(probe, self.enum.TEMPMON_PROBE_STATUS_LOW_TEMP, reading)

            else: # temp is good
                self.update_status(probe, self.enum.TEMPMON_PROBE_STATUS_GOOD_TEMP, reading)

            return True

        else: # no current readings for probe
            self.update_status(probe, self.enum.TEMPMON_PROBE_STATUS_ERROR)
            return False

    def update_status(self, probe, status, reading=None):
        """
        Update the given probe's status, and send alert email(s) as
        appropriate.

        Emails are rate-limited: after any alert is sent, no further email
        goes out for that probe until ``probe.status_alert_timeout``
        minutes elapse.  Additionally, even the *first* email for a new
        status is delayed until the status has persisted past its
        configured threshold.

        :param probe: the ``tempmon.Probe`` being updated.
        :param status: new status code (a ``TEMPMON_PROBE_STATUS_*`` enum value).
        :param reading: the ``tempmon.Reading`` which triggered the status,
           if any (``None`` for the error/no-readings case).
        """
        # common context for any email we might send below
        data = {
            'probe': probe,
            'status': self.enum.TEMPMON_PROBE_STATUS[status],
            'reading': reading,
            'taken': localtime(self.config, reading.taken, from_utc=True) if reading else None,
            'now': localtime(self.config, self.now, from_utc=True),
        }

        prev_status = probe.status
        prev_alert_sent = probe.status_alert_sent
        if probe.status != status:
            probe.status = status
            probe.start_status(status, self.now)
            probe.status_changed = self.now
            probe.status_alert_sent = None

            # send "high temp" email if previous status was critical, even if
            # we haven't been high for that long overall
            if (status == self.enum.TEMPMON_PROBE_STATUS_HIGH_TEMP
                and prev_status in (self.enum.TEMPMON_PROBE_STATUS_CRITICAL_HIGH_TEMP,
                                    self.enum.TEMPMON_PROBE_STATUS_CRITICAL_TEMP)
                and prev_alert_sent):
                send_email(self.config, 'tempmon_high_temp', data)
                probe.status_alert_sent = self.now
                return

        # send email when things go back to normal (i.e. from any other status)
        if status == self.enum.TEMPMON_PROBE_STATUS_GOOD_TEMP and prev_alert_sent:
            send_email(self.config, 'tempmon_good_temp', data)
            probe.status_alert_sent = self.now
            return

        # no (more) email if status is good
        if status == self.enum.TEMPMON_PROBE_STATUS_GOOD_TEMP:
            return

        # no email if we already sent one...until timeout is reached
        if probe.status_alert_sent:
            timeout = datetime.timedelta(minutes=probe.status_alert_timeout)
            if (self.now - probe.status_alert_sent) <= timeout:
                return

        # delay even the first email, until configured threshold is reached
        timeout = probe.timeout_for_status(status) or 0
        timeout = datetime.timedelta(minutes=timeout)
        started = probe.status_started(status) or probe.status_changed
        if (self.now - started) <= timeout:
            return

        # map status codes to email template names
        msgtypes = {
            self.enum.TEMPMON_PROBE_STATUS_CRITICAL_HIGH_TEMP : 'tempmon_critical_high_temp',
            self.enum.TEMPMON_PROBE_STATUS_HIGH_TEMP          : 'tempmon_high_temp',
            self.enum.TEMPMON_PROBE_STATUS_LOW_TEMP           : 'tempmon_low_temp',
            self.enum.TEMPMON_PROBE_STATUS_CRITICAL_LOW_TEMP  : 'tempmon_critical_low_temp',
            self.enum.TEMPMON_PROBE_STATUS_ERROR              : 'tempmon_error',
        }

        self.send_email(status, msgtypes[status], data)

        # maybe send more emails if config said so
        for msgtype in self.extra_emails:
            self.send_email(status, msgtype, data)

        probe.status_alert_sent = self.now

    def send_email(self, status, template, data):
        """
        Send a single status alert email for the probe in ``data``,
        enriching the template context with the probe URL, how long the
        status has persisted, and the last 90 minutes of readings.

        Note that this method *shadows* the module-level
        :func:`rattail.mail.send_email` but ultimately delegates to it.

        :param status: status code the alert concerns.
        :param template: email template ("type") name.
        :param data: template context dict; must contain ``'probe'``.
           Modified in place with the extra context described above.
        """
        probe = data['probe']
        started = probe.status_started(status) or probe.status_changed

        # determine URL for probe, if possible
        url = self.config.get('tailbone', 'url.tempmon.probe', default='#')
        data['probe_url'] = url.format(uuid=probe.uuid)

        since = localtime(self.config, started, from_utc=True)
        data['status_since'] = since.strftime('%I:%M %p')
        data['status_since_delta'] = humanize.naturaltime(self.now - started)

        # fetch last 90 minutes of readings
        session = orm.object_session(probe)
        recent_minutes = 90 # TODO: make configurable
        cutoff = self.now - datetime.timedelta(seconds=(60 * recent_minutes))
        readings = session.query(tempmon.Reading)\
                          .filter(tempmon.Reading.probe == probe)\
                          .filter(tempmon.Reading.taken >= cutoff)\
                          .order_by(tempmon.Reading.taken.desc())
        data['recent_minutes'] = recent_minutes
        data['recent_readings'] = readings
        data['pretty_time'] = lambda dt: localtime(self.config, dt, from_utc=True).strftime('%Y-%m-%d %I:%M %p')

        send_email(self.config, template, data)
|
2016-12-05 19:06:34 -06:00
|
|
|
|
|
|
|
|
|
|
|
def make_daemon(config, pidfile=None):
    """
    Returns a tempmon server daemon instance.

    :param config: rattail config object for the daemon.
    :param pidfile: optional path to the PID file; when not provided,
       falls back to config, then to a standard default path.
    """
    pid_path = pidfile or config.get(
        'rattail.tempmon', 'server.pid_path',
        default='/var/run/rattail/tempmon-server.pid')
    return TempmonServerDaemon(pid_path, config=config)
|