[seiscomp, scanloc] Install, add .gitignore

This commit is contained in:
2025-10-09 15:07:02 +02:00
commit 20f5301bb1
2848 changed files with 1315858 additions and 0 deletions

566
etc/init/access.py Normal file
View File

@ -0,0 +1,566 @@
from __future__ import print_function
import os
import string
import time
import re
import glob
import shutil
import sys
import random
import fnmatch
import seiscomp.core
import seiscomp.config
import seiscomp.kernel
import seiscomp.system
import seiscomp.client
import seiscomp.datamodel
DEBUG = 0


def parseBindPort(bind):
    """Extract the port number from a 'port' or 'ip:port' bind string.

    Returns -1 when the string contains more than one ':' separator.
    """
    parts = bind.split(":")
    if len(parts) in (1, 2):
        # With one token the port is the token itself, with two it is
        # the part after the colon - in both cases the last element.
        return int(parts[-1])
    return -1
def collectParams(container):
    """Recursively gather all defined parameters of a binding container.

    Returns a dict mapping each variable name to its comma-joined values.
    Parameters whose symbol stage is CS_UNDEFINED are skipped.
    """
    params = {}
    # Descend into nested groups and structures first
    for idx in range(container.groupCount()):
        params.update(collectParams(container.group(idx)))
    for idx in range(container.structureCount()):
        params.update(collectParams(container.structure(idx)))
    # Then collect the parameters defined at this level
    for idx in range(container.parameterCount()):
        param = container.parameter(idx)
        if param.symbol.stage == seiscomp.system.Environment.CS_UNDEFINED:
            continue
        params[param.variableName] = ",".join(param.symbol.values)
    return params
def logd(message):
    """Print a diagnostic message to stderr, but only when DEBUG is set."""
    if not DEBUG:
        return
    print(message, file=sys.stderr)
    sys.stderr.flush()
def log(message):
    """Write *message* plus a newline to stdout and flush immediately."""
    sys.stdout.write("%s\n" % message)
    sys.stdout.flush()
class InventoryResolver(object):
    """Expand network/station/location/stream patterns against an inventory.

    Codes are fnmatch-style patterns; epochs are matched against an
    optional start/end time window.
    """

    def __init__(self, inventory):
        # seiscomp.datamodel inventory object used for all lookups
        self._inventory = inventory
        pass

    """
    Those should be internal methods only
    """

    def _overlaps(self, pstart, pend, cstart, cend):
        # Return True when the epoch [pstart, pend] intersects the
        # constraint window [cstart, cend]; a None bound is open-ended.
        if cstart is None and cend is None:
            return True
        if cstart is None:
            # NOTE(review): default-constructed Time used as the lower
            # bound; presumably the epoch minimum - confirm with the
            # seiscomp.core.Time documentation.
            cstart = seiscomp.core.Time()
        if pend is not None:
            if pend > cstart:
                if cend is None or pstart < cend:
                    return True
        else:
            # Open-ended epoch: only the start needs to precede cend
            if cend is None or pstart < cend:
                return True
        return False

    def _getEnd(self, obj):
        # An unset end time raises ValueError; map that to None (open epoch)
        try:
            return obj.end()
        except ValueError:
            return None

    def _codeMatch(self, obj, code):
        # Empty pattern matches everything, otherwise a case-insensitive
        # fnmatch against the object's code
        if not code:
            return True
        if fnmatch.fnmatch(str(obj.code()).upper(), code.strip().upper()):
            return True
        return False

    def _collect(self, objs, count, code, start, end):
        """Return the children matching *code* and the given time window.

        *objs* is an index-based accessor (e.g. network.station) and
        *count* the number of children it provides.
        """
        items = []
        for i in range(0, count):
            obj = objs(i)
            # Check code
            if not self._codeMatch(obj, code):
                continue
            # Check time
            if not self._overlaps(obj.start(), self._getEnd(obj), start, end):
                continue
            items.append(obj)
        return items

    def _findStreams(self, location, code, start, end):
        # Raises when no stream of the location matches
        items = self._collect(location.stream, location.streamCount(), code, start, end)
        if len(items) == 0:
            raise Exception(
                "Location %s / %s does not have a stream named: %s in the time range %s / %s "
                % (location.code(), location.start(), code, start, end)
            )
        return items

    def _findLocations(self, station, code, start, end):
        # Raises when no sensor location of the station matches
        items = self._collect(
            station.sensorLocation, station.sensorLocationCount(), code, start, end
        )
        if len(items) == 0:
            raise Exception(
                "Station %s / %s does not have a location named: %s in the time range %s / %s "
                % (station.code(), station.start(), code, start, end)
            )
        return items

    def _findStations(self, network, code, start, end):
        # Raises when no station of the network matches
        items = self._collect(network.station, network.stationCount(), code, start, end)
        if len(items) == 0:
            raise Exception(
                "Network %s / %s does not have a station named: %s in the time range %s / %s "
                % (network.code(), network.start(), code, start, end)
            )
        return items

    def _findNetworks(self, code, start, end):
        # Raises when no network of the inventory matches
        items = self._collect(
            self._inventory.network, self._inventory.networkCount(), code, start, end
        )
        if len(items) == 0:
            raise Exception(
                "Inventory does not have a network named: %s in the time range %s / %s "
                % (code, start, end)
            )
        return items

    def _truncateDate(self, obj, currentDate):
        # Clamp currentDate into the epoch of obj
        if currentDate < obj.start():
            return obj.start()
        end = self._getEnd(obj)
        if end and currentDate > end:
            return end
        return currentDate

    """
    Public methods that should be used
    """

    def findStartDate(self, network, start, end):
        # Effective start of the access epoch for this network
        if start is None:
            return network.start()
        return self._truncateDate(network, start)

    def findEndDate(self, network, start, end):
        # Effective end of the access epoch; None for an open epoch
        if end is None:
            try:
                return network.end()
            except ValueError:
                return None
        return self._truncateDate(network, end)

    def expandStream(self, stations, streams, start, end):
        """Expand a comma-separated "loc.chan" pattern list into unique
        (locationCode, streamCode) pairs found at the given stations.
        """
        items = []
        for strm in streams.split(","):
            # Prepending "." guarantees a location token even for entries
            # without a dot: the last two tokens are location and stream
            (locationCode, streamCode) = ("." + strm).split(".")[-2:]
            for station in stations:
                try:
                    for location in self._findLocations(
                        station, locationCode, start, end
                    ):
                        if locationCode:
                            currentLocCode = location.code()
                        else:
                            currentLocCode = ""
                        try:
                            for stream in self._findStreams(
                                location, streamCode, start, end
                            ):
                                # Collect each (loc, stream) pair only once
                                try:
                                    items.index((currentLocCode, stream.code()))
                                except:
                                    items.append((currentLocCode, stream.code()))
                        except Exception as e:
                            # Best effort: skip locations without matching streams
                            pass
                except Exception as e:
                    # Best effort: skip stations without matching locations
                    pass
        return items

    def expandNetworkStation(self, ncode, scode, start, end):
        """Return a list of (network, [stations]) tuples matching the codes.

        Raises when no network/station combination matches at all.
        """
        items = []
        for network in self._findNetworks(ncode, start, end):
            try:
                stations = self._findStations(network, scode, start, end)
            except Exception as e:
                logd(str(e))
                continue
            # Append
            items.append((network, stations))
        if len(items) == 0:
            raise Exception(
                "Cannot find suitable %s network with station code %s ranging from %s / %s"
                % (ncode, scode, start, end)
            )
        return items
class AccessUpdater(seiscomp.client.Application):
    """Application that synchronizes access bindings with the database
    by sending notifier messages to the local messaging system.
    """

    def __init__(self, argc, argv):
        seiscomp.client.Application.__init__(self, argc, argv)
        self.setLoggingToStdErr(True)
        self.setMessagingEnabled(True)
        self.setDatabaseEnabled(True, True)
        # Notifiers are created and sent manually in run(), not applied
        # or interpreted locally
        self.setAutoApplyNotifierEnabled(False)
        self.setInterpretNotifierEnabled(False)
        self.setMessagingUsername("_sccfgupd_")
        ##self.setLoadConfigModuleEnabled(True)
        # Load all configuration modules
        ##self.setConfigModuleName("")
        self.setPrimaryMessagingGroup(seiscomp.client.Protocol.LISTENER_GROUP)

    def send(self, *args):
        """
        A simple wrapper that sends a message and tries to resend it in case of
        an error.
        """
        while not self.connection().send(*args):
            log("sending failed, retrying")
            time.sleep(1)

    def sendNotifiers(self, group):
        """Flush all accumulated notifiers to *group* in chunks of 100."""
        Nsize = seiscomp.datamodel.Notifier.Size()
        if Nsize > 0:
            logd("trying to apply %d changes..." % Nsize)
        else:
            logd("no changes to apply")
            return
        Nmsg = seiscomp.datamodel.Notifier.GetMessage(True)
        it = Nmsg.iter()
        msg = seiscomp.datamodel.NotifierMessage()
        # Maximum number of notifiers per message
        maxmsg = 100
        sent = 0
        mcount = 0
        try:
            try:
                while it.get():
                    msg.attach(seiscomp.datamodel.Notifier.Cast(it.get()))
                    mcount += 1
                    if msg and mcount == maxmsg:
                        sent += mcount
                        logd(
                            f"sending message ({sent / float(Nsize) * 100.0:5.1f} %)"
                        )
                        self.send(group, msg)
                        msg.clear()
                        mcount = 0
                    # self.sync("_sccfgupd_")
                    it.next()
            except:
                # Iterator exhaustion terminates the loop
                pass
        finally:
            # Send the remaining notifiers of the last partial chunk
            if msg.size():
                logd("sending message (%5.1f %%)" % 100.0)
                self.send(group, msg)
                msg.clear()
                # self.sync("_sccfgupd_")

    def run(self):
        """
        Reimplement the main loop of the application. This method collects
        all access bindings and updates the database: existing objects are
        updated, missing ones are created and objects that were not touched
        are removed. This tool is the only one that should write the access
        configuration into the database and thus manages the content.
        """
        # Initialize the basic directories
        filebase = seiscomp.system.Environment.Instance().installDir()
        descdir = os.path.join(filebase, "etc", "descriptions")
        keydir = os.path.join(filebase, "etc", "key", self.name())
        # Load definitions of the configuration schema
        defs = seiscomp.system.SchemaDefinitions()
        if defs.load(descdir) == False:
            log("could not read descriptions")
            return False
        if defs.moduleCount() == 0:
            log("no modules defined, nothing to do")
            return False
        # Create a model from the schema and read its configuration including
        # all bindings.
        model = seiscomp.system.Model()
        model.create(defs)
        model.readConfig()
        mod_access = model.module("access")
        # Maps (net, sta, loc, cha, user, startString) -> (end,)
        existingAccess = {}
        routing = self.query().loadRouting()
        inventory = self.query().loadInventory()
        iResolver = InventoryResolver(inventory)
        seiscomp.datamodel.Notifier.Enable()
        seiscomp.datamodel.Notifier.SetCheckEnabled(False)
        # Update access on basis of access module
        if mod_access:
            logd("Working on access bindings")
            for staid in mod_access.bindings.keys():
                binding = mod_access.getBinding(staid)
                if not binding:
                    continue
                # Flatten all binding parameters into a single dict
                params = {}
                for i in range(binding.sectionCount()):
                    params.update(collectParams(binding.section(i)))
                access_users = params.get("access.users")
                access_start = params.get("access.start")
                access_end = params.get("access.end")
                access_netonly = params.get("access.disableStationCode")
                access_streams = params.get("access.streams")
                if access_netonly is None or access_netonly == "false":
                    access_netonly = False
                else:
                    access_netonly = True
                # Without users there is nothing to grant
                if not access_users:
                    continue
                networkCode = staid.networkCode
                stationCode = staid.stationCode
                if access_start:
                    access_start = seiscomp.core.Time.FromString(
                        access_start, "%Y-%m-%d %H:%M:%S"
                    )
                if access_end:
                    access_end = seiscomp.core.Time.FromString(
                        access_end, "%Y-%m-%d %H:%M:%S"
                    )
                if access_netonly:
                    stationCode = ""
                ## Resolve Inventory
                try:
                    networkList = iResolver.expandNetworkStation(
                        networkCode, stationCode, access_start, access_end
                    )
                except Exception as e:
                    # Fall back to a network-wide entry with a fixed early start
                    # log("Access issue, cannot find network object for %s %s::\n\t %s" % (staid.networkCode, staid.stationCode, str(e)))
                    for user in access_users.split(","):
                        existingAccess[
                            (networkCode, "", "", "", user, "1980-01-01 00:00:00")
                        ] = (None,)
                    continue
                ## Generate routes for each network found
                for network, stations in networkList:
                    ## Resolve start date / end date of routing to be generated
                    aStart = iResolver.findStartDate(network, access_start, access_end)
                    aEnd = iResolver.findEndDate(network, access_start, access_end)
                    if not access_streams:
                        # No stream restriction: one entry per user
                        for user in access_users.split(","):
                            existingAccess[
                                (
                                    networkCode,
                                    stationCode,
                                    "",
                                    "",
                                    user,
                                    aStart.toString("%Y-%m-%d %H:%M:%S"),
                                )
                            ] = (aEnd,)
                        continue
                    ## Add the route or routes for this net
                    for locationCode, streamCode in iResolver.expandStream(
                        stations, access_streams, access_start, access_end
                    ):
                        for user in access_users.split(","):
                            existingAccess[
                                (
                                    networkCode,
                                    stationCode,
                                    locationCode,
                                    streamCode,
                                    user,
                                    aStart.toString("%Y-%m-%d %H:%M:%S"),
                                )
                            ] = (aEnd,)
        # Create or update an Access object for every collected entry
        for (networkCode, stationCode, locationCode, streamCode, user, start), (
            end,
        ) in existingAccess.items():
            access = routing.access(
                seiscomp.datamodel.AccessIndex(
                    networkCode,
                    stationCode,
                    locationCode,
                    streamCode,
                    user,
                    seiscomp.core.Time.FromString(start, "%Y-%m-%d %H:%M:%S"),
                )
            )
            if not access:
                access = seiscomp.datamodel.Access()
                access.setNetworkCode(networkCode)
                access.setStationCode(stationCode)
                access.setLocationCode(locationCode)
                access.setStreamCode(streamCode)
                access.setUser(user)
                access.setStart(
                    seiscomp.core.Time.FromString(start, "%Y-%m-%d %H:%M:%S")
                )
                access.setEnd(end)
                routing.add(access)
            else:
                # Only update the end time when it changed; reading an
                # unset end raises ValueError
                update = False
                try:
                    cend = access.end()
                    if (not end) or (end and cend != end):
                        access.setEnd(end)
                        update = True
                except ValueError as e:
                    if end:
                        access.setEnd(end)
                        update = True
                if update:
                    access.update()
        # Remove Access objects that are no longer configured; the index
        # is not advanced after a removal because remaining items shift down
        i = 0
        while i < routing.accessCount():
            access = routing.access(i)
            if (
                access.networkCode(),
                access.stationCode(),
                access.locationCode(),
                access.streamCode(),
                access.user(),
                access.start().toString("%Y-%m-%d %H:%M:%S"),
            ) not in existingAccess:
                routing.remove(access)
                continue
            i += 1
        self.sendNotifiers("ROUTING")
        return True
class Module(seiscomp.kernel.Module):
    """Config module for 'access': synchronizes access bindings with the
    database via the AccessUpdater application.
    """

    def __init__(self, env):
        seiscomp.kernel.Module.__init__(self, env, env.moduleName(__file__))

    def start(self):
        # Not a daemon: nothing to start
        return 0

    def updateConfig(self):
        """Determine the scmaster connection parameters and run AccessUpdater."""
        messaging = True
        messagingPort = 18180
        messagingProtocol = "scmp"
        try:
            messaging = self.env.getBool("messaging.enable")
        except:
            # Parameter not set: messaging stays enabled by default
            pass
        # If messaging is disabled in kernel.cfg, do not do anything
        if not messaging:
            log("- messaging disabled, nothing to do")
            return 0
        # Load scmaster configuration and figure the bind ports of scmaster out
        cfg = seiscomp.config.Config()
        seiscomp.system.Environment.Instance().initConfig(cfg, "scmaster")
        # First check the unencrypted port and prefer that
        p = parseBindPort(cfg.getString("interface.bind"))
        if p > 0:
            messagingPort = p
            try:
                bind = self.env.getString("messaging.bind")
                bindToks = bind.split(":")
                if len(bindToks) == 1:
                    messagingPort = int(bindToks[0])
                elif len(bindToks) == 2:
                    messagingPort = int(bindToks[1])
                else:
                    sys.stdout.write(f"E invalid messaging bind parameter: {bind}\n")
                    sys.stdout.write(" expected either 'port' or 'ip:port'\n")
                    return 1
            except:
                pass
        # Otherwise check if ssl is enabled
        else:
            p = parseBindPort(cfg.getString("interface.ssl.bind"))
            if p > 0:
                messagingPort = p
                messagingProtocol = "scmps"
        # Synchronize database configuration
        params = [
            self.name,
            "--console",
            "1",
            "-H",
            "%s://localhost:%d" % (messagingProtocol, messagingPort),
        ]
        # Create the database update app and run it
        # This app implements a seiscomp.client.Application and connects
        # to localhost regardless of connections specified in global.cfg to
        # prevent updating a remote installation by accident.
        app = AccessUpdater(len(params), params)
        return app()

57
etc/init/diskmon.py Normal file
View File

@ -0,0 +1,57 @@
from __future__ import print_function
import os, sys, subprocess as sub
import seiscomp.kernel, seiscomp.config
class Module(seiscomp.kernel.Module):
    """Init handler for diskmon: mails a warning when disks are nearly full."""

    def __init__(self, env):
        seiscomp.kernel.Module.__init__(self, env, env.moduleName(__file__))

    def start(self):
        """Check disk usage once and send a mail above the threshold.

        Returns 0 when nothing needs to be done or a warning was (already)
        sent, 1 when a previously reported condition has cleared.
        """
        cfg = seiscomp.config.Config()
        # Defaults first, then system-wide and user-specific overrides
        cfg.readConfig(os.path.join(self.env.SEISCOMP_ROOT, "etc", "defaults", self.name + ".cfg"))
        cfg.readConfig(os.path.join(self.env.SEISCOMP_ROOT, "etc", self.name + ".cfg"))
        try: cfg.readConfig(os.path.join(os.environ['HOME'], ".seiscomp", self.name + ".cfg"))
        except: pass
        run_dir = os.path.join(self.env.SEISCOMP_ROOT, "var", "run", self.name)
        try: os.makedirs(run_dir)
        except: pass
        # Set defaults
        threshold = 95
        emails = []
        try: threshold = cfg.getInt("threshold")
        except: pass
        try: emails = cfg.getStrings("emails")
        except: pass
        if len(emails) == 0:
            sys.stderr.write("%s: warning: nothing to do, no email addresses configured\n" % self.name)
            return 0
        # List all filesystems whose usage percentage exceeds the threshold
        cmd = 'df | awk -v max="%d" \'/[0-9]%%/ { use = $5; gsub("%%", "", use); if ( int(use) > max ) print $0; }\'' % threshold
        p = sub.Popen(['sh', '-c', cmd], stdout=sub.PIPE)
        msg = p.stdout.read().decode()
        # Marker file remembering that a warning mail was already sent
        statfile = os.path.join(run_dir, "msg_sent")
        if msg.find('\n') < 0:
            # Nothing to do
            try: os.remove(statfile)
            except: print("ERROR: could not remove stat file %s" % statfile)
            return 1
        # Message already sent?
        if os.path.exists(statfile):
            return 0
        try: hostname = os.uname()[1]
        except: hostname = 'unknown host'
        msg = "The following disks at %s are nearly full:\n\n" % hostname + msg
        # NOTE(review): file handle is not closed explicitly; relies on GC
        try: open(statfile, "w")
        except: print("ERROR: could not create stat file in %s" % statfile)
        # NOTE(review): msg is interpolated into a shell command line; the
        # content comes from df/awk output, but quoting issues are possible -
        # consider subprocess.run with an argument list
        os.system('echo "%s" | mail -s "disk nearly full" %s' % (msg, " ".join(emails)))
        return 0

18
etc/init/ew2sc.py Normal file
View File

@ -0,0 +1,18 @@
import seiscomp.kernel
class Module(seiscomp.kernel.Module):
    """Standard init handler without module-specific behaviour."""

    def __init__(self, env):
        seiscomp.kernel.Module.__init__(self, env, env.moduleName(__file__))

    def updateConfigProxy(self):
        # Delegate writing of bindings to the common "trunk" handler.
        return "trunk"

    def updateConfig(self):
        # Nothing to do here: the "trunk" module synchronizes the
        # bindings with the database on behalf of this module.
        return 0

    def supportsAliases(self):
        # Aliased instances of this module may be created.
        return True

57
etc/init/fdsnws.py Normal file
View File

@ -0,0 +1,57 @@
import os
import subprocess
import time
import seiscomp.kernel
class Module(seiscomp.kernel.Module):
    """Init handler for fdsnws supporting a graceful configuration reload."""

    def __init__(self, env):
        super().__init__(env, env.moduleName(__file__))

    def supportsAliases(self):
        # Aliased instances of this module may be created.
        return True

    def reload(self):
        """Ask a running fdsnws process to reload its configuration.

        Touches a marker file next to the lock file, signals the process
        with SIGHUP and waits until the service removes the marker.
        Returns 0 on success, 1 on any error or timeout.
        """
        if not self.isRunning():
            self.env.log(f"{self.name} is not running")
            return 1
        self.env.log(f"reloading {self.name}")
        lockfile = self.env.lockFile(self.name)
        # The reload marker lives next to the lock (pid) file
        reloadfile = os.path.join(os.path.dirname(lockfile), f"{self.name}.reload")
        # Open pid file
        with open(lockfile, "r", encoding="utf-8") as f:
            # Try to read the pid
            pid = int(f.readline())
        # touch reload file
        with open(reloadfile, "a", encoding="utf-8") as _:
            pass
        if not os.path.isfile(reloadfile):
            self.env.log(f"could not touch reload file: {reloadfile}")
            return 1
        # Send SIGHUP
        subprocess.call(f"kill -s HUP {pid}", shell=True)
        # wait for reload file to disappear; polls every 0.2 s, so the
        # loop bound assumes self.reloadTimeout is in seconds -
        # TODO confirm against the base class
        for _ in range(0, int(self.reloadTimeout * 5)):
            time.sleep(0.2)
            if not os.path.isfile(reloadfile):
                return 0
        self.env.log("timeout exceeded")
        return 1

    # Uncomment for authbind (running service on privileged ports)
    # def _run(self):
    #     params = "--depth 2 " + self.env.binaryFile(self.name) + " " + self._get_start_params()
    #     binaryPath = "authbind"
    #     return self.env.start(self.name, binaryPath, params)

26
etc/init/kernel.py Normal file
View File

@ -0,0 +1,26 @@
import os, sys
import seiscomp.config, seiscomp.kernel
class Module(seiscomp.kernel.CoreModule):
    """Core config module writing global setup values to kernel.cfg."""

    def __init__(self, env):
        seiscomp.kernel.CoreModule.__init__(
            self, env, env.moduleName(__file__))
        # High priority
        self.order = -100
        # This is a config module which writes the setup config to kernel.cfg
        self.isConfigModule = True

    def setup(self, setup_config):
        """Store the organization name from the setup wizard in kernel.cfg.

        When no organization was configured during setup, a stale entry is
        removed instead. Always returns 0.
        """
        cfgfile = os.path.join(self.env.SEISCOMP_ROOT, "etc", self.name + ".cfg")
        cfg = seiscomp.config.Config()
        cfg.readConfig(cfgfile)
        try:
            cfg.setString(
                "organization",
                setup_config.getString("kernel.global.organization"),
            )
        except ValueError:
            # getString raises ValueError for unset parameters (the bare
            # "except:" previously used here also swallowed unrelated errors)
            cfg.remove("organization")
        cfg.writeConfig()
        return 0

18
etc/init/ql2sc.py Normal file
View File

@ -0,0 +1,18 @@
import seiscomp.kernel
class Module(seiscomp.kernel.Module):
    """Standard init handler without module-specific behaviour."""

    def __init__(self, env):
        seiscomp.kernel.Module.__init__(self, env, env.moduleName(__file__))

    def updateConfigProxy(self):
        # Delegate writing of bindings to the common "trunk" handler.
        return "trunk"

    def updateConfig(self):
        # Nothing to do here: the "trunk" module synchronizes the
        # bindings with the database on behalf of this module.
        return 0

    def supportsAliases(self):
        # Aliased instances of this module may be created.
        return True

18
etc/init/scalert.py Normal file
View File

@ -0,0 +1,18 @@
import seiscomp.kernel
class Module(seiscomp.kernel.Module):
    """Standard init handler without module-specific behaviour."""

    def __init__(self, env):
        seiscomp.kernel.Module.__init__(self, env, env.moduleName(__file__))

    def updateConfigProxy(self):
        # Delegate writing of bindings to the common "trunk" handler.
        return "trunk"

    def updateConfig(self):
        # Nothing to do here: the "trunk" module synchronizes the
        # bindings with the database on behalf of this module.
        return 0

    def supportsAliases(self):
        # Aliased instances of this module may be created.
        return True

18
etc/init/scamp.py Normal file
View File

@ -0,0 +1,18 @@
import seiscomp.kernel
class Module(seiscomp.kernel.Module):
    """Standard init handler without module-specific behaviour."""

    def __init__(self, env):
        seiscomp.kernel.Module.__init__(self, env, env.moduleName(__file__))

    def updateConfigProxy(self):
        # Delegate writing of bindings to the common "trunk" handler.
        return "trunk"

    def updateConfig(self):
        # Nothing to do here: the "trunk" module synchronizes the
        # bindings with the database on behalf of this module.
        return 0

    def supportsAliases(self):
        # Aliased instances of this module may be created.
        return True

18
etc/init/scanloc.py Normal file
View File

@ -0,0 +1,18 @@
import seiscomp.kernel
class Module(seiscomp.kernel.Module):
    """Standard init handler without module-specific behaviour."""

    def __init__(self, env):
        seiscomp.kernel.Module.__init__(self, env, env.moduleName(__file__))

    def updateConfigProxy(self):
        # Delegate writing of bindings to the common "trunk" handler.
        return "trunk"

    def updateConfig(self):
        # Nothing to do here: the "trunk" module synchronizes the
        # bindings with the database on behalf of this module.
        return 0

    def supportsAliases(self):
        # Aliased instances of this module may be created.
        return True

18
etc/init/scautoloc.py Normal file
View File

@ -0,0 +1,18 @@
import seiscomp.kernel
class Module(seiscomp.kernel.Module):
    """Standard init handler without module-specific behaviour."""

    def __init__(self, env):
        seiscomp.kernel.Module.__init__(self, env, env.moduleName(__file__))

    def updateConfigProxy(self):
        # Delegate writing of bindings to the common "trunk" handler.
        return "trunk"

    def updateConfig(self):
        # Nothing to do here: the "trunk" module synchronizes the
        # bindings with the database on behalf of this module.
        return 0

    def supportsAliases(self):
        # Aliased instances of this module may be created.
        return True

18
etc/init/scautopick.py Normal file
View File

@ -0,0 +1,18 @@
import seiscomp.kernel
class Module(seiscomp.kernel.Module):
    """Standard init handler without module-specific behaviour."""

    def __init__(self, env):
        seiscomp.kernel.Module.__init__(self, env, env.moduleName(__file__))

    def updateConfigProxy(self):
        # Delegate writing of bindings to the common "trunk" handler.
        return "trunk"

    def updateConfig(self):
        # Nothing to do here: the "trunk" module synchronizes the
        # bindings with the database on behalf of this module.
        return 0

    def supportsAliases(self):
        # Aliased instances of this module may be created.
        return True

18
etc/init/scdb.py Normal file
View File

@ -0,0 +1,18 @@
import seiscomp.kernel
class Module(seiscomp.kernel.Module):
    """Standard init handler without module-specific behaviour."""

    def __init__(self, env):
        seiscomp.kernel.Module.__init__(self, env, env.moduleName(__file__))

    def updateConfigProxy(self):
        # Delegate writing of bindings to the common "trunk" handler.
        return "trunk"

    def updateConfig(self):
        # Nothing to do here: the "trunk" module synchronizes the
        # bindings with the database on behalf of this module.
        return 0

    def supportsAliases(self):
        # Aliased instances of this module may be created.
        return True

31
etc/init/scesv.py Normal file
View File

@ -0,0 +1,31 @@
import seiscomp.kernel
class Module(seiscomp.kernel.Module):
    """Init handler for an interactive (GUI) module.

    The module is not managed as a daemon: it can neither be started,
    stopped nor enabled, and it is hidden from status listings.
    """

    def __init__(self, env):
        seiscomp.kernel.Module.__init__(self, env, env.moduleName(__file__))

    def start(self):
        # Running as a daemon is not supported for this application.
        self.env.log("%s cannot be started by design" % self.name)
        return None

    def stop(self):
        # Nothing runs in the background, hence nothing to stop.
        return None

    def enable(self):
        self.env.log("%s cannot be enabled by design" % self.name)
        return None

    def status(self, _shouldRun):
        # Non-zero keeps the module out of status listings.
        return 1

    def updateConfig(self):
        # No bindings or database synchronization required.
        return 0

    def supportsAliases(self):
        # Aliased instances of this module may be created.
        return True

18
etc/init/scevent.py Normal file
View File

@ -0,0 +1,18 @@
import seiscomp.kernel
class Module(seiscomp.kernel.Module):
    """Standard init handler without module-specific behaviour."""

    def __init__(self, env):
        seiscomp.kernel.Module.__init__(self, env, env.moduleName(__file__))

    def updateConfigProxy(self):
        # Delegate writing of bindings to the common "trunk" handler.
        return "trunk"

    def updateConfig(self):
        # Nothing to do here: the "trunk" module synchronizes the
        # bindings with the database on behalf of this module.
        return 0

    def supportsAliases(self):
        # Aliased instances of this module may be created.
        return True

18
etc/init/scevtlog.py Normal file
View File

@ -0,0 +1,18 @@
import seiscomp.kernel
class Module(seiscomp.kernel.Module):
    """Standard init handler without module-specific behaviour."""

    def __init__(self, env):
        seiscomp.kernel.Module.__init__(self, env, env.moduleName(__file__))

    def updateConfigProxy(self):
        # Delegate writing of bindings to the common "trunk" handler.
        return "trunk"

    def updateConfig(self):
        # Nothing to do here: the "trunk" module synchronizes the
        # bindings with the database on behalf of this module.
        return 0

    def supportsAliases(self):
        # Aliased instances of this module may be created.
        return True

31
etc/init/scheli.py Normal file
View File

@ -0,0 +1,31 @@
import seiscomp.kernel
class Module(seiscomp.kernel.Module):
    """Init handler for an interactive (GUI) module.

    The module is not managed as a daemon: it can neither be started,
    stopped nor enabled, and it is hidden from status listings.
    """

    def __init__(self, env):
        seiscomp.kernel.Module.__init__(self, env, env.moduleName(__file__))

    def start(self):
        # Running as a daemon is not supported for this application.
        self.env.log("%s cannot be started by design" % self.name)
        return None

    def stop(self):
        # Nothing runs in the background, hence nothing to stop.
        return None

    def enable(self):
        self.env.log("%s cannot be enabled by design" % self.name)
        return None

    def status(self, _shouldRun):
        # Non-zero keeps the module out of status listings.
        return 1

    def updateConfig(self):
        # No bindings or database synchronization required.
        return 0

    def supportsAliases(self):
        # Aliased instances of this module may be created.
        return True

18
etc/init/scimex.py Normal file
View File

@ -0,0 +1,18 @@
import seiscomp.kernel
class Module(seiscomp.kernel.Module):
    """Standard init handler without module-specific behaviour."""

    def __init__(self, env):
        seiscomp.kernel.Module.__init__(self, env, env.moduleName(__file__))

    def updateConfigProxy(self):
        # Delegate writing of bindings to the common "trunk" handler.
        return "trunk"

    def updateConfig(self):
        # Nothing to do here: the "trunk" module synchronizes the
        # bindings with the database on behalf of this module.
        return 0

    def supportsAliases(self):
        # Aliased instances of this module may be created.
        return True

18
etc/init/scimport.py Normal file
View File

@ -0,0 +1,18 @@
import seiscomp.kernel
class Module(seiscomp.kernel.Module):
    """Standard init handler without module-specific behaviour."""

    def __init__(self, env):
        seiscomp.kernel.Module.__init__(self, env, env.moduleName(__file__))

    def updateConfigProxy(self):
        # Delegate writing of bindings to the common "trunk" handler.
        return "trunk"

    def updateConfig(self):
        # Nothing to do here: the "trunk" module synchronizes the
        # bindings with the database on behalf of this module.
        return 0

    def supportsAliases(self):
        # Aliased instances of this module may be created.
        return True

83
etc/init/scinv.py Normal file
View File

@ -0,0 +1,83 @@
import os
import sys
import seiscomp.kernel
import seiscomp.config
import seiscomp.system
def parseBindPort(bind):
    """Return the port of a 'port' or 'ip:port' string, or -1 if malformed."""
    tokens = bind.split(":")
    if len(tokens) == 1:
        return int(tokens[0])
    if len(tokens) == 2:
        return int(tokens[1])
    return -1
class Module(seiscomp.kernel.Module):
    """Config module synchronizing the inventory with the database."""

    def __init__(self, env):
        seiscomp.kernel.Module.__init__(self, env, "inventory")
        # This is a config module which synchronizes bindings with the database
        self.isConfigModule = True
        # Give this module a high priority to be executed at first (unless
        # another module defines a negative value. It ensures that successive
        # modules can read an up-to-date inventory and use the latest rc files.
        self.order = 0

    def updateConfig(self):
        """Run "scinv sync" against the local messaging system.

        Returns the exit status of scinv, 0 when messaging is disabled,
        or 1 on an invalid messaging.bind configuration.
        """
        messaging = True
        messagingPort = 18180
        messagingProtocol = "scmp"
        try:
            messaging = self.env.getBool("messaging.enable")
        except:
            # Parameter not set: messaging stays enabled by default
            pass
        # If messaging is disabled in kernel.cfg, do not do anything
        if not messaging:
            sys.stdout.write("- messaging disabled, nothing to do\n")
            return 0
        # Load scmaster configuration and figure the bind ports of scmaster out
        cfg = seiscomp.config.Config()
        seiscomp.system.Environment.Instance().initConfig(cfg, "scmaster")
        # First check the unencrypted port and prefer that
        p = parseBindPort(cfg.getString("interface.bind"))
        if p > 0:
            messagingPort = p
            try:
                bind = self.env.getString("messaging.bind")
                bindToks = bind.split(":")
                if len(bindToks) == 1:
                    messagingPort = int(bindToks[0])
                elif len(bindToks) == 2:
                    messagingPort = int(bindToks[1])
                else:
                    sys.stdout.write(f"E invalid messaging bind parameter: {bind}\n")
                    sys.stdout.write(" expected either 'port' or 'ip:port'\n")
                    return 1
            except:
                pass
        # Otherwise check if ssl is enabled
        else:
            p = parseBindPort(cfg.getString("interface.ssl.bind"))
            if p > 0:
                messagingPort = p
                messagingProtocol = "scmps"
        # Synchronize inventory
        return os.system(
            'scinv sync --console=1 -H %s://localhost:%d/production --filebase "%s" --rc-dir "%s" --key-dir "%s"'
            % (
                messagingProtocol,
                messagingPort,
                os.path.join(self.env.root, "etc", "inventory"),
                os.path.join(self.env.root, "var", "lib", "rc"),
                os.path.join(self.env.root, "etc", "key"),
            )
        )

18
etc/init/scm.py Normal file
View File

@ -0,0 +1,18 @@
import seiscomp.kernel
class Module(seiscomp.kernel.Module):
    """Standard init handler without module-specific behaviour."""

    def __init__(self, env):
        seiscomp.kernel.Module.__init__(self, env, env.moduleName(__file__))

    def updateConfigProxy(self):
        # Delegate writing of bindings to the common "trunk" handler.
        return "trunk"

    def updateConfig(self):
        # Nothing to do here: the "trunk" module synchronizes the
        # bindings with the database on behalf of this module.
        return 0

    def supportsAliases(self):
        # Aliased instances of this module may be created.
        return True

18
etc/init/scmag.py Normal file
View File

@ -0,0 +1,18 @@
import seiscomp.kernel
class Module(seiscomp.kernel.Module):
    """Standard init handler without module-specific behaviour."""

    def __init__(self, env):
        seiscomp.kernel.Module.__init__(self, env, env.moduleName(__file__))

    def updateConfigProxy(self):
        # Delegate writing of bindings to the common "trunk" handler.
        return "trunk"

    def updateConfig(self):
        # Nothing to do here: the "trunk" module synchronizes the
        # bindings with the database on behalf of this module.
        return 0

    def supportsAliases(self):
        # Aliased instances of this module may be created.
        return True

31
etc/init/scmapcut.py Normal file
View File

@ -0,0 +1,31 @@
import seiscomp.kernel
class Module(seiscomp.kernel.Module):
    """Init handler for an interactive (GUI) module.

    The module is not managed as a daemon: it can neither be started,
    stopped nor enabled, and it is hidden from status listings.
    """

    def __init__(self, env):
        seiscomp.kernel.Module.__init__(self, env, env.moduleName(__file__))

    def start(self):
        # Running as a daemon is not supported for this application.
        self.env.log("%s cannot be started by design" % self.name)
        return None

    def stop(self):
        # Nothing runs in the background, hence nothing to stop.
        return None

    def enable(self):
        self.env.log("%s cannot be enabled by design" % self.name)
        return None

    def status(self, _shouldRun):
        # Non-zero keeps the module out of status listings.
        return 1

    def updateConfig(self):
        # No bindings or database synchronization required.
        return 0

    def supportsAliases(self):
        # Aliased instances of this module may be created.
        return True

734
etc/init/scmaster.py Normal file
View File

@ -0,0 +1,734 @@
from __future__ import print_function
import os
import shutil
import shlex
import sys
import subprocess
import tempfile
from seiscomp import config, kernel, system
# Python version dependent string conversion helpers
if sys.version_info[0] < 3:
    # Python 2: str is already a byte string, conversions are identity
    py3bstr = str
    py3ustr = str
else:
    def py3bstr(s):
        """Encode a text string to UTF-8 bytes."""
        return s.encode("utf-8")

    def py3ustr(s):
        """Decode UTF-8 bytes to text, replacing invalid sequences."""
        return s.decode("utf-8", "replace")
class DBParams:
    """Plain container for database connection settings collected at setup."""

    def __init__(self):
        # Database name
        self.db = None
        # Read-write credentials and host
        self.rwuser = None
        self.rwpwd = None
        self.rwhost = None
        # Read-only credentials and host
        self.rouser = None
        self.ropwd = None
        self.rohost = None
        # Whether to drop and/or create the database schema
        self.drop = False
        self.create = False
def check_output(cmd):
    """Run *cmd* through the shell and return [stdout, stderr, returncode]."""
    proc = subprocess.Popen(
        cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True
    )
    stdout, stderr = proc.communicate()
    return [py3ustr(stdout), py3ustr(stderr), proc.returncode]
def addEntry(cfg, param, item):
    """Append *item* to the list parameter *param*, creating it if missing.

    Does nothing when the item is already contained in the list.
    """
    try:
        values = cfg.getStrings(param)
        # A single empty string denotes an empty list in the configuration
        if len(values) == 1 and not values[0]:
            values.clear()
    except ValueError:
        values = config.VectorStr()
    if item in values:
        return
    values.push_back(item)
    cfg.setStrings(param, values)
def removeEntry(cfg, param, item):
    # Removes an item from a parameter list; only the first occurrence is
    # removed. Relies on the VectorStr iterator API (begin/end/erase/next)
    # of seiscomp.config.
    try:
        items = cfg.getStrings(param)
        it = items.begin()
        while it != items.end():
            if it.value() == item:
                items.erase(it)
                cfg.setStrings(param, items)
                break
            it.next()
    except ValueError:
        # No parameter set, nothing to do
        pass
# The kernel module which starts scmaster if enabled
class Module(kernel.CoreModule):
    def __init__(self, env):
        """Initialize the core module and read the messaging settings."""
        kernel.CoreModule.__init__(self, env, env.moduleName(__file__))
        # High priority
        self.order = -1
        # Default values
        self.messaging = True
        self.messagingBind = None
        try:
            self.messaging = self.env.getBool("messaging.enable")
        except ValueError:
            # Parameter not set: keep the default
            pass
        try:
            self.messagingBind = self.env.getString("messaging.bind")
        except ValueError:
            pass
# Add master port
def _get_start_params(self):
if self.messagingBind:
return (
kernel.Module._get_start_params(self)
+ " --bind %s" % self.messagingBind
)
return kernel.Module._get_start_params(self)
def start(self):
if not self.messaging:
print(
"[kernel] {} is disabled by config".format(self.name), file=sys.stderr
)
return 1
appConfig = system.Environment.Instance().appConfigFileName(self.name)
localConfig = system.Environment.Instance().configFileName(self.name)
lockFile = os.path.join(self.env.SEISCOMP_ROOT, self.env.lockFile(self.name))
try:
needRestart = False
started = os.path.getmtime(lockFile)
try:
needRestart = started < os.path.getmtime(appConfig)
except Exception:
pass
try:
needRestart = started < os.path.getmtime(localConfig)
except Exception:
pass
if needRestart:
self.stop()
except Exception:
pass
return kernel.CoreModule.start(self)
def check(self):
if not self.messaging:
print(
"[kernel] {} is disabled by config".format(self.name), file=sys.stderr
)
return 0
return kernel.CoreModule.check(self)
def status(self, shouldRun):
if not self.messaging:
shouldRun = False
return kernel.CoreModule.status(self, shouldRun)
    def readDBParams(self, params, setup_config):
        """Fill *params* (a DBParams) from the setup wizard configuration.

        Returns True when all mandatory connection settings are present,
        False otherwise (a message is printed to stderr). The create/drop
        flags are optional and default to False.
        """
        try:
            params.db = setup_config.getString(
                self.name + ".database.enable.backend.db"
            )
        except ValueError as err:
            print(err)
            print(" - database name not set, ignoring setup", file=sys.stderr)
            return False
        try:
            params.rwhost = setup_config.getString(
                self.name + ".database.enable.backend.rwhost"
            )
        except ValueError:
            print(" - database host (rw) not set, ignoring setup", file=sys.stderr)
            return False
        try:
            params.rwuser = setup_config.getString(
                self.name + ".database.enable.backend.rwuser"
            )
        except ValueError:
            print(" - database user (rw) not set, ignoring setup", file=sys.stderr)
            return False
        try:
            params.rwpwd = setup_config.getString(
                self.name + ".database.enable.backend.rwpwd"
            )
        except ValueError:
            print(" - database password (rw) not set, ignoring setup", file=sys.stderr)
            return False
        try:
            params.rohost = setup_config.getString(
                self.name + ".database.enable.backend.rohost"
            )
        except ValueError:
            print(" - database host (ro) not set, ignoring setup", file=sys.stderr)
            return False
        try:
            params.rouser = setup_config.getString(
                self.name + ".database.enable.backend.rouser"
            )
        except ValueError:
            print(" - database user (ro) not set, ignoring setup", file=sys.stderr)
            return False
        try:
            params.ropwd = setup_config.getString(
                self.name + ".database.enable.backend.ropwd"
            )
        except ValueError:
            print(" - database password (ro) not set, ignoring setup", file=sys.stderr)
            return False
        # Optional schema creation flags
        try:
            params.create = setup_config.getBool(
                self.name + ".database.enable.backend.create"
            )
        except ValueError:
            params.create = False
        try:
            params.drop = setup_config.getBool(
                self.name + ".database.enable.backend.create.drop"
            )
        except ValueError:
            params.drop = False
        return True
    def setup(self, setup_config):
        """Configure the database backend for this module from setup answers.

        Reads the ``<name>.database.*`` answers collected by ``seiscomp
        setup``, optionally creates the database (MySQL/MariaDB, PostgreSQL
        or SQLite3) via the helper scripts in share/db, writes the
        queue/dbstore settings into the module configuration, and registers
        the database plugin in etc/global.cfg so local clients can use it.

        Returns 0 on success, 1 on any error.
        """
        # Directory holding the schema files and the setup helper scripts.
        schemapath = os.path.join(self.env.SEISCOMP_ROOT, "share", "db")
        cfg = config.Config()
        system.Environment.Instance().initConfig(cfg, self.name)
        try:
            dbenable = setup_config.getBool(self.name + ".database.enable")
        except ValueError:
            print(" - database.enable not set, ignoring setup", file=sys.stderr)
            return 0
        dbBackend = None
        if not dbenable:
            # Database disabled: strip every dbstore-related setting from
            # the production queue.
            removeEntry(cfg, "queues.production.plugins", "dbstore")
            removeEntry(cfg, "queues.production.processors.messages", "dbstore")
            cfg.remove("queues.production.processors.messages.dbstore.driver")
            cfg.remove("queues.production.processors.messages.dbstore.read")
            cfg.remove("queues.production.processors.messages.dbstore.write")
        else:
            try:
                dbBackend = setup_config.getString(
                    self.name + ".database.enable.backend"
                )
            except ValueError:
                print(" - database backend not set, ignoring setup", file=sys.stderr)
                return 1
            if dbBackend == "mysql/mariadb":
                # MariaDB shares the MySQL driver and setup script.
                dbBackend = "mysql"
                # Optional creation parameters; each falls back to a
                # harmless default when the question was not answered.
                try:
                    rootpwd = setup_config.getString(
                        self.name + ".database.enable.backend.create.rootpw"
                    )
                except ValueError:
                    rootpwd = ""
                try:
                    runAsSuperUser = setup_config.getBool(
                        self.name + ".database.enable.backend.create.runAsSuperUser"
                    )
                except ValueError:
                    runAsSuperUser = False
                try:
                    characterSet = setup_config.getString(
                        self.name + ".database.enable.backend.create.characterset"
                    )
                except ValueError:
                    characterSet = None
                params = DBParams()
                if not self.readDBParams(params, setup_config):
                    return 1
                # Read-only and read-write connection URLs for dbstore.
                cfg.setString(
                    "queues.production.processors.messages.dbstore.read",
                    "{}:{}@{}/{}".format(
                        params.rouser, params.ropwd, params.rohost, params.db
                    ),
                )
                cfg.setString(
                    "queues.production.processors.messages.dbstore.write",
                    "{}:{}@{}/{}".format(
                        params.rwuser, params.rwpwd, params.rwhost, params.db
                    ),
                )
                if params.create:
                    dbScript = os.path.join(schemapath, "mysql_setup.py")
                    # Positional arguments expected by mysql_setup.py.
                    options = [
                        params.db,
                        params.rwuser,
                        params.rwpwd,
                        params.rouser,
                        params.ropwd,
                        params.rwhost,
                        rootpwd,
                        str(params.drop),
                        schemapath,
                    ]
                    if characterSet is not None:
                        options.append(characterSet)
                    binary = os.path.join(schemapath, "pkexec_wrapper.sh")
                    print(
                        "+ Running MySQL database setup script {}".format(dbScript),
                        file=sys.stderr,
                    )
                    if runAsSuperUser:
                        # Elevate privileges through the pkexec wrapper.
                        cmd = "{} seiscomp-python {} {}".format(
                            binary, dbScript, " ".join(shlex.quote(o) for o in options)
                        )
                    else:
                        cmd = "{} {}".format(dbScript, " ".join(shlex.quote(o) for o in options))
                    p = subprocess.Popen(cmd, shell=True)
                    ret = p.wait()
                    if ret != 0:
                        print(" - Failed to setup database", file=sys.stderr)
                        return 1
            elif dbBackend == "postgresql":
                dbBackend = "postgresql"
                params = DBParams()
                if not self.readDBParams(params, setup_config):
                    return 1
                cfg.setString(
                    "queues.production.processors.messages.dbstore.read",
                    "{}:{}@{}/{}".format(
                        params.rouser, params.ropwd, params.rohost, params.db
                    ),
                )
                cfg.setString(
                    "queues.production.processors.messages.dbstore.write",
                    "{}:{}@{}/{}".format(
                        params.rwuser, params.rwpwd, params.rwhost, params.db
                    ),
                )
                if params.create:
                    try:
                        # The setup runs as user "postgres", which cannot
                        # read SEISCOMP_ROOT; copy the schema and the
                        # interpreter into a world-readable temp dir.
                        tmpPath = tempfile.mkdtemp()
                        os.chmod(tmpPath, 0o755)
                        # NOTE(review): tmpPath is rebound to the "setup"
                        # subdirectory here, so the rmtree() below removes
                        # only that subdirectory and appears to leave the
                        # parent mkdtemp() directory behind — confirm.
                        tmpPath = os.path.join(tmpPath, "setup")
                        try:
                            shutil.copytree(schemapath, tmpPath)
                            filename = os.path.join(
                                self.env.SEISCOMP_ROOT, "bin", "seiscomp-python"
                            )
                            shutil.copy(filename, tmpPath)
                        except Exception as err:
                            print(err)
                            return 1
                        dbScript = os.path.join(tmpPath, "postgres_setup.py")
                        # Positional arguments expected by postgres_setup.py.
                        options = [
                            params.db,
                            params.rwuser,
                            params.rwpwd,
                            params.rouser,
                            params.ropwd,
                            params.rwhost,
                            str(params.drop),
                            tmpPath,
                        ]
                        binary = os.path.join(schemapath, "pkexec_wrapper.sh")
                        print(
                            "+ Running PostgreSQL database setup script {}".format(
                                dbScript
                            ),
                            file=sys.stderr,
                        )
                        # Run the script as the "postgres" system user.
                        cmd = '{} su postgres -c "{}/seiscomp-python {} {}"'.format(
                            binary, tmpPath, dbScript, " ".join(shlex.quote(o) for o in options)
                        )
                        p = subprocess.Popen(cmd, shell=True)
                        ret = p.wait()
                        if ret != 0:
                            print(" - Failed to setup database", file=sys.stderr)
                            return 1
                    finally:
                        # Best-effort cleanup of the temporary setup tree.
                        try:
                            shutil.rmtree(tmpPath)
                        except OSError:
                            pass
            elif dbBackend == "sqlite3":
                dbBackend = "sqlite3"
                dbScript = os.path.join(schemapath, "sqlite3_setup.py")
                try:
                    create = setup_config.getBool(
                        self.name + ".database.enable.backend.create"
                    )
                except BaseException:
                    create = False
                try:
                    filename = setup_config.getString(
                        self.name + ".database.enable.backend.filename"
                    )
                    filename = system.Environment.Instance().absolutePath(filename)
                except BaseException:
                    # Default database location inside the installation.
                    filename = os.path.join(
                        self.env.SEISCOMP_ROOT, "var", "lib", "seiscomp.db"
                    )
                if not filename:
                    print(" - location not set, ignoring setup", file=sys.stderr)
                    return 1
                try:
                    override = setup_config.getBool(
                        self.name + ".database.enable.backend.create.override"
                    )
                except BaseException:
                    override = False
                options = [filename, schemapath]
                if create:
                    print(
                        "+ Running SQLite3 database setup script {}".format(dbScript),
                        file=sys.stderr,
                    )
                    # NOTE(review): "override" (a bool) is appended to the
                    # command line unquoted — sqlite3_setup.py presumably
                    # parses the string "True"/"False"; confirm.
                    cmd = "seiscomp-python {} {} {}".format(
                        dbScript, " ".join(shlex.quote(o) for o in options), override
                    )
                    p = subprocess.Popen(cmd, shell=True)
                    ret = p.wait()
                    if ret != 0:
                        print(" - Failed to setup database", file=sys.stderr)
                        return 1
                # SQLite uses the database file path for both connections.
                cfg.setString(
                    "queues.production.processors.messages.dbstore.read", filename
                )
                cfg.setString(
                    "queues.production.processors.messages.dbstore.write", filename
                )
            # Configure db backend for scmaster
            cfg.setString("core.plugins", "db" + dbBackend)
            cfg.setString(
                "queues.production.processors.messages.dbstore.driver", dbBackend
            )
            addEntry(cfg, "queues.production.plugins", "dbstore")
            addEntry(cfg, "queues.production.processors.messages", "dbstore")
        cfg.writeConfig(
            system.Environment.Instance().configFileLocation(
                self.name, system.Environment.CS_CONFIG_APP
            )
        )
        # Now we need to insert the corresponding plugin to etc/global.cfg
        # that all connected local clients can handle the database backend
        if dbBackend:
            cfgfile = os.path.join(self.env.SEISCOMP_ROOT, "etc", "global.cfg")
            cfg = config.Config()
            cfg.readConfig(cfgfile)
            cfg.setString("core.plugins", "db" + dbBackend)
            cfg.writeConfig()
        return 0
def updateConfig(self):
cfg = config.Config()
system.Environment.Instance().initConfig(cfg, self.name)
try:
queues = cfg.getStrings("queues")
except ValueError:
queues = []
# iterate through all queues and check DB schema version if message
# processor dbstore is present
for queue in queues:
print("INFO: Checking queue '{}'".format(queue), file=sys.stderr)
try:
msgProcs = cfg.getStrings("queues.{}.processors.messages".format(queue))
if "dbstore" in msgProcs and not self.checkDBStore(cfg, queue):
return 1
except ValueError:
print(" * ignoring - no database backend configured", file=sys.stderr)
return 0
    def checkDBStore(self, cfg, queue):
        """Check the database schema version behind one queue's dbstore.

        Parses the queue's write connection, queries the Meta table for
        'Schema-Version' via the mysql/psql command line client, and
        compares the result against the newest migration script shipped in
        share/db/migrations. If the schema is outdated, the required
        migration commands are printed.

        Returns False when the schema must be migrated first or the
        database is not accessible; True in every other case (including
        situations that can only be warned about).
        """
        prefix = "queues.{}.processors.messages.dbstore".format(queue)
        print(" * checking DB schema version", file=sys.stderr)
        try:
            backend = cfg.getString("{}.driver".format(prefix))
        except ValueError:
            print(
                "WARNING: dbstore message processor activated but no "
                "database backend configured",
                file=sys.stderr,
            )
            return True
        # Only these two backends ship migration scripts.
        if backend not in ("mysql", "postgresql"):
            print(
                "WARNING: Only MySQL and PostgreSQL migrations are "
                "supported right now. Please check and upgrade the "
                "database schema version yourselves.",
                file=sys.stderr,
            )
            return True
        print(" * check database write access ... ", end="", file=sys.stderr)
        # 1. Parse connection
        try:
            params = cfg.getString("{}.write".format(prefix))
        except ValueError:
            print("failed", file=sys.stderr)
            print(
                "WARNING: dbstore message processor activated but no "
                "write connection configured",
                file=sys.stderr,
            )
            return True
        # Defaults used when the URL omits the respective component.
        user = "sysop"
        pwd = "sysop"
        host = "localhost"
        db = "seiscomp"
        port = None
        # Split "user[:pwd]@host[:port]/db[?options]" by hand.
        tmp = params.split("@")
        if len(tmp) > 1:
            params = tmp[1]
            tmp = tmp[0].split(":")
            if len(tmp) == 1:
                user = tmp[0]
                pwd = None
            elif len(tmp) == 2:
                user = tmp[0]
                pwd = tmp[1]
            else:
                print("failed", file=sys.stderr)
                print(
                    "WARNING: Invalid scmaster.cfg:{}.write, cannot check "
                    "schema version".format(prefix),
                    file=sys.stderr,
                )
                return True
        tmp = params.split("/")
        if len(tmp) > 1:
            tmpHost = tmp[0]
            db = tmp[1]
        else:
            tmpHost = tmp[0]
        # get host name and port
        tmp = tmpHost.split(":")
        host = tmp[0]
        if len(tmp) == 2:
            try:
                port = int(tmp[1])
            except ValueError:
                print("ERROR: Invalid port number {}".format(tmp[1]), file=sys.stderr)
                return True
        # Strip trailing "?option=value" parts from the database name.
        db = db.split("?")[0]
        # 2. Try to login
        if backend == "mysql":
            cmd = 'mysql -u "%s" -h "%s" -D"%s" --skip-column-names' % (user, host, db)
            if port:
                cmd += " -P %d" % (port)
            if pwd:
                # Escape "$" so the shell does not expand it.
                cmd += ' -p"%s"' % pwd.replace("$", "\\$")
            cmd += " -e \"SELECT value from Meta where name='Schema-Version'\""
        else:
            if pwd:
                # psql has no password option; pass it via environment.
                os.environ["PGPASSWORD"] = pwd
            cmd = 'psql -U "%s" -h "%s" -t "%s"' % (user, host, db)
            if port:
                cmd += " -p %d" % (port)
            cmd += " -c \"SELECT value from Meta where name='Schema-Version'\""
        # NOTE(review): check_output is a project helper; it appears to
        # return (stdout, stderr, returncode) — confirm its definition.
        out = check_output(cmd)
        if out[2] != 0:
            print("failed", file=sys.stderr)
            print("WARNING: {} returned with error:".format(backend), file=sys.stderr)
            print(out[1].strip(), file=sys.stderr)
            return False
        print("passed", file=sys.stderr)
        version = out[0].strip()
        print(" * database schema version is {}".format(version), file=sys.stderr)
        # Accept MAJOR.MINOR and MAJOR.MINOR.REV version strings.
        try:
            vmaj, vmin = [int(t) for t in version.split(".")]
            vrev = 0
        except ValueError:
            try:
                vmaj, vmin, vrev = [int(t) for t in version.split(".")]
            except ValueError:
                print(
                    "WARNING: wrong version format: expected MAJOR.MINOR[.REV]",
                    file=sys.stderr,
                )
                return True
        strictVersionMatch = True
        try:
            strictVersionMatch = cfg.getBool("{}.strictVersionMatch".format(prefix))
        except ValueError:
            pass
        if not strictVersionMatch:
            print(" * database version check is disabled", file=sys.stderr)
            return True
        # 3. Scan the shipped migration scripts ("FROM_to_TO.sql", with
        # versions encoded as MAJ_MIN[_REV]) and build the migration graph.
        migrations = os.path.join(
            self.env.SEISCOMP_ROOT, "share", "db", "migrations", backend
        )
        migration_paths = {}
        # Highest target version found among all migration scripts.
        vcurrmaj = 0
        vcurrmin = 0
        vcurrrev = 0
        for f in os.listdir(migrations):
            if os.path.isfile(os.path.join(migrations, f)):
                name, ext = os.path.splitext(f)
                if ext != ".sql":
                    continue
                try:
                    vfrom, vto = name.split("_to_")
                except ValueError:
                    continue
                try:
                    vfrommaj, vfrommin, vfromrev = [int(t) for t in vfrom.split("_")]
                except ValueError as e:
                    try:
                        vfrommaj, vfrommin = [int(t) for t in vfrom.split("_")]
                        vfromrev = 0
                    except ValueError:
                        continue
                try:
                    vtomaj, vtomin, vtorev = [int(t) for t in vto.split("_")]
                except ValueError:
                    try:
                        vtomaj, vtomin = [int(t) for t in vto.split("_")]
                        vtorev = 0
                    except ValueError:
                        continue
                migration_paths[(vfrommaj, vfrommin, vfromrev)] = (
                    vtomaj,
                    vtomin,
                    vtorev,
                )
                # Track the lexicographically largest (maj, min, rev).
                if (vtomaj > vcurrmaj) or (
                    (vtomaj == vcurrmaj)
                    and (
                        (vtomin > vcurrmin)
                        or ((vtomin == vcurrmin) and (vtorev > vcurrrev))
                    )
                ):
                    vcurrmaj = vtomaj
                    vcurrmin = vtomin
                    vcurrrev = vtorev
        print(
            " * last migration version is %d.%d.%d" % (vcurrmaj, vcurrmin, vcurrrev),
            file=sys.stderr,
        )
        if vcurrmaj == vmaj and vcurrmin == vmin and vcurrrev == vrev:
            print(" * schema up-to-date", file=sys.stderr)
            return True
        if (vmaj, vmin, vrev) not in migration_paths:
            print(" * no migrations found", file=sys.stderr)
            return True
        # 4. Schema is outdated: print the exact commands to apply, in
        # order, following the migration graph to the newest version.
        print(
            " * migration to the current version is required. Apply the " "following",
            file=sys.stderr,
        )
        print(
            "   database migration scripts in exactly the given order:",
            file=sys.stderr,
        )
        print("   * seiscomp stop", file=sys.stderr)
        # Format a version as used in migration file names (rev omitted
        # when zero).
        def fn(maj, min, rev):
            return "%d_%d_%d" % (maj, min, rev) if rev else "%d_%d" % (maj, min)
        while (vmaj, vmin, vrev) in migration_paths:
            (vtomaj, vtomin, vtorev) = migration_paths[(vmaj, vmin, vrev)]
            fname = "%s_to_%s.sql" % (fn(vmaj, vmin, vrev), fn(vtomaj, vtomin, vtorev))
            if backend == "mysql":
                print(
                    "   * mysql -u {} -h {} -p {} < {}".format(
                        user, host, db, os.path.join(migrations, fname)
                    ),
                    file=sys.stderr,
                )
            elif backend == "postgresql":
                print(
                    "   * psql -U {} -h {} -d {} -W -f {}".format(
                        user, host, db, os.path.join(migrations, fname)
                    ),
                    file=sys.stderr,
                )
            else:
                print(
                    "   * {}".format(os.path.join(migrations, fname)), file=sys.stderr
                )
            (vmaj, vmin, vrev) = (vtomaj, vtomin, vtorev)
        print("   * seiscomp start", file=sys.stderr)
        return False

31
etc/init/scmm.py Normal file
View File

@ -0,0 +1,31 @@
import seiscomp.kernel
class Module(seiscomp.kernel.Module):
    """Init handler for an interactive SeisComP application (scmm).

    The program is launched manually by the operator; the kernel must not
    start, stop or enable it, and it is hidden from ``seiscomp status``
    and the scconfig system panel.
    """

    def __init__(self, env):
        seiscomp.kernel.Module.__init__(self, env, env.moduleName(__file__))

    def start(self):
        # Running as a daemon is not supported for this module.
        msg = "%s cannot be started by design" % self.name
        self.env.log(msg)
        return None

    def stop(self):
        # Nothing runs in the background, so there is nothing to stop.
        return None

    def enable(self):
        # Enabling would schedule an automatic start, which is not wanted.
        msg = "%s cannot be enabled by design" % self.name
        self.env.log(msg)
        return None

    def status(self, _shouldRun):
        # Non-zero keeps the module out of the status listing.
        return 1

    def updateConfig(self):
        # No configuration work to do for this module.
        return 0

    def supportsAliases(self):
        # Aliases may be created for this module.
        return True

31
etc/init/scmv.py Normal file
View File

@ -0,0 +1,31 @@
import seiscomp.kernel
class Module(seiscomp.kernel.Module):
    """Init handler for an interactive SeisComP application (scmv).

    The program is launched manually by the operator; the kernel must not
    start, stop or enable it, and it is hidden from ``seiscomp status``
    and the scconfig system panel.
    """

    def __init__(self, env):
        seiscomp.kernel.Module.__init__(self, env, env.moduleName(__file__))

    def start(self):
        # Running as a daemon is not supported for this module.
        msg = "%s cannot be started by design" % self.name
        self.env.log(msg)
        return None

    def stop(self):
        # Nothing runs in the background, so there is nothing to stop.
        return None

    def enable(self):
        # Enabling would schedule an automatic start, which is not wanted.
        msg = "%s cannot be enabled by design" % self.name
        self.env.log(msg)
        return None

    def status(self, _shouldRun):
        # Non-zero keeps the module out of the status listing.
        return 1

    def updateConfig(self):
        # No configuration work to do for this module.
        return 0

    def supportsAliases(self):
        # Aliases may be created for this module.
        return True

31
etc/init/scolv.py Normal file
View File

@ -0,0 +1,31 @@
import seiscomp.kernel
class Module(seiscomp.kernel.Module):
    """Init handler for an interactive SeisComP application (scolv).

    The program is launched manually by the operator; the kernel must not
    start, stop or enable it, and it is hidden from ``seiscomp status``
    and the scconfig system panel.
    """

    def __init__(self, env):
        seiscomp.kernel.Module.__init__(self, env, env.moduleName(__file__))

    def start(self):
        # Running as a daemon is not supported for this module.
        msg = "%s cannot be started by design" % self.name
        self.env.log(msg)
        return None

    def stop(self):
        # Nothing runs in the background, so there is nothing to stop.
        return None

    def enable(self):
        # Enabling would schedule an automatic start, which is not wanted.
        msg = "%s cannot be enabled by design" % self.name
        self.env.log(msg)
        return None

    def status(self, _shouldRun):
        # Non-zero keeps the module out of the status listing.
        return 1

    def updateConfig(self):
        # No configuration work to do for this module.
        return 0

    def supportsAliases(self):
        # Aliases may be created for this module.
        return True

18
etc/init/scproclat.py Normal file
View File

@ -0,0 +1,18 @@
import seiscomp.kernel
class Module(seiscomp.kernel.Module):
    """Init handler for a trunk-style module (scproclat).

    Bindings are written to the database by the "trunk" module, so this
    handler only delegates and performs no configuration work itself.
    """

    def __init__(self, env):
        seiscomp.kernel.Module.__init__(self, env, env.moduleName(__file__))

    def updateConfigProxy(self):
        # Delegate configuration updates to the "trunk" module.
        return "trunk"

    def updateConfig(self):
        # The "trunk" module writes the bindings into the database.
        return 0

    def supportsAliases(self):
        # Aliases may be created for this module.
        return True

18
etc/init/scqc.py Normal file
View File

@ -0,0 +1,18 @@
import seiscomp.kernel
class Module(seiscomp.kernel.Module):
    """Init handler for a trunk-style module (scqc).

    Bindings are written to the database by the "trunk" module, so this
    handler only delegates and performs no configuration work itself.
    """

    def __init__(self, env):
        seiscomp.kernel.Module.__init__(self, env, env.moduleName(__file__))

    def updateConfigProxy(self):
        # Delegate configuration updates to the "trunk" module.
        return "trunk"

    def updateConfig(self):
        # The "trunk" module writes the bindings into the database.
        return 0

    def supportsAliases(self):
        # Aliases may be created for this module.
        return True

31
etc/init/scqcv.py Normal file
View File

@ -0,0 +1,31 @@
import seiscomp.kernel
class Module(seiscomp.kernel.Module):
    """Init handler for an interactive SeisComP application (scqcv).

    The program is launched manually by the operator; the kernel must not
    start, stop or enable it, and it is hidden from ``seiscomp status``
    and the scconfig system panel.
    """

    def __init__(self, env):
        seiscomp.kernel.Module.__init__(self, env, env.moduleName(__file__))

    def start(self):
        # Running as a daemon is not supported for this module.
        msg = "%s cannot be started by design" % self.name
        self.env.log(msg)
        return None

    def stop(self):
        # Nothing runs in the background, so there is nothing to stop.
        return None

    def enable(self):
        # Enabling would schedule an automatic start, which is not wanted.
        msg = "%s cannot be enabled by design" % self.name
        self.env.log(msg)
        return None

    def status(self, _shouldRun):
        # Non-zero keeps the module out of the status listing.
        return 1

    def updateConfig(self):
        # No configuration work to do for this module.
        return 0

    def supportsAliases(self):
        # Aliases may be created for this module.
        return True

18
etc/init/screloc.py Normal file
View File

@ -0,0 +1,18 @@
import seiscomp.kernel
class Module(seiscomp.kernel.Module):
    """Init handler for a trunk-style module (screloc).

    Bindings are written to the database by the "trunk" module, so this
    handler only delegates and performs no configuration work itself.
    """

    def __init__(self, env):
        seiscomp.kernel.Module.__init__(self, env, env.moduleName(__file__))

    def updateConfigProxy(self):
        # Delegate configuration updates to the "trunk" module.
        return "trunk"

    def updateConfig(self):
        # The "trunk" module writes the bindings into the database.
        return 0

    def supportsAliases(self):
        # Aliases may be created for this module.
        return True

18
etc/init/screpick.py Normal file
View File

@ -0,0 +1,18 @@
import seiscomp.kernel
class Module(seiscomp.kernel.Module):
    """Init handler for a trunk-style module (screpick).

    Bindings are written to the database by the "trunk" module, so this
    handler only delegates and performs no configuration work itself.
    """

    def __init__(self, env):
        seiscomp.kernel.Module.__init__(self, env, env.moduleName(__file__))

    def updateConfigProxy(self):
        # Delegate configuration updates to the "trunk" module.
        return "trunk"

    def updateConfig(self):
        # The "trunk" module writes the bindings into the database.
        return 0

    def supportsAliases(self):
        # Aliases may be created for this module.
        return True

31
etc/init/scrttv.py Normal file
View File

@ -0,0 +1,31 @@
import seiscomp.kernel
class Module(seiscomp.kernel.Module):
    """Init handler for an interactive SeisComP application (scrttv).

    The program is launched manually by the operator; the kernel must not
    start, stop or enable it, and it is hidden from ``seiscomp status``
    and the scconfig system panel.
    """

    def __init__(self, env):
        seiscomp.kernel.Module.__init__(self, env, env.moduleName(__file__))

    def start(self):
        # Running as a daemon is not supported for this module.
        msg = "%s cannot be started by design" % self.name
        self.env.log(msg)
        return None

    def stop(self):
        # Nothing runs in the background, so there is nothing to stop.
        return None

    def enable(self):
        # Enabling would schedule an automatic start, which is not wanted.
        msg = "%s cannot be enabled by design" % self.name
        self.env.log(msg)
        return None

    def status(self, _shouldRun):
        # Non-zero keeps the module out of the status listing.
        return 1

    def updateConfig(self):
        # No configuration work to do for this module.
        return 0

    def supportsAliases(self):
        # Aliases may be created for this module.
        return True

31
etc/init/scshowevent.py Normal file
View File

@ -0,0 +1,31 @@
import seiscomp.kernel
class Module(seiscomp.kernel.Module):
    """Init handler for an interactive SeisComP application (scshowevent).

    The program is launched manually by the operator; the kernel must not
    start, stop or enable it, and it is hidden from ``seiscomp status``
    and the scconfig system panel.
    """

    def __init__(self, env):
        seiscomp.kernel.Module.__init__(self, env, env.moduleName(__file__))

    def start(self):
        # Running as a daemon is not supported for this module.
        msg = "%s cannot be started by design" % self.name
        self.env.log(msg)
        return None

    def stop(self):
        # Nothing runs in the background, so there is nothing to stop.
        return None

    def enable(self):
        # Enabling would schedule an automatic start, which is not wanted.
        msg = "%s cannot be enabled by design" % self.name
        self.env.log(msg)
        return None

    def status(self, _shouldRun):
        # Non-zero keeps the module out of the status listing.
        return 1

    def updateConfig(self):
        # No configuration work to do for this module.
        return 0

    def supportsAliases(self):
        # Aliases may be created for this module.
        return True

18
etc/init/scsohlog.py Normal file
View File

@ -0,0 +1,18 @@
import seiscomp.kernel
class Module(seiscomp.kernel.Module):
    """Init handler for a trunk-style module (scsohlog).

    Bindings are written to the database by the "trunk" module, so this
    handler only delegates and performs no configuration work itself.
    """

    def __init__(self, env):
        seiscomp.kernel.Module.__init__(self, env, env.moduleName(__file__))

    def updateConfigProxy(self):
        # Delegate configuration updates to the "trunk" module.
        return "trunk"

    def updateConfig(self):
        # The "trunk" module writes the bindings into the database.
        return 0

    def supportsAliases(self):
        # Aliases may be created for this module.
        return True

18
etc/init/scvoice.py Normal file
View File

@ -0,0 +1,18 @@
import seiscomp.kernel
class Module(seiscomp.kernel.Module):
    """Init handler for a trunk-style module (scvoice).

    Bindings are written to the database by the "trunk" module, so this
    handler only delegates and performs no configuration work itself.
    """

    def __init__(self, env):
        seiscomp.kernel.Module.__init__(self, env, env.moduleName(__file__))

    def updateConfigProxy(self):
        # Delegate configuration updates to the "trunk" module.
        return "trunk"

    def updateConfig(self):
        # The "trunk" module writes the bindings into the database.
        return 0

    def supportsAliases(self):
        # Aliases may be created for this module.
        return True

18
etc/init/scwfas.py Normal file
View File

@ -0,0 +1,18 @@
import seiscomp.kernel
class Module(seiscomp.kernel.Module):
    """Init handler for a trunk-style module (scwfas).

    Bindings are written to the database by the "trunk" module, so this
    handler only delegates and performs no configuration work itself.
    """

    def __init__(self, env):
        seiscomp.kernel.Module.__init__(self, env, env.moduleName(__file__))

    def updateConfigProxy(self):
        # Delegate configuration updates to the "trunk" module.
        return "trunk"

    def updateConfig(self):
        # The "trunk" module writes the bindings into the database.
        return 0

    def supportsAliases(self):
        # Aliases may be created for this module.
        return True

18
etc/init/scwfparam.py Normal file
View File

@ -0,0 +1,18 @@
import seiscomp.kernel
class Module(seiscomp.kernel.Module):
    """Init handler for a trunk-style module (scwfparam).

    Bindings are written to the database by the "trunk" module, so this
    handler only delegates and performs no configuration work itself.
    """

    def __init__(self, env):
        seiscomp.kernel.Module.__init__(self, env, env.moduleName(__file__))

    def updateConfigProxy(self):
        # Delegate configuration updates to the "trunk" module.
        return "trunk"

    def updateConfig(self):
        # The "trunk" module writes the bindings into the database.
        return 0

    def supportsAliases(self):
        # Aliases may be created for this module.
        return True

741
etc/init/seedlink.py Normal file
View File

@ -0,0 +1,741 @@
# Standard library imports, one per line (PEP 8).
import glob
import importlib.util
import os
import re
import resource
import shutil
import string
import sys
import time

import seiscomp.config
import seiscomp.kernel

# Optional SeisComP components: record their availability instead of
# failing at import time. Catch only ImportError — a bare "except:" would
# also hide genuine errors raised while importing those modules.
try:
    import seiscomp.system
    hasSystem = True
except ImportError:
    hasSystem = False

try:
    import seiscomp.datamodel
    import seiscomp.io
    dbAvailable = True
except ImportError:
    dbAvailable = False
'''
NOTE:
The plugin to be used for a station is configured with:
plugin = [type]
All plugin specific parameters are stored in plugin.[type].*.
Parameters from seedlink.cfg are not prefixed with "seedlink.".
Local parameters that are created from seedlink.cfg parameters are
prefixed with "seedlink.".
NOTE2: A database connection is supported to get station descriptions.
'''
def _loadDatabase(dbUrl):
    """
    Load inventory from a database, but only down to the station level.

    *dbUrl* has the form "driver://user:pwd@host/db". Raises Exception on
    any parse, plugin, driver or connection problem.
    """
    match = re.match("(?P<dbDriverName>^.*)://(?P<dbAddress>.+?:.+?@.+?/.+$)", dbUrl)
    if match is None:
        raise Exception("error in parsing SC DB URL")
    db = match.groupdict()
    try:
        # Make sure the matching database plugin is loaded.
        registry = seiscomp.system.PluginRegistry.Instance()
        registry.addPluginName("db" + db["dbDriverName"])
        registry.loadPlugins()
    except Exception as e:
        raise e
    driver = seiscomp.io.DatabaseInterface.Create(db["dbDriverName"])
    if driver is None:
        raise Exception("Cannot find database driver " + db["dbDriverName"])
    if not driver.connect(db["dbAddress"]):
        raise Exception("Cannot connect to database at " + db["dbAddress"])
    query = seiscomp.datamodel.DatabaseQuery(driver)
    if query is None:
        raise Exception("Cannot get DB query object")
    print(" Loading inventory from database ... ", file=sys.stderr)
    inventory = seiscomp.datamodel.Inventory()
    # Networks first, then the stations of each network — nothing deeper.
    query.loadNetworks(inventory)
    for networkIndex in range(inventory.networkCount()):
        query.loadStations(inventory.network(networkIndex))
    print("Done.", file=sys.stderr)
    return inventory
def _loadStationDescriptions(inv):
"""From an inventory, prepare a dictionary of station code descriptions.
In theory, we should only use stations with current time windows.
"""
d = dict()
for ni in range(inv.networkCount()):
n = inv.network(ni)
net = n.code()
if net not in d:
d[net] = {}
for si in range(n.stationCount()):
s = n.station(si)
sta = s.code()
d[net][sta] = s.description()
try:
end = s.end()
except: # ValueException ???
end = None
#print "Found in inventory:", net, sta, end, s.description()
return d
class TemplateModule(seiscomp.kernel.Module):
    """Base class for the seedlink kernel module.

    Loads the layered configuration (defaults, system, user) into flat
    parameter dictionaries and resolves the directory layout used for
    plugins, templates, bindings and runtime files. Subclasses render
    seedlink.ini and related files from templates via _process_template().
    """

    def __init__(self, env):
        seiscomp.kernel.Module.__init__(self, env, env.moduleName(__file__))
        self.pkgroot = self.env.SEISCOMP_ROOT
        cfg = seiscomp.config.Config()
        # Defaults Global + App Cfg
        cfg.readConfig(os.path.join(self.pkgroot, "etc", "defaults", "global.cfg"))
        cfg.readConfig(os.path.join(self.pkgroot, "etc", "defaults", self.name + ".cfg"))
        # Config Global + App Cfg
        cfg.readConfig(os.path.join(self.pkgroot, "etc", "global.cfg"))
        cfg.readConfig(os.path.join(self.pkgroot, "etc", self.name + ".cfg"))
        # User Global + App Cfg
        cfg.readConfig(os.path.join(os.environ['HOME'], ".seiscomp", "global.cfg"))
        cfg.readConfig(os.path.join(os.environ['HOME'], ".seiscomp", self.name + ".cfg"))
        # Flatten the merged config: every parameter becomes a
        # comma-joined string keyed by its full name.
        self.global_params = dict([(x, ",".join(cfg.getStrings(x))) for x in cfg.names()])
        # Per-station parameters; filled by _read_station_config().
        self.station_params = dict()
        self.plugin_dir = os.path.join(self.pkgroot, "share", "plugins", "seedlink")
        self.template_dir = os.path.join(self.pkgroot, "share", "templates", "seedlink")
        self.alt_template_dir = "" #os.path.join(self.env.home
        self.config_dir = os.path.join(self.pkgroot, "var", "lib", self.name)
        # DB URL used to fetch station descriptions (may stay empty).
        self.database_str = ""
        if "inventory_connection" in self.global_params:
            #WRONG self.database_str = cfg.getStrings("seedlink.readConnection")
            self.database_str = self.global_params["inventory_connection"]
            #self.database_str = cfg.getStrings("seedlink.database.type")+cfg.getStrings("seedlink.database.parameters")
        # {net: {sta: description}} cache, see _loadStationDescriptions().
        self.seedlink_station_descr = dict()
        self.rc_dir = os.path.join(self.pkgroot, "var", "lib", "rc")
        self.run_dir = os.path.join(self.pkgroot, "var", "run", self.name)
        self.bindings_dir = os.path.join(self.pkgroot, "etc", "key")
        self.key_dir = os.path.join(self.bindings_dir, self.name)
        # Network/station currently being processed (set by subclasses).
        self.net = None
        self.sta = None

    def _read_station_config(self, cfg_file):
        """Load one station binding file from the key directory into
        self.station_params (flattened like global_params)."""
        cfg = seiscomp.config.Config()
        cfg.readConfig(os.path.join(self.key_dir, cfg_file))
        self.station_params = dict([(x, ",".join(cfg.getStrings(x))) for x in cfg.names()])
        #self.station_params_ex = dict(filter(lambda s: s[1].find("$") != -1, [(x, ",".join(cfg.getStrings(x))) for x in cfg.names()]))

    def _process_template(self, tpl_file, source=None, station_scope=True, print_error=True):
        """Render *tpl_file*, searching source-specific template dirs
        first, with global (and optionally station) parameters."""
        tpl_paths = []
        if source:
            tpl_paths.append(os.path.join(self.alt_template_dir, source))
            tpl_paths.append(os.path.join(self.template_dir, source))
        tpl_paths.append(self.alt_template_dir)
        tpl_paths.append(self.template_dir)
        params = self.global_params.copy()
        #params_ex = self.global_params_ex.copy()
        if station_scope:
            # Station parameters override global ones.
            params.update(self.station_params)
            #params_ex.update(self.station_params_ex)
        params['pkgroot'] = self.pkgroot
        #for (p,v) in params_ex.items():
        #  try:
        #    t2 = seiscomp.kernel.Template(v)
        #    params[p] = t2.substitute(params)
        #
        #  except (KeyError, ValueError):
        #    pass
        return self.env.processTemplate(tpl_file, tpl_paths, params, print_error)

    def param(self, name, station_scope=True, print_warning=False):
        """Look up *name* in the station scope (falling back is NOT done:
        station scope and global scope are distinct); raises KeyError if
        undefined, optionally printing a warning first."""
        if station_scope:
            try:
                return self.station_params[name]
            except KeyError:
                pass
        else:
            try:
                return self.global_params[name]
            except KeyError:
                pass
        if print_warning:
            if station_scope:
                print("warning: parameter '%s' is not defined for station %s %s" % (name, self.net, self.sta))
            else:
                print("warning: parameter '%s' is not defined at global scope" % (name,))
        raise KeyError

    def setParam(self, name, value, station_scope=True):
        """Public wrapper around _set()."""
        self._set(name, value, station_scope)

    def _get(self, name, station_scope=True):
        """Like param(), but returns "" instead of raising KeyError."""
        try: return self.param(name, station_scope)
        except KeyError: return ""

    def _set(self, name, value, station_scope=True):
        """Store *value* in the station or global parameter dictionary."""
        if station_scope:
            self.station_params[name] = value
        else:
            self.global_params[name] = value
class Module(TemplateModule):
def __init__(self, env):
TemplateModule.__init__(self, env)
# Set kill timeout to 5 minutes
self.killTimeout = 300
    def _run(self):
        """Start the seedlink binary.

        Builds the daemon command line, raises the open-file limit to the
        hard maximum, optionally removes stale *.seq sequence files older
        than the configured number of minutes, and finally launches the
        process via the kernel environment. Returns 1 on a bad
        sequence_file_cleanup value, otherwise the result of env.start().
        """
        if self.env.syslog:
            daemon_opt = '-D '
        else:
            daemon_opt = ''
        daemon_opt += "-v -f " + os.path.join(self.config_dir, "seedlink.ini")
        try:
            # Raise the soft NOFILE limit to the hard limit; seedlink can
            # hold many station/plugin file descriptors at once.
            lim = resource.getrlimit(resource.RLIMIT_NOFILE)
            resource.setrlimit(resource.RLIMIT_NOFILE, (lim[1], lim[1]))
            lim = resource.getrlimit(resource.RLIMIT_NOFILE)
            print(" maximum number of open files set to", lim[0], file=sys.stderr)
        except Exception as e:
            print(" failed to raise the maximum number open files:", str(e), file=sys.stderr)
        if "sequence_file_cleanup" in self.global_params:
            try:
                # Age threshold in minutes; <= 0 disables the cleanup.
                max_minutes = int(self.global_params["sequence_file_cleanup"])
                if max_minutes > 0:
                    files = glob.glob(os.path.join(self.run_dir, "*.seq"))
                    for f in files:
                        if (time.time()-os.path.getmtime(f))/60 >= max_minutes:
                            print(" removing sequence file %s" % f, file=sys.stderr)
                            os.remove(f)
                else:
                    print(" sequence_file_cleanup disabled", file=sys.stderr)
            except ValueError:
                print(" sequence_file_cleanup parameter is not a number: '%s'" % str(self.global_params["sequence_file_cleanup"]), file=sys.stderr)
                return 1
        return self.env.start(self.name, self.env.binaryFile(self.name), daemon_opt,\
                not self.env.syslog)
def _getPluginHandler(self, source_type):
try:
return self.plugins[source_type]
except KeyError:
path = os.path.join(self.template_dir, source_type, "setup.py")
try: f = open(path, 'r')
except: return None
modname = '__seiscomp_seedlink_plugins_' + source_type
if modname in sys.modules:
mod = sys.modules[modname]
else:
# Create a module spec
spec = importlib.util.spec_from_file_location(modname, path)
# Create a module from the spec
mod = importlib.util.module_from_spec(spec)
# Load the module
spec.loader.exec_module(mod)
# store it in sys.modules
sys.modules[modname] = mod
# our namespace is the module dictionary
namespace = mod.__dict__
# test whether this has been done already
if not hasattr(mod, 'SeedlinkPluginHandler'):
code = f.read()
# compile and exec dynamic code in the module
exec(compile(code, '', 'exec'), namespace)
mod = namespace.get('SeedlinkPluginHandler')
handler = mod()
self.plugins[source_type] = handler
return handler
def _generateStationForIni(self):
ini = 'station %s description = "%s"\n' % \
(self._get('seedlink.station.id'), self._get('seedlink.station.description'))
ini += ' name = "%s"\n' % self._get('seedlink.station.code')
ini += ' network = "%s"\n' % self._get('seedlink.station.network')
if self._get('seedlink.station.access'):
ini += ' access = "%s"\n' % self._get('seedlink.station.access').replace(',',' ')
if self._get('seedlink.station.blanks'):
ini += ' blanks = "%s"\n' % self._get('seedlink.station.blanks')
if self._get('seedlink.station.encoding'):
ini += ' encoding = "%s"\n' % self._get('seedlink.station.encoding')
if self._get('seedlink.station.buffers'):
ini += ' buffers = "%s"\n' % self._get('seedlink.station.buffers')
if self._get('seedlink.station.segments'):
ini += ' segments = "%s"\n' % self._get('seedlink.station.segments')
if self._get('seedlink.station.segsize'):
ini += ' segsize = "%s"\n' % self._get('seedlink.station.segsize')
if self._get('seedlink.station.backfill_buffer'):
ini += ' backfill_buffer = "%s"\n' % self._get('seedlink.station.backfill_buffer')
if self._get('seedlink.station.sproc'):
ini += ' proc = "%s"\n' % self._get('seedlink.station.sproc')
ini += '\n'
return ini
def __process_station(self, profile):
if profile:
self.station_config_file = "profile_%s" % (profile,)
else:
self.station_config_file = "station_%s_%s" % (self.net, self.sta)
self._read_station_config(self.station_config_file)
# Generate plugin independent parameters
self._set('seedlink.station.id', self.net + '.' + self.sta)
self._set('seedlink.station.code', self.sta)
self._set('seedlink.station.network', self.net)
self._set('seedlink.station.access', self._get('access'))
self._set('seedlink.station.blanks', self._get('blanks'))
self._set('seedlink.station.encoding', self._get('encoding'))
self._set('seedlink.station.buffers', self._get('buffers'))
self._set('seedlink.station.segments', self._get('segments'))
self._set('seedlink.station.segsize', self._get('segsize'))
self._set('seedlink.station.backfill_buffer', self._get('backfill_buffer'))
self._set('seedlink.station.sproc', self._get('proc'))
# Supply station description:
# 1. try getting station description from a database
# 2. read station description from seiscomp/var/lib/rc/station_NET_STA
# 3. if not set, use the station code
description = ""
if len(self.seedlink_station_descr) > 0:
try:
description = self.seedlink_station_descr[self.net][self.sta]
except KeyError:
pass
if len(description) == 0:
try:
rc = seiscomp.config.Config()
rc.readConfig(os.path.join(self.rc_dir, "station_%s_%s" % (self.net, self.sta)))
description = rc.getString("description").decode()
except Exception as e:
# Maybe the rc file doesn't exist, maybe there's no readable description.
pass
if len(description) == 0:
description = self.sta
self._set('seedlink.station.description', description)
self.station_count += 1
if self._last_net != self.net:
print("+ network %s" % self.net)
self._last_net = self.net
print(" + station %s %s" % (self.sta, description))
# If real-time simulation is activated do not parse the sources
# and force the usage of the mseedfifo_plugin
if self.msrtsimul:
self._set('seedlink.station.sproc', '')
self.seedlink_station[(self.net, self.sta)] = self._generateStationForIni()
self._getPluginHandler('mseedfifo')
return
for source_type in self._get('sources').split(','):
if not source_type: continue
source_alias = source_type
toks = source_type.split(':')
if len(toks) > 2:
print("Error: invalid source identifier '%s', expected '[alias:]type'")
continue
elif len(toks) == 2:
source_alias = toks[0]
source_type = toks[1]
# Plugins are outsourced to external handlers
# that can be added with new plugins.
# This requires a handler file:
# share/templates/seedlink/$type/setup.py
pluginHandler = self._getPluginHandler(source_type)
if pluginHandler is None:
print("Error: no handler for plugin %s defined" % source_type)
continue
stat = source_type
if source_alias != source_type:
stat += " as " + source_alias
print(" + source %s" % stat)
# Backup original binding parameters
station_params = self.station_params.copy()
#station_params_ex = self.station_params_ex.copy()
# Modify parameter set. Remove alias definition with type string
if source_type != source_alias:
tmp_dict = {}
for x in self.station_params.keys():
if x.startswith('sources.%s.' % source_type): continue
if x.startswith('sources.%s.' % source_alias):
toks = x.split('.')
toks[1] = source_type
tmp_dict[".".join(toks)] = self.station_params[x]
else:
tmp_dict[x] = self.station_params[x]
self.station_params = tmp_dict
#tmp_dict = {}
#for x in self.station_params_ex.keys():
# if x.startswith('sources.%s.' % source_type): continue
# if x.startswith('sources.%s.' % source_alias):
# toks = x.split('.')
# toks[1] = source_type
# tmp_dict[".".join(toks)] = self.station_params_ex[x]
# else:
# tmp_dict[x] = self.station_params_ex[x]
#self.station_params_ex = tmp_dict
# Create source entry that ends up in seedlink.ini as plugin
try:
source_dict = self.seedlink_source[source_type]
except KeyError:
source_dict = {}
self.seedlink_source[source_type] = source_dict
source_key = pluginHandler.push(self)
if source_key is None:
source_key = source_type
else:
source_key = (source_type, source_key)
if source_key not in source_dict:
source_id = source_type + str(len(source_dict))
else:
(source_type, source_id) = source_dict[source_key][:2]
# Update internal parameters usable by a template
self._set('seedlink.source.type', source_type)
self._set('seedlink.source.id', source_id)
source_dict[source_key] = (source_type, source_id, self.global_params.copy(), self.station_params.copy())
# Create procs for this type for streams.xml
sproc_name = self._get('sources.%s.proc' % (source_type))
if sproc_name:
self.sproc_used = True
sproc_filename = "streams_%s.tpl" % sproc_name.split(':')[0]
sproc = self._process_template(sproc_filename, source_type, True, False)
if sproc:
self.sproc[sproc_name] = sproc
else:
print("WARNING: cannot find %s" % sproc_filename)
# Read plugins.ini template for this source and store content
# under the provided key for this binding
plugin_ini = self._process_template("plugins.ini.tpl", source_type, True, False)
if plugin_ini:
self.plugins_ini[source_key] = plugin_ini
templates = self._get('sources.%s.templates' % (source_type))
if templates:
for t in templates.split(','):
self.templates.add((t, source_type, 0))
# Allow plugin handler to override station id
station_params['seedlink.station.id'] = self.station_params['seedlink.station.id']
# Set original parameters
self.station_params = station_params
# Add station procs
sproc_name = self._get('proc')
if sproc_name:
self.sproc_used = True
sproc_filename = "streams_%s.tpl" % sproc_name
sproc = self._process_template(sproc_filename, None, True, False)
if sproc:
self.sproc[sproc_name] = sproc
else:
print("WARNING: cannot find %s" % sproc_filename)
# Create station section for seedlink.ini
self.seedlink_station[(self.net, self.sta)] = self._generateStationForIni()
def __load_stations(self):
    """Collect all station bindings for this module and build the internal
    per-station and per-source state used to generate seedlink.ini.

    Scans etc/key/station_NET_STA files; for each binding line that refers
    to this module the bound profile is handed to __process_station().
    """
    # Reset state collected per configuration run
    self.seedlink_source = {}
    self.seedlink_station = {}
    self.plugins_ini = {}
    self.sproc = {}
    self.plugins = {}
    self.sproc_used = False
    self.station_count = 0

    # Global template variables
    self._set('seedlink._daemon_opt', ' -D' if self.env.syslog else '', False)
    self._set('seedlink.plugin_dir', self.plugin_dir, False)
    self._set('seedlink.config_dir', self.config_dir, False)
    self._set('seedlink.run_dir', self.run_dir, False)
    self._set('seedlink.filters', os.path.join(self.config_dir, "filters.fir"), False)
    self._set('seedlink.streams', os.path.join(self.config_dir, "streams.xml"), False)

    self.templates = set()
    # backup_seqfiles must be executable (it is run from the crontab)
    self.templates.add(("backup_seqfiles", None, 0o755))

    rx_binding = re.compile(r'(?P<module>[A-Za-z0-9_\.-]+)(:(?P<profile>[A-Za-z0-9_-]+))?$')

    files = sorted(glob.glob(os.path.join(self.bindings_dir, "station_*")))
    self._last_net = ""
    for f in files:
        # Expect file names of the form .../station_NET_STA
        try:
            (path, net, sta) = f.split('_')[-3:]
        except ValueError:
            print("invalid path", f)
            continue
        if not path.endswith("station"):
            print("invalid path", f)
            # BUG FIX: previously fell through and processed the
            # non-station file anyway after reporting it as invalid.
            continue

        self.net = net
        self.sta = sta

        with open(f) as fd:
            for line in fd:
                line = line.strip()
                # Skip blank lines and comments
                if not line or line[0] == '#':
                    continue
                m = rx_binding.match(line)
                if not m:
                    print("invalid binding in %s: %s" % (f, line))
                    continue
                # Only bindings referring to this module are of interest
                if m.group('module') != self.name:
                    continue
                self.__process_station(m.group('profile'))
                break
def _set_default(self, name, value, station_scope=True):
    """Set parameter *name* to *value* only if it is not defined yet."""
    try:
        self.param(name, station_scope)
    except:
        # Parameter lookup failed, so it is undefined: install the default.
        self._set(name, value, station_scope)
def supportsAliases(self):
    """This module may be run under alias names."""
    return True
def requiresKernelModules(self):
    """This module does not depend on any kernel modules being started."""
    return False
def updateConfig(self):
    """Regenerate all seedlink configuration files (seedlink.ini,
    plugins.ini, streams.xml, filters.fir and auxiliary template files)
    from the module configuration and the station bindings.

    Returns 0 on success.
    """
    # Seed module-level defaults; _set_default() keeps already-set values.
    try:
        self._set_default("organization", self.env.getString("organization"), False)
    except Exception:
        pass
    self._set_default("lockfile", os.path.join("@ROOTDIR@", self.env.lockFile(self.name)), False)
    self._set_default("filebase", os.path.join("@ROOTDIR@", "var", "lib", self.name, "buffer"), False)
    self._set_default("port", "18000", False)
    self._set_default("encoding", "steim2", False)
    self._set_default("trusted", "127.0.0.0/8", False)
    self._set_default("access", "0.0.0.0/0", False)
    self._set_default("stream_check", "true", False)
    self._set_default("window_extraction", "true", False)
    self._set_default("window_extraction_trusted", "true", False)
    self._set_default("websocket", "false", False)
    self._set_default("websocket_trusted", "false", False)
    self._set_default("buffers", "100", False)
    self._set_default("segments", "50", False)
    self._set_default("segsize", "1000", False)
    self._set_default("gap_check_pattern", "", False)
    self._set_default("gap_treshold", "", False)
    self._set_default("info", "streams", False)
    self._set_default("info_trusted", "all", False)
    self._set_default("request_log", "true", False)
    self._set_default("proc_gap_warn", "10", False)
    self._set_default("proc_gap_flush", "100000", False)
    self._set_default("proc_gap_reset", "1000000", False)
    self._set_default("backfill_buffer", "0", False)
    self._set_default("seq_gap_limit", "100000", False)
    self._set_default("connections", "500", False)
    self._set_default("connections_per_ip", "20", False)
    self._set_default("bytespersec", "0", False)
    # This seedlink version expects composed station ids: net.sta
    self._set("composed_station_id", "true", False)

    # Expand the @Variables@ in path parameters
    if hasSystem:
        e = seiscomp.system.Environment.Instance()
        self.setParam("filebase", e.absolutePath(self.param("filebase", False)), False)
        self.setParam("lockfile", e.absolutePath(self.param("lockfile", False)), False)
    else:
        self.setParam("filebase", self.param("filebase", False), False)
        self.setParam("lockfile", self.param("lockfile", False), False)

    self.msrtsimul = self._get("msrtsimul", False).lower() == "true"

    # Load custom stream processor definitions (applied after
    # __load_stations() below, which resets self.sproc).
    custom_procs = self._process_template("streams_custom.tpl", None, True, False)

    # Load descriptions from inventory:
    if self.database_str:
        if dbAvailable:
            print(" Loading station descriptions from %s" % self.database_str, file=sys.stderr)
            inv = _loadDatabase(self.database_str)
            self.seedlink_station_descr = _loadStationDescriptions(inv)
        else:
            print(" Database configured but trunk is not installed", file=sys.stderr)
            self.seedlink_station_descr = dict()

    try:
        os.makedirs(self.config_dir)
    except OSError:
        pass
    try:
        os.makedirs(self.run_dir)
    except OSError:
        pass

    self.__load_stations()

    # Register the custom stream processors.
    # BUG FIX: this previously assigned the undefined name 'sproc'
    # (NameError whenever streams_custom.tpl existed) and ran before
    # __load_stations() reset self.sproc, so the entry was lost anyway.
    if custom_procs:
        self.sproc[""] = custom_procs
        self.sproc_used = True  # make sure streams.xml is generated

    for p in self.plugins.values():
        p.flush(self)

    # If real-time simulation is active, inject the mseedfifo plugin source
    if self.msrtsimul:
        self.seedlink_source['mseedfifo'] = {1: ('mseedfifo', 1, self.global_params.copy(), {})}

    # Map boolean configuration values onto the enabled/disabled keywords
    # expected by the seedlink.ini templates.
    for flag in ("stream_check", "window_extraction", "window_extraction_trusted",
                 "websocket", "websocket_trusted", "request_log"):
        if self._get(flag, False).lower() == "true":
            self._set(flag, "enabled", False)
        else:
            self._set(flag, "disabled", False)

    self._set("name", self.name, False)

    # Generate seedlink.ini: header, optional sproc section, one plugin
    # section per source and one section per station.
    with open(os.path.join(self.config_dir, "seedlink.ini"), "w") as fd:
        fd.write(self._process_template("seedlink_head.tpl", None, False))
        if self.sproc_used:
            fd.write(self._process_template("seedlink_sproc.tpl", None, False))
        for sources in self.seedlink_source.values():
            # The unpack deliberately assigns self.global_params and
            # self.station_params: the template processor reads them.
            for (source_type, source_id, self.global_params, self.station_params) in sources.values():
                source = self._process_template("seedlink_plugin.tpl", source_type)
                if source:
                    fd.write(source)
        fd.write(self._process_template("seedlink_station_head.tpl", None, False))
        for key in sorted(self.seedlink_station.keys()):
            fd.write(self.seedlink_station[key])

    if self.plugins_ini:
        with open(os.path.join(self.config_dir, "plugins.ini"), "w") as fd:
            for section in self.plugins_ini.values():
                fd.write(section)
    else:
        # plugins.ini not used: remove leftovers from previous runs
        try:
            os.remove(os.path.join(self.config_dir, "plugins.ini"))
        except OSError:
            pass

    if self.sproc_used:
        with open(self._get('seedlink.streams', False), "w") as fd:
            fd.write('<streams>\n')
            for proc in self.sproc.values():
                fd.write(proc)
            fd.write('</streams>\n')
        with open(self._get('seedlink.filters', False), "w") as fd:
            fd.write(self._process_template("filters.fir.tpl", None, False))
    else:
        # No stream procs used: remove the generated files of a previous run
        for key in ('seedlink.streams', 'seedlink.filters'):
            try:
                os.remove(self._get(key, False))
            except OSError:
                pass

    # Render any additional templates registered by the plugin handlers
    for (fname, source, perm) in self.templates:
        target = os.path.join(self.config_dir, fname)
        with open(target, "w") as fd:
            fd.write(self._process_template(fname + '.tpl', source, False))
        if perm:
            os.chmod(target, perm)

    return 0
def printCrontab(self):
    """Print the crontab entry that runs backup_seqfiles nightly."""
    script = os.path.join(self.config_dir, "backup_seqfiles")
    print("55 23 * * * %s >/dev/null 2>&1" % (script,))

212
etc/init/slarchive.py Normal file
View File

@ -0,0 +1,212 @@
from __future__ import print_function
import os, string, time, re, glob
import seiscomp.kernel, seiscomp.config
class Module(seiscomp.kernel.Module):
    """Init handler for slarchive.

    Builds the stream list (<name>.streams) and per-station rc_* files from
    the station bindings and starts slarchive connected to the local
    SeedLink server.
    """

    def __init__(self, env):
        seiscomp.kernel.Module.__init__(self, env, env.moduleName(__file__))
        self.archive_dir = os.path.join(self.env.SEISCOMP_ROOT, "var", "lib", "archive")
        self.config_dir = os.path.join(self.env.SEISCOMP_ROOT, "var", "lib", self.name)
        self.certs_dir = os.path.join(self.env.SEISCOMP_ROOT, "var", "lib", "certs")
        # Connection defaults, overridden by the module configuration
        self.host = "127.0.0.1"
        self.port = 18000
        self.buffer = 1000

    def _readConfig(self):
        """Read defaults, system and user configuration into self.params,
        resolving relative directories against SEISCOMP_ROOT.

        Returns the populated Config object.
        """
        self.rc = {}

        cfg = seiscomp.config.Config()
        cfg.readConfig(os.path.join(self.env.SEISCOMP_ROOT, "etc", "defaults", self.name + ".cfg"))
        cfg.readConfig(os.path.join(self.env.SEISCOMP_ROOT, "etc", self.name + ".cfg"))
        try:
            cfg.readConfig(os.path.join(os.environ['HOME'], ".seiscomp", self.name + ".cfg"))
        except Exception:
            pass

        self.params = dict([(x, ",".join(cfg.getStrings(x))) for x in cfg.names()])

        try:
            self.host = self.params['address']
        except KeyError:
            self.params['address'] = self.host
        try:
            self.port = int(self.params['port'])
        except (KeyError, ValueError):
            self.params['port'] = self.port
        try:
            # NOTE(review): kept as a string when taken from the config,
            # matching the original behavior; only used for display/params.
            self.buffer = self.params['buffer']
        except KeyError:
            self.params['buffer'] = self.buffer

        try:
            self.archive_dir = self.params['archive']
            if not os.path.isabs(self.archive_dir):
                self.archive_dir = os.path.join(self.env.SEISCOMP_ROOT, self.archive_dir)
        except KeyError:
            pass
        self.params['archive'] = self.archive_dir

        try:
            self.certs_dir = self.params['validation.certs']
            if not os.path.isabs(self.certs_dir):
                self.certs_dir = os.path.join(self.env.SEISCOMP_ROOT, self.certs_dir)
        except KeyError:
            pass
        self.params['validation.certs'] = self.certs_dir

        self.params['slarchive._config_dir'] = self.config_dir
        return cfg

    def _run(self):
        """Assemble the slarchive command line and start it under the
        module lock file. Returns the result of env.start()."""
        cfg = self._readConfig()
        mymodname = self.name + "_" + self.host + "_" + str(self.port)
        config_file = os.path.join(self.config_dir, self.name + ".streams")
        run_dir = os.path.join(self.env.SEISCOMP_ROOT, "var", "run", self.name)

        try:
            os.makedirs(run_dir)
        except OSError:
            pass
        try:
            os.makedirs(self.archive_dir)
        except OSError:
            pass

        prog = "run_with_lock"
        params = self.env.lockFile(self.name)
        params += " " + self.name + ' -b -x "' + os.path.join(run_dir, mymodname + ".seq") + ':1000000"'
        params += ' -SDS "%s"' % self.archive_dir
        # Optional numeric settings; omitted when unset (except the
        # network timeout, which falls back to 900 seconds).
        try: params += ' -B %d' % cfg.getInt('buffer')
        except Exception: pass
        try: params += ' -nt %d' % cfg.getInt('networkTimeout')
        except Exception: params += ' -nt 900'
        try: params += ' -nd %d' % cfg.getInt('delay')
        except Exception: pass
        try: params += ' -i %d' % cfg.getInt('idleTimeout')
        except Exception: pass
        try: params += ' -k %d' % cfg.getInt('keepalive')
        except Exception: pass
        params += ' -Fi:1 -Fc:900 -l "%s" %s:%d' % (config_file, self.host, self.port)
        try:
            # Both options are only passed when a validation mode is set;
            # if getString() raises, neither is appended.
            params += " -Cs %s" % cfg.getString('validation.mode')
            params += ' -certs %s' % self.certs_dir
        except Exception:
            pass
        return self.env.start(self.name, prog, params, True)

    def _processStation(self, key_dir, profile):
        """Build the stream-list line ("NET STA [selectors]") for the
        current station and record its rc_* content (retention days).

        Returns the stream-list line.
        """
        if profile:
            station_config_file = "profile_%s" % (profile,)
        else:
            station_config_file = "station_%s_%s" % (self.net, self.sta)

        cfg = seiscomp.config.Config()
        cfg.readConfig(os.path.join(key_dir, station_config_file))

        line = self.net + " " + self.sta
        try:
            line += " " + cfg.getString("selectors")
        except Exception:
            pass

        keepdays = 30  # default archive retention in days
        try:
            keepdays = cfg.getInt("keep")
        except Exception:
            pass

        rc = "STATION='%s'\n" % self.sta + \
             "NET='%s'\n" % self.net + \
             "ARCH_KEEP='%d'\n" % keepdays
        self.rc[self.net + "_" + self.sta] = rc
        return line

    def requiresKernelModules(self):
        """This module does not depend on any kernel modules."""
        return False

    def updateConfig(self):
        """Regenerate the purge_datafiles script, the stream list and the
        per-station rc_* files from the bindings. Returns 0 on success."""
        self._readConfig()
        template_dir = os.path.join(self.env.SEISCOMP_ROOT, "share", "templates", "slarchive")

        # Create purge_datafiles script
        tpl_paths = [template_dir]
        purge_script = self.env.processTemplate('purge_datafiles.tpl', tpl_paths, self.params, True)
        if purge_script:
            try:
                os.makedirs(self.config_dir)
            except OSError:
                pass
            path = os.path.join(self.config_dir, "purge_datafiles")
            with open(path, "w") as fd:
                fd.write(purge_script)
            os.chmod(path, 0o755)  # run from the crontab
        else:
            try:
                os.remove(os.path.join(self.config_dir, "purge_datafiles"))
            except OSError:
                pass

        rx_binding = re.compile(r'(?P<module>[A-Za-z0-9_\.-]+)(:(?P<profile>[A-Za-z0-9_-]+))?$')
        bindings_dir = os.path.join(self.env.SEISCOMP_ROOT, "etc", "key")
        key_dir = os.path.join(bindings_dir, self.name)
        config_file = os.path.join(self.config_dir, self.name + ".streams")

        # Remove the stale stream list; it is rebuilt below
        try:
            os.remove(config_file)
        except OSError:
            pass

        config_fd = None
        files = glob.glob(os.path.join(bindings_dir, "station_*"))
        for f in files:
            try:
                (path, net, sta) = f.split('_')[-3:]
            except ValueError:
                print("invalid path", f)
                continue
            if not path.endswith("station"):
                print("invalid path", f)
                # BUG FIX: previously fell through and processed the file
                continue

            self.net = net
            self.sta = sta

            with open(f) as fd:
                for line in fd:
                    line = line.strip()
                    if not line or line[0] == '#':
                        continue
                    m = rx_binding.match(line)
                    if not m:
                        print("invalid binding in %s: %s" % (f, line))
                        continue
                    if m.group('module') != self.name:
                        continue
                    line = self._processStation(key_dir, m.group('profile'))
                    if line:
                        if not config_fd:
                            try:
                                os.makedirs(self.config_dir)
                            except OSError:
                                pass
                            try:
                                config_fd = open(config_file, "w")
                            except Exception:
                                raise Exception("Error: unable to create slarchive config file '%s'" % config_file)
                        config_fd.write("%s\n" % line)
                    break

        # BUG FIX: the stream list was never explicitly closed
        if config_fd:
            config_fd.close()

        # Create rc files and clean up unused ones
        rc_files = glob.glob(os.path.join(self.config_dir, "rc_*"))
        for (station_id, rc) in self.rc.items():
            with open(os.path.join(self.config_dir, "rc_%s" % (station_id,)), "w") as fd:
                fd.write(rc)
        for rc in rc_files:
            if os.path.basename(rc)[3:] not in self.rc:
                try:
                    os.remove(rc)
                except OSError:
                    pass

        return 0

    def supportsAliases(self):
        """This module may be run under alias names."""
        return True

    def printCrontab(self):
        """Print the crontab entry that runs purge_datafiles nightly."""
        print("20 3 * * * %s/purge_datafiles >/dev/null 2>&1" % (self.config_dir))

181
etc/init/slmon.py Normal file
View File

@ -0,0 +1,181 @@
from __future__ import print_function
import os, string, time, re, glob
import seiscomp.kernel, seiscomp.config
class Module(seiscomp.kernel.Module):
    """Init handler for slmon.

    Generates config.ini and stations.ini from the module configuration and
    station bindings and starts the SeedLink monitor.
    """

    def __init__(self, env):
        seiscomp.kernel.Module.__init__(self, env, env.moduleName(__file__))
        self.config_dir = os.path.join(self.env.SEISCOMP_ROOT, "var", "lib", self.name)
        self.rc_dir = os.path.join(self.env.SEISCOMP_ROOT, "var", "lib", "rc")

    def _readConfig(self):
        """Read defaults, system and user configuration into self.params and
        fill in fallback values for every parameter used by the templates.

        Returns the populated Config object.
        """
        self.rc = {}

        cfg = seiscomp.config.Config()
        cfg.readConfig(os.path.join(self.env.SEISCOMP_ROOT, "etc", "defaults", self.name + ".cfg"))
        try:
            cfg.readConfig(os.path.join(self.env.SEISCOMP_ROOT, "etc", self.name + ".cfg"))
        except Exception:
            pass
        try:
            cfg.readConfig(os.path.join(os.environ['HOME'], ".seiscomp", self.name + ".cfg"))
        except Exception:
            pass

        self.params = dict([(x, ",".join(cfg.getStrings(x))) for x in cfg.names()])

        self.params.setdefault('title', "SeedLink Monitor")
        self.params.setdefault('refresh', "180")
        self.params.setdefault('address', "127.0.0.1")
        try:
            int(self.params['port'])
        except (KeyError, ValueError):
            self.params['port'] = 18000
        self.params.setdefault('email', "")
        try:
            self.params['wwwdir'] = self.params['wwwdir'].replace("@ROOTDIR@", self.env.SEISCOMP_ROOT).replace("@NAME@", self.name)
        except KeyError:
            self.params['wwwdir'] = os.path.join(self.env.SEISCOMP_ROOT, "var", "run", "slmon")
        # Not yet implemented correctly: live seismogram link in the footer
        self.params.setdefault('liveurl', "http://geofon.gfz-potsdam.de/waveform/liveseis.php?station=%s")
        # favicon
        self.params.setdefault('icon', "http://www.gfz-potsdam.de/favicon.ico")
        # name of the external-site link shown in the footer
        self.params.setdefault('linkname', "GEOFON")
        # URL of the external-site link shown in the footer
        self.params.setdefault('linkurl', "http://www.gfz-potsdam.de/geofon/")
        return cfg

    def _run(self):
        """Start slmon under the module lock file with the generated
        stations.ini and config.ini. Returns the result of env.start()."""
        station_file = os.path.join(self.env.SEISCOMP_ROOT, "var", "lib", self.name, "stations.ini")
        config_file = os.path.join(self.env.SEISCOMP_ROOT, "var", "lib", self.name, "config.ini")
        prog = "run_with_lock"
        params = self.env.lockFile(self.name)
        params += " " + self.name + ' -s "' + station_file + '" -c "' + config_file + '"'
        return self.env.start(self.name, prog, params, True)

    def _processStation(self, key_dir, profile):
        """Build the stations.ini section for the current station.

        The station description is taken from var/lib/rc/station_NET_STA
        when available, falling back to the station code.
        Returns the section text.
        """
        if profile:
            station_config_file = "profile_%s" % (profile,)
        else:
            station_config_file = "station_%s_%s" % (self.net, self.sta)

        cfg = seiscomp.config.Config()
        cfg.readConfig(os.path.join(key_dir, station_config_file))
        try:
            group = cfg.getString("group")
        except Exception:
            group = "local"

        description = ""
        try:
            rc = seiscomp.config.Config()
            rc.readConfig(os.path.join(self.rc_dir, "station_%s_%s" % (self.net, self.sta)))
            description = rc.getString("description")
        except Exception:
            # Maybe the rc file doesn't exist, maybe there's no readable
            # description.
            pass
        if len(description) == 0:
            description = self.sta

        content = "[" + self.net + "_" + self.sta + "]\n"
        content += "net = %s\n" % self.net
        content += "sta = %s\n" % self.sta
        content += "info = %s\n" % description
        content += "group = %s\n" % group
        content += "type = real\n"
        return content

    def updateConfig(self):
        """Regenerate config.ini and stations.ini from the configuration
        and station bindings. Returns 0 on success."""
        self._readConfig()
        template_dir = os.path.join(self.env.SEISCOMP_ROOT, "share", "templates", self.name)

        # Create config.ini from the template
        tpl_paths = [template_dir]
        config_file = self.env.processTemplate('config.tpl', tpl_paths, self.params, True)
        if config_file:
            try:
                os.makedirs(self.config_dir)
            except OSError:
                pass
            path = os.path.join(self.config_dir, "config.ini")
            with open(path, "w") as fd:
                fd.write(config_file)
            # NOTE(review): 0o755 marks a configuration file as executable;
            # this looks copy-pasted from a script template — confirm the
            # intended mode before changing it.
            os.chmod(path, 0o755)
        else:
            try:
                os.remove(os.path.join(self.config_dir, "config.ini"))
            except OSError:
                pass

        rx_binding = re.compile(r'(?P<module>[A-Za-z0-9_\.-]+)(:(?P<profile>[A-Za-z0-9_-]+))?$')
        bindings_dir = os.path.join(self.env.SEISCOMP_ROOT, "etc", "key")
        key_dir = os.path.join(bindings_dir, self.name)
        config_file = os.path.join(self.config_dir, "stations.ini")

        # Remove the stale station list; it is rebuilt below
        try:
            os.remove(config_file)
        except OSError:
            pass

        config_fd = None
        files = glob.glob(os.path.join(bindings_dir, "station_*"))
        for f in files:
            try:
                (path, net, sta) = f.split('_')[-3:]
            except ValueError:
                print("invalid path", f)
                continue
            if not path.endswith("station"):
                print("invalid path", f)
                # BUG FIX: previously fell through and processed the file
                continue

            self.net = net
            self.sta = sta

            with open(f) as fd:
                for line in fd:
                    line = line.strip()
                    if not line or line[0] == '#':
                        continue
                    m = rx_binding.match(line)
                    if not m:
                        print("invalid binding in %s: %s" % (f, line))
                        continue
                    if m.group('module') != self.name:
                        continue
                    content = self._processStation(key_dir, m.group('profile'))
                    if content:
                        if not config_fd:
                            try:
                                os.makedirs(self.config_dir)
                            except OSError:
                                pass
                            try:
                                config_fd = open(config_file, "w")
                            except Exception:
                                # BUG FIX: the message claimed "slarchive"
                                # (copy-paste from slarchive.py)
                                raise Exception("Error: unable to create slmon config file '%s'" % config_file)
                        config_fd.write("%s\n" % content)
                    break

        # BUG FIX: the station list was never explicitly closed
        if config_fd:
            config_fd.close()

        return 0

107
etc/init/trunk.py Normal file
View File

@ -0,0 +1,107 @@
import os
import sys
import seiscomp.config
import seiscomp.kernel
import seiscomp.system
import seiscomp.bindings2cfg
def parseBindPort(bind):
    """Extract the port from a bind string of the form 'port' or 'ip:port'.

    Returns the port as an int, or -1 if the string has more than one ':'.
    """
    parts = bind.split(':')
    if len(parts) not in (1, 2):
        return -1
    # For both accepted forms the port is the last component.
    return int(parts[-1])
class Module(seiscomp.kernel.Module):
    """Config module that synchronizes the bindings with the database."""

    def __init__(self, env):
        seiscomp.kernel.Module.__init__(self, env, env.moduleName(__file__))
        # This is a config module which synchronizes bindings with the database
        self.isConfigModule = True

    def updateConfig(self):
        """Push the current bindings into the database through a local
        messaging connection.

        Determines the scmaster port (plain first, then SSL) and runs the
        bindings2cfg updater against localhost. Returns the updater's exit
        code, 0 when messaging is disabled, or 1 on a malformed
        messaging.bind value.
        """
        messaging = True
        messagingPort = 18180
        messagingProtocol = 'scmp'

        try:
            messaging = self.env.getBool("messaging.enable")
        except Exception:
            pass

        # If messaging is disabled in kernel.cfg, do not do anything
        if not messaging:
            sys.stdout.write("- messaging disabled, nothing to do\n")
            return 0

        # Load scmaster configuration and figure the bind ports of scmaster out
        cfg = seiscomp.config.Config()
        seiscomp.system.Environment.Instance().initConfig(cfg, "scmaster")

        # First check the unencrypted port and prefer that
        p = parseBindPort(cfg.getString("interface.bind"))
        if p > 0:
            messagingPort = p

            # kernel.cfg may override the port; a ValueError from a
            # non-numeric value is deliberately ignored, as before.
            try:
                bind = self.env.getString("messaging.bind")
                # CONSISTENCY: reuse the module-level helper instead of
                # duplicating the split/int logic inline.
                p = parseBindPort(bind)
                if p < 0:
                    sys.stdout.write(
                        "E invalid messaging bind parameter: %s\n" % bind)
                    sys.stdout.write(" expected either 'port' or 'ip:port'\n")
                    return 1
                messagingPort = p
            except Exception:
                pass

        # Otherwise check if ssl is enabled
        else:
            p = parseBindPort(cfg.getString("interface.ssl.bind"))
            if p > 0:
                messagingPort = p
                messagingProtocol = 'scmps'

        # Synchronize database configuration
        params = [self.name, '--module-name', self.name, '--console', '1', '-H',
                  '%s://localhost:%d/production' % (messagingProtocol, messagingPort)]
        # Create the database update app and run it
        # This app implements a seiscomp.client.Application and connects
        # to localhost regardless of connections specified in global.cfg to
        # prevent updating a remote installation by accident.
        app = seiscomp.bindings2cfg.ConfigDBUpdater(len(params), params)
        app.setConnectionRetries(3)
        return app()

    def setup(self, setup_config):
        """Write datacenterID, agencyID and organization from the setup
        wizard into etc/global.cfg; values not provided are removed."""
        cfgfile = os.path.join(self.env.SEISCOMP_ROOT, "etc", "global.cfg")

        cfg = seiscomp.config.Config()
        cfg.readConfig(cfgfile)
        # Copy each setup value; drop the entry when it was not supplied.
        for key in ("datacenterID", "agencyID", "organization"):
            try:
                cfg.setString(key, setup_config.getString("global.meta." + key))
            except Exception:
                cfg.remove(key)
        cfg.writeConfig()

        return 0